Example #1
    def test_generate_pair(self):
        # test adaptive pair algorithm
        self.package_name = "adaptive"

        results = generate_pair(package_name=self.package_name,
                                scored_objects=self.scored_objects,
                                comparison_pairs=self.comparisons)

        self.assertIsInstance(results, ComparisonPair)
        min_key = min([results.key1, results.key2])
        max_key = max([results.key1, results.key2])

        # round zero items should be selected
        self.assertEqual(min_key, 3)
        self.assertEqual(max_key, 4)
        self.assertEqual(results.winner, None)

        # test random pair algorithm
        self.package_name = "random"

        results = generate_pair(package_name=self.package_name,
                                scored_objects=self.scored_objects,
                                comparison_pairs=self.comparisons)

        self.assertIsInstance(results, ComparisonPair)
        min_key = min([results.key1, results.key2])
        max_key = max([results.key1, results.key2])

        # round zero items should be selected
        self.assertEqual(min_key, 3)
        self.assertEqual(max_key, 4)
        self.assertEqual(results.winner, None)
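
Both calls above should return a ComparisonPair with no winner set, drawn from the items that have not yet been compared. A minimal, self-contained sketch of the kind of fixture these assertions assume (the namedtuple definitions and the toy pairing function are illustrative stand-ins, not the library's actual classes; keys 3 and 4 are the only round-zero items):

from collections import namedtuple
import random

# Illustrative stand-ins mirroring the fields used throughout these examples.
ScoredObject = namedtuple(
    'ScoredObject',
    ['key', 'score', 'variable1', 'variable2', 'rounds', 'opponents', 'wins', 'loses'])
ComparisonPair = namedtuple('ComparisonPair', ['key1', 'key2', 'winner'])

# Keys 1 and 2 have already been compared once; keys 3 and 4 are still at
# round zero, so a round-aware pairing algorithm should select them first.
scored_objects = [
    ScoredObject(key=1, score=0.7, variable1=None, variable2=None,
                 rounds=1, opponents=1, wins=1, loses=0),
    ScoredObject(key=2, score=0.3, variable1=None, variable2=None,
                 rounds=1, opponents=1, wins=0, loses=1),
    ScoredObject(key=3, score=0, variable1=None, variable2=None,
                 rounds=0, opponents=0, wins=0, loses=0),
    ScoredObject(key=4, score=0, variable1=None, variable2=None,
                 rounds=0, opponents=0, wins=0, loses=0),
]

def pick_round_zero_pair(objects):
    # Toy pairing: take two items from the lowest round and leave winner unset.
    min_round = min(o.rounds for o in objects)
    candidates = [o for o in objects if o.rounds == min_round]
    first, second = random.sample(candidates, 2)
    return ComparisonPair(key1=first.key, key2=second.key, winner=None)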
Example #2
    def test_generate_pair(self):
        # test adaptive pair algorithm
        self.package_name = "adaptive"

        results = generate_pair(
            package_name=self.package_name,
            scored_objects=self.scored_objects,
            comparison_pairs=self.comparisons
        )

        self.assertIsInstance(results, ComparisonPair)
        min_key = min([results.key1, results.key2])
        max_key = max([results.key1, results.key2])

        # round zero items should be selected
        self.assertEqual(min_key, 3)
        self.assertEqual(max_key, 4)
        self.assertEqual(results.winner, None)

        # test adaptive min delta pair algorithm
        self.package_name = "adaptive_min_delta"

        results = generate_pair(
            package_name=self.package_name,
            scored_objects=self.scored_objects,
            comparison_pairs=self.comparisons,
            criterion_scores=self.criterion_scores,
            criterion_weights=self.criterion_weights
        )

        self.assertIsInstance(results, ComparisonPair)
        min_key = min([results.key1, results.key2])
        max_key = max([results.key1, results.key2])

        # round zero items should be selected
        self.assertEqual(min_key, 3)
        self.assertEqual(max_key, 4)
        self.assertEqual(results.winner, None)

        # test random pair algorithm
        self.package_name = "random"

        results = generate_pair(
            package_name=self.package_name,
            scored_objects=self.scored_objects,
            comparison_pairs=self.comparisons
        )

        self.assertIsInstance(results, ComparisonPair)
        min_key = min([results.key1, results.key2])
        max_key = max([results.key1, results.key2])

        # round zero items should be selected
        self.assertEqual(min_key, 3)
        self.assertEqual(max_key, 4)
        self.assertEqual(results.winner, None)
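
The adaptive_min_delta branch above takes two extra mappings that the other algorithms do not need. Judging from how they are built in Examples #6 and #9 below, criterion_scores is keyed by answer id and then by criterion id, while criterion_weights is keyed by criterion id; the ids and values here are made up for illustration:

# criterion_scores: {answer_id: {criterion_id: score}}
criterion_scores = {
    3: {10: 0.6, 11: 0.8},
    4: {10: 0.5, 11: 0.9},
}

# criterion_weights: {criterion_id: weight}
criterion_weights = {10: 1, 11: 2}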
Example #3
    def _get_new_comparison_pair(cls, course_id, assignment_id, user_id,
                                 pairing_algorithm, comparisons):
        from . import Assignment, UserCourse, CourseRole, Answer, AnswerScore, PairingAlgorithm

        # ineligible authors - e.g. instructors, TAs, dropped students, the current user
        non_students = UserCourse.query \
            .filter(and_(
                UserCourse.course_id == course_id,
                UserCourse.course_role != CourseRole.student
            ))
        ineligible_user_ids = [non_student.user_id for non_student in non_students]
        ineligible_user_ids.append(user_id)

        answers_with_score = Answer.query \
            .with_entities(Answer, AnswerScore.score) \
            .outerjoin(AnswerScore) \
            .filter(and_(
                Answer.user_id.notin_(ineligible_user_ids),
                Answer.assignment_id == assignment_id,
                Answer.active == True,
                Answer.practice == False,
                Answer.draft == False
            )) \
            .all()

        scored_objects = []
        for answer_with_score in answers_with_score:
            scored_objects.append(
                ScoredObject(key=answer_with_score.Answer.id,
                             score=answer_with_score.score,
                             rounds=answer_with_score.Answer.round,
                             variable1=None,
                             variable2=None,
                             wins=None,
                             loses=None,
                             opponents=None))

        comparison_pairs = [
            comparison.convert_to_comparison_pair()
            for comparison in comparisons
        ]

        comparison_pair = generate_pair(package_name=pairing_algorithm.value,
                                        scored_objects=scored_objects,
                                        comparison_pairs=comparison_pairs,
                                        log=current_app.logger)

        return comparison_pair
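
Because the query outer-joins AnswerScore, answers that have never been scored come back with score set to None, and that None flows straight into ScoredObject. Any code ranking these objects therefore has to tolerate unscored items; one possible normalization (illustrative only, not taken from the library):

def effective_score(scored_object):
    # Treat never-scored answers (score is None) as 0 when ordering candidates.
    return scored_object.score if scored_object.score is not None else 0

A call like sorted(scored_objects, key=effective_score) would then rank unscored answers first.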
Example #4
            comparisons = []

            for round in range(1, NUMBER_OF_ROUNDS + 1):
                if len(students) == 0:
                    break

                for comparison_in_round in range(ROUND_LENGTH):
                    if len(students) == 0:
                        break

                    random.shuffle(students)
                    student = students[0]
                    student_comparisons = student['comparisons_completed']

                    comparison_pair = generate_pair(
                        package_name=pairing_package_name,
                        scored_objects=answers,
                        comparison_pairs=student_comparisons)
                    key1 = comparison_pair.key1
                    key2 = comparison_pair.key2
                    winner = select_winner(round, student['key'], key1, key2)
                    comparison_pair = comparison_pair._replace(winner=winner)

                    comparisons.append(comparison_pair)
                    student['comparisons_completed'].append(comparison_pair)
                    student['comparisons_left'] -= 1
                    if student['comparisons_left'] <= 0:
                        students.remove(student)

                    index1 = next(index for index, answer in enumerate(answers)
                                  if answer.key == key1)
Example #5
def _run(file_path, pairing_package_name, scoring_package_name, winner_selector, correct_rate):
    random.seed()
    numpy.random.seed()

    actual_grades = numpy.random.normal(0.78, 0.1, NUMBER_OF_ANSWERS)
    grade_by_answer_key = {}

    answers = []
    results = []
    for key, grade in enumerate(actual_grades):
        grade_by_answer_key[key+1] = grade
        answers.append(ScoredObject(
            key=key+1, score=0, variable1=None, variable2=None,
            rounds=0, opponents=0, wins=0, loses=0
        ))

    students = []
    for key in range(NUMBER_OF_STUDENTS):
        students.append({
            'key': key,
            'comparisons_left': NUMBER_OF_COMPARISONS_PER_STUDENT,
            'comparisons_completed': []
        })

    comparisons = []

    for round_count in range(1, NUMBER_OF_ROUNDS+1):
        if len(students) == 0:
            break

        for comparison_in_round in range(ROUND_LENGTH):
            if len(students) == 0:
                break

            student = random.choice(students)
            student_comparisons = student['comparisons_completed']

            comparison_pair = generate_pair(
                package_name=pairing_package_name,
                scored_objects=answers,
                comparison_pairs=student_comparisons
            )
            key1 = comparison_pair.key1
            key1_grade = grade_by_answer_key[key1]
            key2 = comparison_pair.key2
            key2_grade = grade_by_answer_key[key2]

            winner = _decide_winner(
                winner_selector, correct_rate,
                key1_grade, key2_grade
            )
            comparison_pair = comparison_pair._replace(winner=winner)

            comparisons.append(comparison_pair)
            student['comparisons_completed'].append(comparison_pair)
            student['comparisons_left'] -= 1
            if student['comparisons_left'] <= 0:
                indexes = [i for i, s in enumerate(students) if student['key'] == s['key']]
                del students[indexes[0]]

            index1 = next(index for index, answer in enumerate(answers) if answer.key == key1)
            index2 = next(index for index, answer in enumerate(answers) if answer.key == key2)

            result1, result2 = calculate_score_1vs1(
                package_name=scoring_package_name,
                key1_scored_object=answers[index1],
                key2_scored_object=answers[index2],
                winner=winner,
                other_comparison_pairs=comparisons
            )
            answers[index1] = result1
            answers[index2] = result2

        current_scores = [answer.score for answer in answers]

        r_value, pearsonr_p_value = pearsonr(actual_grades, current_scores)
        results.append(str(r_value))
        #print("Round {} ----------- pearsonr={} value=={}".format(
        #    round_count, r_value, pearsonr_p_value
        #))

    with open(file_path, "a") as csvfile:
        out = csv.writer(csvfile)
        out.writerow(results)
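
The simulation measures convergence by correlating the estimated scores against the known grades after every round. The same check in isolation, using scipy.stats.pearsonr (the numbers are made up):

from scipy.stats import pearsonr

actual = [0.70, 0.75, 0.80, 0.85]     # ground-truth grades
estimated = [0.10, 0.30, 0.60, 0.90]  # scores after some rounds

r_value, p_value = pearsonr(actual, estimated)
print(r_value)  # approaches 1.0 as the estimated ranking matches the true grades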
Example #6
    def _get_new_comparison_pair(cls, course_id, assignment_id, user_id,
                                 group_id, pairing_algorithm, comparisons):
        from . import Assignment, UserCourse, CourseRole, Answer, AnswerScore, \
            PairingAlgorithm, AnswerCriterionScore, AssignmentCriterion, Group

        # exclude the current user and those without a proper role.
        # note that a sys admin (not enrolled in the course, and thus having no
        # course role) can create answers; such answers are considered eligible.
        ineligibles = UserCourse.query \
            .with_entities(UserCourse.user_id) \
            .filter(and_(
                UserCourse.course_id == course_id,
                UserCourse.course_role == CourseRole.dropped
            )) \
            .all()

        ineligible_user_ids = [
            ineligible.user_id for ineligible in ineligibles
        ]
        ineligible_user_ids.append(user_id)

        query = Answer.query \
            .with_entities(Answer, AnswerScore.score) \
            .outerjoin(AnswerScore, AnswerScore.answer_id == Answer.id) \
            .filter(and_(
                or_(
                    ~Answer.user_id.in_(ineligible_user_ids),
                    Answer.user_id == None # don't filter out group answers
                ),
                Answer.assignment_id == assignment_id,
                Answer.active == True,
                Answer.practice == False,
                Answer.draft == False,
                Answer.comparable == True
            ))

        if group_id:
            query = query.filter(Answer.group_id != group_id)

        answers_with_score = query.all()

        scored_objects = []
        for answer_with_score in answers_with_score:
            scored_objects.append(
                ScoredObject(key=answer_with_score.Answer.id,
                             score=answer_with_score.score,
                             rounds=answer_with_score.Answer.round,
                             variable1=None,
                             variable2=None,
                             wins=None,
                             loses=None,
                             opponents=None))

        comparison_pairs = [
            comparison.convert_to_comparison_pair()
            for comparison in comparisons
        ]

        # the adaptive min delta algorithm requires extra criterion-specific parameters
        if pairing_algorithm == PairingAlgorithm.adaptive_min_delta:
            # retrieve extra criterion score data
            answer_criterion_scores = AnswerCriterionScore.query \
                .with_entities(AnswerCriterionScore.answer_id,
                    AnswerCriterionScore.criterion_id, AnswerCriterionScore.score) \
                .join(Answer) \
                .filter(and_(
                    Answer.user_id.notin_(ineligible_user_ids),
                    Answer.assignment_id == assignment_id,
                    Answer.active == True,
                    Answer.practice == False,
                    Answer.draft == False
                )) \
                .all()

            assignment_criterion_weights = AssignmentCriterion.query \
                .with_entities(AssignmentCriterion.criterion_id, AssignmentCriterion.weight) \
                .filter(and_(
                    AssignmentCriterion.assignment_id == assignment_id,
                    AssignmentCriterion.active == True
                )) \
                .all()

            criterion_scores = {}
            for criterion_score in answer_criterion_scores:
                scores = criterion_scores.setdefault(criterion_score.answer_id, {})
                scores[criterion_score.criterion_id] = criterion_score.score

            criterion_weights = {}
            for the_weight in assignment_criterion_weights:
                criterion_weights[the_weight.criterion_id] = the_weight.weight

            comparison_pair = generate_pair(
                package_name=pairing_algorithm.value,
                scored_objects=scored_objects,
                comparison_pairs=comparison_pairs,
                criterion_scores=criterion_scores,
                criterion_weights=criterion_weights,
                log=current_app.logger)
        else:
            comparison_pair = generate_pair(
                package_name=pairing_algorithm.value,
                scored_objects=scored_objects,
                comparison_pairs=comparison_pairs,
                log=current_app.logger)

        return comparison_pair
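
The nested criterion_scores map above is built with dict.setdefault, which returns the existing inner dict for an answer id or inserts a fresh one. A standalone illustration of the same pattern:

# (answer_id, criterion_id, score) rows, as the criterion score query returns them
rows = [(1, 10, 0.6), (1, 11, 0.8), (2, 10, 0.5)]

criterion_scores = {}
for answer_id, criterion_id, score in rows:
    criterion_scores.setdefault(answer_id, {})[criterion_id] = score

print(criterion_scores)  # {1: {10: 0.6, 11: 0.8}, 2: {10: 0.5}}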
Example #7
def _run(file_path, pairing_package_name, scoring_package_name,
         winner_selector, correct_rate, actual_grades, repetition_count):
    random.seed()
    numpy.random.seed()

    while repetition_count < REPETITIONS:
        grade_by_answer_key = {}
        answers = []
        results = []
        for key, grade in enumerate(actual_grades):
            grade_by_answer_key[key + 1] = grade
            answers.append(
                ScoredObject(key=key + 1,
                             score=0,
                             variable1=None,
                             variable2=None,
                             rounds=0,
                             opponents=0,
                             wins=0,
                             loses=0))

        students = []
        for key in range(NUMBER_OF_STUDENTS):
            students.append({
                'key': key,
                'comparisons_left': NUMBER_OF_COMPARISONS_PER_STUDENT,
                'comparisons_completed': []
            })

        comparisons = []

        for round_count in range(1, NUMBER_OF_ROUNDS + 1):
            if len(students) == 0:
                break

            for comparison_in_round in range(ROUND_LENGTH):
                if len(students) == 0:
                    break

                student = random.choice(students)
                student_comparisons = student['comparisons_completed']

                comparison_pair = generate_pair(
                    package_name=pairing_package_name,
                    scored_objects=answers,
                    comparison_pairs=student_comparisons)
                key1 = comparison_pair.key1
                key1_grade = grade_by_answer_key[key1]
                key2 = comparison_pair.key2
                key2_grade = grade_by_answer_key[key2]

                winner = _decide_winner(winner_selector, correct_rate,
                                        key1_grade, key2_grade)
                comparison_pair = comparison_pair._replace(winner=winner)

                comparisons.append(comparison_pair)
                student['comparisons_completed'].append(comparison_pair)
                student['comparisons_left'] -= 1
                if student['comparisons_left'] <= 0:
                    indexes = [
                        i for i, s in enumerate(students)
                        if student['key'] == s['key']
                    ]
                    del students[indexes[0]]

                index1 = next(index for index, answer in enumerate(answers)
                              if answer.key == key1)
                index2 = next(index for index, answer in enumerate(answers)
                              if answer.key == key2)

                result1, result2 = calculate_score_1vs1(
                    package_name=scoring_package_name,
                    key1_scored_object=answers[index1],
                    key2_scored_object=answers[index2],
                    winner=winner,
                    other_comparison_pairs=comparisons)
                answers[index1] = result1
                answers[index2] = result2

            current_scores = [answer.score for answer in answers]

            r_value, pearsonr_p_value = pearsonr(actual_grades, current_scores)
            results.append(str(r_value))
            #print("Round {} ----------- pearsonr={} value=={}".format(
            #    round_count, r_value, pearsonr_p_value
            #))

        with open(file_path, "a") as csvfile:
            out = csv.writer(csvfile)
            out.writerow(results)

        # prepare for next run
        repetition_count += 1
        actual_grades = [answer.score for answer in answers]
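
ComparisonPair behaves as a named tuple here: _replace does not mutate the pair in place, it returns a new tuple with the given field swapped, which is why both simulation scripts reassign the result. For instance:

from collections import namedtuple

ComparisonPair = namedtuple('ComparisonPair', ['key1', 'key2', 'winner'])

pair = ComparisonPair(key1=3, key2=4, winner=None)
decided = pair._replace(winner=3)

print(pair.winner)     # None; the original tuple is unchanged
print(decided.winner)  # 3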
Example #8
            for round in range(1, NUMBER_OF_ROUNDS+1):
                if len(students) == 0:
                    break

                for comparison_in_round in range(ROUND_LENGTH):
                    if len(students) == 0:
                        break

                    random.shuffle(students)
                    student = students[0]
                    student_comparisons = student['comparisons_completed']

                    comparison_pair = generate_pair(
                        package_name=pairing_package_name,
                        scored_objects=answers,
                        comparison_pairs=student_comparisons
                    )
                    key1 = comparison_pair.key1
                    key2 = comparison_pair.key2
                    winning_key = select_winner(round, student['key'], key1, key2)
                    comparison_pair = comparison_pair._replace(winning_key=winning_key)

                    comparisons.append(comparison_pair)
                    student['comparisons_completed'].append(comparison_pair)
                    student['comparisons_left'] = student['comparisons_left'] - 1
                    if student['comparisons_left'] <= 0:
                        students.remove(student)

                    index1 = next(index for index, answer in enumerate(answers) if answer.key == key1)
                    index2 = next(index for index, answer in enumerate(answers) if answer.key == key2)
Example #9
    def _get_new_comparison_pair(cls, course_id, assignment_id, user_id, group_id,
                                 pairing_algorithm, comparisons):
        from . import Assignment, UserCourse, CourseRole, Answer, AnswerScore, \
            PairingAlgorithm, AnswerCriterionScore, AssignmentCriterion, Group

        # exclude the current user and those without a proper role.
        # note that a sys admin (not enrolled in the course, and thus having no
        # course role) can create answers; such answers are considered eligible.
        ineligibles = UserCourse.query \
            .with_entities(UserCourse.user_id) \
            .filter(and_(
                UserCourse.course_id == course_id,
                UserCourse.course_role == CourseRole.dropped
            )) \
            .all()

        ineligible_user_ids = [ineligible.user_id for ineligible in ineligibles]
        ineligible_user_ids.append(user_id)

        query = Answer.query \
            .with_entities(Answer, AnswerScore.score) \
            .outerjoin(AnswerScore, AnswerScore.answer_id == Answer.id) \
            .filter(and_(
                or_(
                    ~Answer.user_id.in_(ineligible_user_ids),
                    Answer.user_id == None # don't filter out group answers
                ),
                Answer.assignment_id == assignment_id,
                Answer.active == True,
                Answer.practice == False,
                Answer.draft == False,
                Answer.comparable == True
            ))

        if group_id:
            query = query.filter(Answer.group_id != group_id)

        answers_with_score = query.all()

        scored_objects = []
        for answer_with_score in answers_with_score:
            scored_objects.append(ScoredObject(
                key=answer_with_score.Answer.id,
                score=answer_with_score.score,
                rounds=answer_with_score.Answer.round,
                variable1=None, variable2=None,
                wins=None, loses=None, opponents=None
            ))

        comparison_pairs = [comparison.convert_to_comparison_pair() for comparison in comparisons]

        # the adaptive min delta algorithm requires extra criterion-specific parameters
        if pairing_algorithm == PairingAlgorithm.adaptive_min_delta:
            # retrieve extra criterion score data
            answer_criterion_scores = AnswerCriterionScore.query \
                .with_entities(AnswerCriterionScore.answer_id,
                    AnswerCriterionScore.criterion_id, AnswerCriterionScore.score) \
                .join(Answer) \
                .filter(and_(
                    Answer.user_id.notin_(ineligible_user_ids),
                    Answer.assignment_id == assignment_id,
                    Answer.active == True,
                    Answer.practice == False,
                    Answer.draft == False
                )) \
                .all()

            assignment_criterion_weights = AssignmentCriterion.query \
                .with_entities(AssignmentCriterion.criterion_id, AssignmentCriterion.weight) \
                .filter(and_(
                    AssignmentCriterion.assignment_id == assignment_id,
                    AssignmentCriterion.active == True
                )) \
                .all()

            criterion_scores = {}
            for criterion_score in answer_criterion_scores:
                scores = criterion_scores.setdefault(criterion_score.answer_id, {})
                scores[criterion_score.criterion_id] = criterion_score.score

            criterion_weights = {}
            for the_weight in assignment_criterion_weights:
                criterion_weights[the_weight.criterion_id] = the_weight.weight

            comparison_pair = generate_pair(
                package_name=pairing_algorithm.value,
                scored_objects=scored_objects,
                comparison_pairs=comparison_pairs,
                criterion_scores=criterion_scores,
                criterion_weights=criterion_weights,
                log=current_app.logger
            )
        else:
            comparison_pair = generate_pair(
                package_name=pairing_algorithm.value,
                scored_objects=scored_objects,
                comparison_pairs=comparison_pairs,
                log=current_app.logger
            )

        return comparison_pair