Example 1
    def test_xqueue_control_submit(self):
        Submission.objects.all().delete()
        response = self.c.login(username='******', password='******')
        gp = deepcopy(self.grader_payload)
        gp['control'] = json.dumps({
            'min_to_calibrate': 1,
            'max_to_calibrate': 1,
            'peer_grader_count': 1,
            'required_peer_grading': 1,
            'staff_minimum_for_peer_grading': 1,
            'staff_minimum_for_ai_grading': 1,
        })

        content = self.c.post(
            SUBMIT_URL,
            self.get_content(gp),
        )
        body = json.loads(content.content)

        self.assertEqual(body['success'], True)

        sub = Submission.objects.all()[0]
        control = SubmissionControl(sub)
        self.assertEqual(control.min_to_calibrate, 1)
        self.assertEqual(control.max_to_calibrate, 1)
        self.assertEqual(control.peer_grader_count, 1)
        self.assertEqual(control.required_peer_grading_per_student, 1)
        self.assertEqual(control.minimum_to_use_ai, 1)
        self.assertEqual(control.minimum_to_use_peer, 1)
Example 2
    def test_control_create(self):
        test_sub = test_util.get_sub("PE", STUDENT_ID, LOCATION, "PE")
        test_sub.control_fields = json.dumps({
            'min_to_calibrate': 1,
            'max_to_calibrate': 1,
            'peer_grader_count': 1,
            'required_peer_grading': 1,
            'peer_grade_finished_submissions_when_none_pending': True,
            'staff_minimum_for_peer_grading': 1,
            'staff_minimum_for_ai_grading': 1,
        })
        test_sub.save()

        control = SubmissionControl(test_sub)

        self.assertEqual(control.min_to_calibrate, 1)
        self.assertEqual(control.max_to_calibrate, 1)
        self.assertEqual(control.peer_grader_count, 1)
        self.assertEqual(control.required_peer_grading_per_student, 1)
        self.assertEqual(
            control.peer_grade_finished_submissions_when_none_pending, True)
        self.assertEqual(control.minimum_to_use_peer, 1)
        self.assertEqual(control.minimum_to_use_ai, 1)
Example 3
    def test_control_default(self):
        test_sub = test_util.get_sub("PE", STUDENT_ID, LOCATION, "PE")
        test_sub.save()

        control = SubmissionControl(test_sub)

        self.assertEqual(control.min_to_calibrate,
                         settings.PEER_GRADER_MINIMUM_TO_CALIBRATE)
        self.assertEqual(control.max_to_calibrate,
                         settings.PEER_GRADER_MAXIMUM_TO_CALIBRATE)
        self.assertEqual(control.peer_grader_count, settings.PEER_GRADER_COUNT)
        self.assertEqual(control.required_peer_grading_per_student,
                         settings.REQUIRED_PEER_GRADING_PER_STUDENT)
Example 4
    def test_control_default(self):
        test_sub = test_util.get_sub("PE", STUDENT_ID, LOCATION, "PE")
        test_sub.save()

        control = SubmissionControl(test_sub)

        self.assertEqual(control.min_to_calibrate, settings.PEER_GRADER_MINIMUM_TO_CALIBRATE)
        self.assertEqual(control.max_to_calibrate, settings.PEER_GRADER_MAXIMUM_TO_CALIBRATE)
        self.assertEqual(control.peer_grader_count, settings.PEER_GRADER_COUNT)
        self.assertEqual(control.required_peer_grading_per_student, settings.REQUIRED_PEER_GRADING_PER_STUDENT)
        self.assertEqual(control.peer_grade_finished_submissions_when_none_pending,
                         settings.PEER_GRADE_FINISHED_SUBMISSIONS_WHEN_NONE_PENDING)
        self.assertEqual(control.minimum_to_use_ai, settings.MIN_TO_USE_ML)
        self.assertEqual(control.minimum_to_use_peer, settings.MIN_TO_USE_PEER)
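
Examples 2-4 together pin down the contract of SubmissionControl: it parses the submission's control_fields JSON, renames a few keys (required_peer_grading becomes required_peer_grading_per_student, and the staff_minimum_for_* keys become minimum_to_use_peer / minimum_to_use_ai), and falls back to project settings for anything missing. The class itself is not shown in these examples; the following is a minimal sketch of that resolution logic under those assumptions, not the real implementation.

import json

from django.conf import settings


class SubmissionControl(object):
    """Sketch only; inferred from the assertions in Examples 2-4.

    Parse the submission's control_fields JSON and fall back to a
    settings value for any key that is missing or unparseable.
    """

    # (JSON key, attribute name, settings fallback) triples.  The renames
    # are taken from the test assertions above; the fallback names come
    # from the default-value tests in Examples 3, 4, and 8.
    FIELDS = [
        ('min_to_calibrate', 'min_to_calibrate',
         'PEER_GRADER_MINIMUM_TO_CALIBRATE'),
        ('max_to_calibrate', 'max_to_calibrate',
         'PEER_GRADER_MAXIMUM_TO_CALIBRATE'),
        ('peer_grader_count', 'peer_grader_count', 'PEER_GRADER_COUNT'),
        ('required_peer_grading', 'required_peer_grading_per_student',
         'REQUIRED_PEER_GRADING_PER_STUDENT'),
        ('peer_grade_finished_submissions_when_none_pending',
         'peer_grade_finished_submissions_when_none_pending',
         'PEER_GRADE_FINISHED_SUBMISSIONS_WHEN_NONE_PENDING'),
        ('staff_minimum_for_peer_grading', 'minimum_to_use_peer',
         'MIN_TO_USE_PEER'),
        ('staff_minimum_for_ai_grading', 'minimum_to_use_ai', 'MIN_TO_USE_ML'),
    ]

    def __init__(self, submission):
        # Tolerate a None submission (e.g. no latest_submission yet) and
        # empty or malformed control_fields by using all defaults.
        try:
            fields = json.loads(
                getattr(submission, 'control_fields', None) or '{}')
        except (TypeError, ValueError):
            fields = {}
        for json_key, attr, setting_name in self.FIELDS:
            setattr(self, attr,
                    fields.get(json_key, getattr(settings, setting_name)))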
Example 5
def get_submission_ml(request):
    """
    Gets a submission for the ML grader
    Input:
        GET request with no parameters
    """
    unique_locations = [
        x['location']
        for x in Submission.objects.values('location').distinct()
    ]
    for location in unique_locations:
        nothing_to_ml_grade_for_location_key = NOTHING_TO_ML_GRADE_LOCATION_CACHE_KEY.format(
            location=location)
        # Skip to the next location if we have recently determined that this
        # location has no ML grading ready.
        if cache.get(nothing_to_ml_grade_for_location_key):
            continue

        sl = staff_grading_util.StaffLocation(location)
        control = SubmissionControl(sl.latest_submission())

        subs_graded_by_instructor = sl.graded_count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(
            location)
        if subs_graded_by_instructor >= control.minimum_to_use_ai and success:
            to_be_graded = Submission.objects.filter(
                location=location,
                state=SubmissionState.waiting_to_be_graded,
                next_grader_type="ML",
            )
            if to_be_graded.count() > 0:
                to_be_graded = to_be_graded[0]
                to_be_graded.state = SubmissionState.being_graded
                to_be_graded.save()

                return util._success_response(
                    {'submission_id': to_be_graded.id}, _INTERFACE_VERSION)
        # If we don't get a submission to return, then there is no ML grading for this location.
        # Cache this boolean to avoid an expensive loop iteration.
        cache.set(nothing_to_ml_grade_for_location_key, True,
                  settings.RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY)

    util.log_connection_data()

    # Set this cache key to ensure that this expensive function isn't repeatedly called when not needed.
    cache.set(NOTHING_TO_ML_GRADE_CACHE_KEY, True,
              settings.RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY)
    return util._error_response("Nothing to grade.", _INTERFACE_VERSION)
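
get_submission_ml relies on two module-level cache keys and a settings value that are not shown in the snippet. A sketch of plausible definitions follows; the names match the code above, but the exact key strings and the timeout value are assumptions.

# Assumed definitions; names match get_submission_ml above, values illustrative.
NOTHING_TO_ML_GRADE_CACHE_KEY = 'ml_grading:nothing_to_grade'
NOTHING_TO_ML_GRADE_LOCATION_CACHE_KEY = 'ml_grading:nothing_to_grade:{location}'

# In settings: seconds to trust the cached "nothing to grade" flag before
# re-scanning a location.  Illustrative value.
RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY = 60

The negative cache trades freshness for cost: a location that just became gradeable may wait up to RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY seconds before it is scanned again.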
Example 6
    def test_check_if_grading_finished_for_duplicates(self):
        test_sub = test_util.get_sub("PE", STUDENT_ID, LOCATION, "PE")
        test_sub.save()
        handle_submission(test_sub)

        sl = StaffLocation(LOCATION)
        control = SubmissionControl(sl.latest_submission())

        for i in xrange(0, control.minimum_to_use_peer):
            test_sub = test_util.get_sub("PE", STUDENT_ID, LOCATION, "PE")
            test_sub.save()
            handle_submission(test_sub)
            test_grader = test_util.get_grader("IN")
            test_grader.submission = test_sub
            test_grader.save()

            test_sub.state = SubmissionState.finished
            test_sub.previous_grader_type = "IN"
            test_sub.posted_results_back_to_queue = True
            test_sub.save()

        test_sub2 = test_util.get_sub("PE", STUDENT_ID, LOCATION, "PE")
        test_sub2.save()
        handle_submission(test_sub2)
        self.assertTrue(test_sub2.is_duplicate)

        success = expire_submissions.check_if_grading_finished_for_duplicates()
        self.assertEqual(success, True)
        test_sub2.is_duplicate = False
        test_sub2.save()

        test_sub3 = test_util.get_sub("PE", STUDENT_ID, LOCATION, "PE")
        test_sub3.is_duplicate = False
        test_sub3.save()

        self.assertEqual(test_sub3.is_duplicate, False)
        test_sub3.has_been_duplicate_checked = False
        test_sub3.save()
        expire_submissions.mark_student_duplicate_submissions()
        test_sub3 = Submission.objects.get(id=test_sub3.id)
        self.assertEqual(test_sub3.is_duplicate, True)

        test_sub3.duplicate_submission_id = None
        test_sub3.is_plagiarized = False
        test_sub3.save()
        expire_submissions.add_in_duplicate_ids()
        test_sub3 = Submission.objects.get(id=test_sub3.id)
        self.assertTrue(test_sub3.duplicate_submission_id is not None)
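
This test drives the duplicate pipeline end to end: handle_submission flags the repeated submission, check_if_grading_finished_for_duplicates reports when grading of flagged duplicates can be considered finished, and mark_student_duplicate_submissions / add_in_duplicate_ids backfill the flag and the duplicate_submission_id. The underlying predicate appears in Example 9 as grader_util.check_is_duplicate_and_plagiarized; its real implementation is not shown here, but a sketch consistent with its call site might look like this (the ordering rule and the plagiarism criterion are assumptions).

from controller.models import Submission


def check_is_duplicate_and_plagiarized(student_response, location, student_id,
                                       preferred_grader_type):
    # Sketch, not the original grader_util helper.  Assumption: a submission
    # duplicates an earlier one when the same response text was already
    # submitted at this location; it counts as plagiarized when the earlier
    # author is a different student.
    matches = Submission.objects.filter(
        location=location,
        student_response=student_response,
        preferred_grader_type=preferred_grader_type,
    ).order_by('id')
    if not matches.exists():
        return False, False, None
    first = matches[0]
    return True, first.student_id != student_id, first.id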
Example 7
    def test_control_create(self):
        test_sub = test_util.get_sub("PE", STUDENT_ID, LOCATION, "PE")
        test_sub.control_fields = json.dumps({
            'min_to_calibrate': 1,
            'max_to_calibrate': 1,
            'peer_grader_count': 1,
            'required_peer_grading': 1
        })
        test_sub.save()

        control = SubmissionControl(test_sub)

        self.assertEqual(control.min_to_calibrate, 1)
        self.assertEqual(control.max_to_calibrate, 1)
        self.assertEqual(control.peer_grader_count, 1)
        self.assertEqual(control.required_peer_grading_per_student, 1)
Example 8
    def test_xqueue_submit(self):
        Submission.objects.all().delete()
        response = self.c.login(username='******', password='******')

        content = self.c.post(
            SUBMIT_URL,
            self.get_content(self.grader_payload),
        )

        body = json.loads(content.content)

        self.assertEqual(body['success'], True)

        sub = Submission.objects.all()[0]
        control = SubmissionControl(sub)
        self.assertEqual(control.min_to_calibrate, settings.PEER_GRADER_MINIMUM_TO_CALIBRATE)
        self.assertEqual(control.max_to_calibrate, settings.PEER_GRADER_MAXIMUM_TO_CALIBRATE)
        self.assertEqual(control.peer_grader_count, settings.PEER_GRADER_COUNT)
        self.assertEqual(control.required_peer_grading_per_student, settings.REQUIRED_PEER_GRADING_PER_STUDENT)
        self.assertEqual(control.minimum_to_use_ai, settings.MIN_TO_USE_ML)
        self.assertEqual(control.minimum_to_use_peer, settings.MIN_TO_USE_PEER)
Example 9
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        # Run some basic sanity checks on the submission.  Also check whether the
        # student is banned, and fail the submission if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        success, check_dict = basic_check_util.simple_quality_check(
            sub.student_response, sub.initial_display, sub.student_id,
            sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(
                sub.student_response))

        # Add additional tags needed to create a grader object.
        check_dict = grader_util.add_additional_tags_to_dict(
            check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(
                sub.rubric)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [0] * len(max_rubric_scores)

        # Create and handle the grader, and return.
        grader_util.create_and_handle_grader_object(check_dict)

        # If the checks result in a score of 0 (out of 1), the submission fails
        # basic sanity checks.  Return it to the student and don't process further.
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        sl = staff_grading_util.StaffLocation(sub.location)
        # Assign whether the grader should be ML or IN based on the number of
        # graded examples.
        subs_graded_by_instructor = sl.graded_count()
        subs_pending_instructor = sl.pending_count()

        # TODO: abstract out the logic for assigning which grader to use.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY,
                                            sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)

        control = SubmissionControl(sl.latest_submission())

        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(
                sub.location)
            if ((subs_graded_by_instructor + subs_pending_instructor) >=
                    control.minimum_to_use_ai) and success:
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            # Ensure there are some calibration essays before peer grading begins.
            # Calibration essays can be added via the command line utility or
            # through normal instructor grading.
            if ((subs_graded_by_instructor + subs_pending_instructor) >=
                    control.minimum_to_use_peer):
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        # Do duplicate checks.
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(
            sub.student_response, sub.location, sub.student_id,
            sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True
        statsd.increment(
            "open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
            ])

        sub.save()

    except Exception:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
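
The branch in the middle of handle_submission is the whole routing policy, and it reads more clearly as a pure function. The following restatement uses the same grader-type codes and thresholds as the code above; pick_next_grader_type is a hypothetical name, not a helper from this codebase.

def pick_next_grader_type(preferred, staff_graded_or_pending, control,
                          ml_models_ready):
    # Hypothetical restatement of the routing branch in handle_submission.
    # preferred: 'ML', 'PE', or 'IN', from the grader settings file.
    # staff_graded_or_pending: instructor-graded plus instructor-pending count.
    # control: SubmissionControl exposing minimum_to_use_ai / minimum_to_use_peer.
    # ml_models_ready: result of check_for_all_model_and_rubric_success(location).
    if preferred == "ML":
        # ML grading needs enough staff grades and successfully trained models.
        if staff_graded_or_pending >= control.minimum_to_use_ai and ml_models_ready:
            return "ML"
        return "IN"
    if preferred == "PE":
        # Peer grading waits until enough staff grades exist to seed calibration.
        if staff_graded_or_pending >= control.minimum_to_use_peer:
            return "PE"
        return "IN"
    if preferred == "IN":
        return "IN"
    # The original logs and returns False here; a pure function can raise instead.
    raise ValueError("Invalid grader type: {0}".format(preferred))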