Example #1
    def item_to_score(self):
        """
        Gets an item for an instructor to score.
        """
        subs_graded = self.graded_count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(
            self.location)
        control = SubmissionControl(self.latest_submission())

        if subs_graded < control.minimum_to_use_ai or not success:
            to_be_graded = self.pending()

            finished_submission_text = self.graded_submission_text()

            for tbg in to_be_graded:
                if tbg is not None and tbg.student_response not in finished_submission_text:
                    tbg.state = SubmissionState.being_graded
                    tbg.next_grader_type = "IN"
                    tbg.save()
                    found = True
                    sub_id = tbg.id

                    return found, sub_id

        #If nothing is found, return false
        return False, 0
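item_to_score returns a (found, submission_id) pair and signals a miss as (False, 0) rather than raising. A minimal caller sketch, assuming item_to_score is a method of the StaffLocation class from staff_grading_util and that Submission comes from the controller models (both names appear in later examples; this helper itself is illustrative, not part of the codebase):

def checkout_item_for_instructor(location):
    sl = staff_grading_util.StaffLocation(location)
    found, sub_id = sl.item_to_score()
    if not found:
        return None  # nothing pending, or ML already covers this location
    # item_to_score has already marked the submission being_graded and saved it
    return Submission.objects.get(id=sub_id)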
Example #2
    def item_to_rescore(self):
        """
        Gets an item for an instructor to rescore (items have already been machine scored, ML)
        """
        success = ml_grading_util.check_for_all_model_and_rubric_success(
            self.location)

        if success:
            #Order by confidence if we are looking for finished ML submissions
            finished_submission_text = self.graded_submission_text()
            to_be_graded = self.pending().filter(
                grader__status_code=GraderStatus.success).order_by(
                    'grader__confidence')

            for tbg in to_be_graded:
                if tbg is not None and tbg.student_response not in finished_submission_text:
                    tbg.state = SubmissionState.being_graded
                    tbg.next_grader_type = "IN"
                    tbg.save()
                    found = True
                    sub_id = tbg.id

                    return found, sub_id

        #If nothing is found, return false
        return False, 0
Example #3
def get_submission_ml(request):
    """
    Gets a submission for the ML grader
    Input:
        GET request with no parameters
    """
    unique_locations = [x["location"] for x in list(Submission.objects.values("location").distinct())]
    for location in unique_locations:
        subs_graded_by_instructor = staff_grading_util.finished_submissions_graded_by_instructor(location).count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(location)
        if subs_graded_by_instructor >= settings.MIN_TO_USE_ML and success:
            to_be_graded = Submission.objects.filter(
                location=location, state=SubmissionState.waiting_to_be_graded, next_grader_type="ML"
            )
            if to_be_graded.count() > 0:
                to_be_graded = to_be_graded[0]
                if to_be_graded is not None:
                    to_be_graded.state = SubmissionState.being_graded
                    to_be_graded.save()

                    # Insert timing initialization code
                    initialize_timing(to_be_graded)

                    return util._success_response({"submission_id": to_be_graded.id}, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._error_response("Nothing to grade.", _INTERFACE_VERSION)
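Every example gates on ml_grading_util.check_for_all_model_and_rubric_success, whose body is not part of this excerpt. From the call sites, it takes a problem location and returns a boolean meaning "a trained ML model and a rubric are both ready". A stand-in stub for local experimentation, an assumption about shape only, not the real implementation:

def check_for_all_model_and_rubric_success(location):
    # hypothetical registry of locations whose model training and rubric
    # parsing have both succeeded; the real function queries stored state
    ready_locations = set()
    return location in ready_locations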
Example #4
    def item_to_score(self):
        """
        Gets an item for an instructor to score.
        """
        subs_graded = self.graded_count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(self.location)
        control = SubmissionControl(self.latest_submission())

        if subs_graded < control.minimum_to_use_ai or not success:
            to_be_graded = self.pending()

            finished_submission_text = self.graded_submission_text()

            for tbg in to_be_graded:
                if tbg is not None and tbg.student_response not in finished_submission_text:
                    tbg.state = SubmissionState.being_graded
                    tbg.next_grader_type = "IN"
                    tbg.save()
                    found = True
                    sub_id = tbg.id

                    return found, sub_id

        #If nothing is found, return false
        return False, 0
Example #5
def get_submission_ml(request):
    """
    Gets a submission for the ML grader
    Input:
        GET request with no parameters
    """
    unique_locations = [
        x['location']
        for x in list(Submission.objects.values('location').distinct())
    ]
    for location in unique_locations:
        sl = staff_grading_util.StaffLocation(location)
        subs_graded_by_instructor = sl.graded_count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(
            location)
        if subs_graded_by_instructor >= settings.MIN_TO_USE_ML and success:
            to_be_graded = Submission.objects.filter(
                location=location,
                state=SubmissionState.waiting_to_be_graded,
                next_grader_type="ML",
            )
            if to_be_graded.count() > 0:
                to_be_graded = to_be_graded[0]
                if to_be_graded is not None:
                    to_be_graded.state = SubmissionState.being_graded
                    to_be_graded.save()

                    return util._success_response(
                        {'submission_id': to_be_graded.id}, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._error_response("Nothing to grade.", _INTERFACE_VERSION)
Example #6
def get_single_instructor_grading_item_for_location_with_options(
        location,
        check_for_ml=True,
        types_to_check_for=None,
        submission_state_to_check_for=SubmissionState.waiting_to_be_graded):
    """
    Returns a single instructor grading item for a given location
    Input:
        Problem location, and boolean check_for_ml, which dictates whether submissions
        should be returned to the instructor when an ML model has already been trained
        for this location.  If True, submissions with a successfully trained ML model
        are not returned.
    Output:
        Boolean success/fail, and either 0 or the submission id of a valid submission.
    """

    if not types_to_check_for:
        types_to_check_for = "IN"

    log.debug("Looking for location {0}, state {1}, next_grader_type {2}".format(
        location, submission_state_to_check_for, types_to_check_for))

    subs_graded = finished_submissions_graded_by_instructor(location).count()
    subs_pending = submissions_pending_instructor(location, state_in=[SubmissionState.being_graded]).count()
    success = ml_grading_util.check_for_all_model_and_rubric_success(location)

    if ((subs_graded + subs_pending) < settings.MIN_TO_USE_ML or not success) or not check_for_ml:
        to_be_graded = Submission.objects.filter(
            location=location,
            state=submission_state_to_check_for,
            next_grader_type=types_to_check_for,
        )

        #Order by confidence if we are looking for finished ML submissions
        finished_submission_text = submission_text_graded_by_instructor(location)
        if types_to_check_for == "ML" and submission_state_to_check_for == SubmissionState.finished:
            to_be_graded = to_be_graded.filter(grader__status_code=GraderStatus.success).order_by('grader__confidence')

        to_be_graded_count = to_be_graded.count()
        log.debug("Looking for location {0} and got count {1}".format(location, to_be_graded_count))

        for i in xrange(0, to_be_graded_count):
            #In some cases, this causes a model query error without the try/except block due to the checked out state
            try:
                to_be_graded_obj = to_be_graded[i]
            except Exception:
                return False, 0
            if to_be_graded_obj is not None and to_be_graded_obj.student_response not in finished_submission_text:
                to_be_graded_obj.state = SubmissionState.being_graded
                to_be_graded_obj.next_grader_type = "IN"
                to_be_graded_obj.save()
                found = True
                sub_id = to_be_graded_obj.id

                #Insert timing initialization code
                initialize_timing(sub_id)

                return found, sub_id

    #If nothing is found, return false
    return False, 0
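The keyword options select between the instructor workflows seen elsewhere on this page. Illustrative calls, using only names from the signature above:

# default: check out a waiting submission routed to an instructor ("IN")
found, sub_id = get_single_instructor_grading_item_for_location_with_options(location)

# rescore pass: revisit finished ML submissions, lowest grader confidence first
found, sub_id = get_single_instructor_grading_item_for_location_with_options(
    location,
    check_for_ml=False,
    types_to_check_for="ML",
    submission_state_to_check_for=SubmissionState.finished,
)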
Example #7
def get_submission_ml(request):
    """
    Gets a submission for the ML grader
    Input:
        GET request with no parameters
    """
    unique_locations = [
        x['location']
        for x in list(Submission.objects.values('location').distinct())
    ]
    for location in unique_locations:
        nothing_to_ml_grade_for_location_key = NOTHING_TO_ML_GRADE_LOCATION_CACHE_KEY.format(
            location=location)
        # Go to the next location if we have recently determined that a location
        # has no ML grading ready.
        if cache.get(nothing_to_ml_grade_for_location_key):
            continue

        sl = staff_grading_util.StaffLocation(location)
        control = SubmissionControl(sl.latest_submission())

        subs_graded_by_instructor = sl.graded_count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(
            location)
        if subs_graded_by_instructor >= control.minimum_to_use_ai and success:
            to_be_graded = Submission.objects.filter(
                location=location,
                state=SubmissionState.waiting_to_be_graded,
                next_grader_type="ML",
            )
            if to_be_graded.count() > 0:
                to_be_graded = to_be_graded[0]
                if to_be_graded is not None:
                    to_be_graded.state = SubmissionState.being_graded
                    to_be_graded.save()

                    return util._success_response(
                        {'submission_id': to_be_graded.id}, _INTERFACE_VERSION)
        # If we don't get a submission to return, then there is no ML grading for this location.
        # Cache this boolean to avoid an expensive loop iteration.
        cache.set(nothing_to_ml_grade_for_location_key, True,
                  settings.RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY)

    util.log_connection_data()

    # Set this cache key to ensure that this expensive function isn't repeatedly called when not needed.
    cache.set(NOTHING_TO_ML_GRADE_CACHE_KEY, True,
              settings.RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY)
    return util._error_response("Nothing to grade.", _INTERFACE_VERSION)
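The two cache constants are not defined in this excerpt. A plausible shape, offered only as an assumption to make the caching logic readable:

NOTHING_TO_ML_GRADE_CACHE_KEY = "ml_grading:nothing_to_grade"
NOTHING_TO_ML_GRADE_LOCATION_CACHE_KEY = "ml_grading:nothing_to_grade:{location}"
# settings.RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY would then be a TTL in seconds:
# an empty location is skipped until the delay elapses, then re-checked.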
Example #8
def get_submission_ml(request):
    """
    Gets a submission for the ML grader
    Input:
        GET request with no parameters
    """
    unique_locations = [x['location'] for x in list(Submission.objects.values('location').distinct())]
    for location in unique_locations:
        nothing_to_ml_grade_for_location_key = NOTHING_TO_ML_GRADE_LOCATION_CACHE_KEY.format(location=location)
        # Go to the next location if we have recently determined that a location
        # has no ML grading ready.
        if cache.get(nothing_to_ml_grade_for_location_key):
            continue

        sl = staff_grading_util.StaffLocation(location)
        subs_graded_by_instructor = sl.graded_count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(location)
        if subs_graded_by_instructor >= settings.MIN_TO_USE_ML and success:
            to_be_graded = Submission.objects.filter(
                location=location,
                state=SubmissionState.waiting_to_be_graded,
                next_grader_type="ML",
            )
            if to_be_graded.count() > 0:
                to_be_graded = to_be_graded[0]
                if to_be_graded is not None:
                    to_be_graded.state = SubmissionState.being_graded
                    to_be_graded.save()

                    return util._success_response({'submission_id': to_be_graded.id}, _INTERFACE_VERSION)
        # If we don't get a submission to return, then there is no ML grading for this location.
        # Cache this boolean to avoid an expensive loop iteration.
        cache.set(nothing_to_ml_grade_for_location_key, True, settings.RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY)

    util.log_connection_data()

    # Set this cache key to ensure that this expensive function isn't repeatedly called when not needed.
    cache.set(NOTHING_TO_ML_GRADE_CACHE_KEY, True, settings.RECHECK_EMPTY_ML_GRADE_QUEUE_DELAY)
    return util._error_response("Nothing to grade.", _INTERFACE_VERSION)
Example #9
    def item_to_rescore(self):
        """
        Gets an item for an instructor to rescore (items have already been machine scored, ML)
        """
        success = ml_grading_util.check_for_all_model_and_rubric_success(self.location)

        if success:
            #Order by confidence if we are looking for finished ML submissions
            finished_submission_text = self.graded_submission_text()
            to_be_graded = self.pending().filter(grader__status_code=GraderStatus.success).order_by('grader__confidence')

            for tbg in to_be_graded:
                if tbg is not None and tbg.student_response not in finished_submission_text:
                    tbg.state = SubmissionState.being_graded
                    tbg.next_grader_type = "IN"
                    tbg.save()
                    found = True
                    sub_id = tbg.id

                    return found, sub_id

        #If nothing is found, return false
        return False, 0
Example #10
def reset_in_subs_to_ml(subs):
    # note: the `subs` argument is unused by this version of the function
    count = 0
    in_subs = Submission.objects.filter(
        state=SubmissionState.waiting_to_be_graded,
        next_grader_type="IN",
        preferred_grader_type="ML"
    )

    for sub in in_subs:
        #If an instructor checks out a submission after ML grading has started,
        # this resets it to ML if the instructor times out
        success = ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
        if sub.next_grader_type == "IN" and success:
            sub.next_grader_type = "ML"
            sub.save()
            count += 1

    if count > 0:
        statsd.increment("open_ended_assessment.grading_controller.expire_submissions.reset_in_subs_to_ml",
            tags=["counter:{0}".format(count)])
        log.debug("Reset {0} instructor subs to ML".format(count))

    return True
Example #11
def reset_in_subs_to_ml():
    count = 0
    in_subs = Submission.objects.filter(
        state=SubmissionState.waiting_to_be_graded,
        next_grader_type="IN",
        preferred_grader_type="ML"
    )

    for sub in in_subs:
        #If an instructor checks out a submission after ML grading has started,
        # this resets it to ML if the instructor times out
        success = ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
        if sub.next_grader_type == "IN" and success:
            sub.next_grader_type = "ML"
            sub.save()
            count += 1

    if count > 0:
        statsd.increment("open_ended_assessment.grading_controller.expire_submissions.reset_in_subs_to_ml",
            tags=["counter:{0}".format(count)])
        log.debug("Reset {0} instructor subs to ML".format(count))

    return True
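reset_in_subs_to_ml is housekeeping: it returns timed-out instructor checkouts to the ML queue once a model is ready, and its statsd key suggests it runs from an expire_submissions job. The excerpt does not show the scheduling; a hedged sketch of one possible wiring as a periodic Celery task (an assumption, the project may use a different scheduler):

from celery import shared_task

@shared_task
def periodic_reset_in_subs_to_ml():
    # assumed wiring, not confirmed by the source
    reset_in_subs_to_ml()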
Example #12
    def item_to_score(self):
        """
        Gets an item for an instructor to score.
        """
        subs_graded = self.graded_count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(self.location)

        if subs_graded < settings.MIN_TO_USE_ML or not success:
            to_be_graded = self.pending()

            finished_submission_text = self.graded_submission_text()

            for tbg in to_be_graded:
                if tbg is not None and tbg.student_response not in finished_submission_text:
                    tbg.state = SubmissionState.being_graded
                    tbg.next_grader_type = "IN"
                    tbg.save()
                    found = True
                    sub_id = tbg.id

                    return found, sub_id

        #If nothing is found, return false
        return False, 0
Example #13
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        #Run some basic sanity checks on submission.  Also check to see if student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        timing_functions.initialize_timing(sub.id)
        success, check_dict = basic_check_util.simple_quality_check(sub.student_response,
            sub.initial_display, sub.student_id, sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(sub.student_response))

        #add additional tags needed to create a grader object
        check_dict = grader_util.add_additional_tags_to_dict(check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(sub.rubric)
            log.debug(max_rubric_scores)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [0 for i in xrange(0, len(max_rubric_scores))]
                log.debug(check_dict)

        #Create and handle the grader, and return
        grader_util.create_and_handle_grader_object(check_dict)

        #If the checks result in a score of 0 (out of 1), then the submission fails basic sanity checks
        #Return to student and don't process further
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        #Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor, subs_pending_instructor = staff_grading_util.count_submissions_graded_and_pending_instructor(
            sub.location)

        #TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY, sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)


        if grader_settings['grader_type'] == "ML":
            success= ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
            if(((subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_ML) and success):
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            #Ensures that there will be some calibration essays before peer grading begins!
            #Calibration essays can be added using command line utility, or through normal instructor grading.
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_PEER:
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        #Do duplicate checks
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(sub.student_response, sub.location, sub.student_id, sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True
        statsd.increment("open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
                ])

        sub.save()
        log.debug("Submission object created successfully!")

    except Exception:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
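The grader routing in handle_submission reduces to a small decision table over the preferred grader type and the instructor-graded counts. A hedged restatement as a pure function; the threshold defaults are placeholders for settings.MIN_TO_USE_ML and settings.MIN_TO_USE_PEER:

def choose_next_grader(grader_type, graded, pending, ml_ready,
                       min_to_use_ml=20, min_to_use_peer=10):
    total = graded + pending
    if grader_type == "ML":
        # ML grading needs enough instructor-scored examples plus a trained model
        return "ML" if total >= min_to_use_ml and ml_ready else "IN"
    if grader_type == "PE":
        # peer grading needs instructor-graded calibration essays first
        return "PE" if total >= min_to_use_peer else "IN"
    if grader_type == "IN":
        return "IN"
    raise ValueError("Invalid grader type: {0}".format(grader_type))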
Example #14
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        #Run some basic sanity checks on submission.  Also check to see if student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        success, check_dict = basic_check_util.simple_quality_check(
            sub.student_response, sub.initial_display, sub.student_id,
            sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(
                sub.student_response))

        #add additional tags needed to create a grader object
        check_dict = grader_util.add_additional_tags_to_dict(
            check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(
                sub.rubric)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [
                    0 for i in xrange(0, len(max_rubric_scores))
                ]

        #Create and handle the grader, and return
        grader_util.create_and_handle_grader_object(check_dict)

        #If the checks result in a score of 0 (out of 1), then the submission fails basic sanity checks
        #Return to student and don't process further
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        sl = staff_grading_util.StaffLocation(sub.location)
        #Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor = sl.graded_count()
        subs_pending_instructor = sl.pending_count()

        #TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY,
                                            sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)

        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(
                sub.location)
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_ML and success:
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            #Ensures that there will be some calibration essays before peer grading begins!
            #Calibration essays can be added using command line utility, or through normal instructor grading.
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_PEER:
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        #Do duplicate checks
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(
            sub.student_response, sub.location, sub.student_id,
            sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True
        statsd.increment(
            "open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
            ])

        sub.save()

    except Exception:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
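grader_util.get_grader_settings is not shown here. Judging from both versions of handle_submission, it reads a per-problem settings file and returns a dict with at least a 'grader_type' key of "ML", "PE", or "IN". A minimal stand-in illustrating that contract, an assumption rather than the real parser:

def get_grader_settings(settings_path):
    # the real function parses the file at settings_path; this stub only
    # documents the expected return shape
    return {"grader_type": "ML"}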
Example #15
def get_single_instructor_grading_item_for_location_with_options(
        location,
        check_for_ml=True,
        types_to_check_for=None,
        submission_state_to_check_for=SubmissionState.waiting_to_be_graded):
    """
    Returns a single instructor grading item for a given location
    Input:
        Problem location, and boolean check_for_ml, which dictates whether submissions
        should be returned to the instructor when an ML model has already been trained
        for this location.  If True, submissions with a successfully trained ML model
        are not returned.
    Output:
        Boolean success/fail, and either 0 or the submission id of a valid submission.
    """

    if not types_to_check_for:
        types_to_check_for = "IN"

    log.debug(
        "Looking for location {0}, state {1}, next_grader_type {2}".format(
            location, submission_state_to_check_for, types_to_check_for))

    subs_graded = finished_submissions_graded_by_instructor(location).count()
    subs_pending = submissions_pending_instructor(
        location, state_in=[SubmissionState.being_graded]).count()
    success = ml_grading_util.check_for_all_model_and_rubric_success(location)

    if ((subs_graded + subs_pending) < settings.MIN_TO_USE_ML
            or not success) or not check_for_ml:
        to_be_graded = Submission.objects.filter(
            location=location,
            state=submission_state_to_check_for,
            next_grader_type=types_to_check_for,
        )

        #Order by confidence if we are looking for finished ML submissions
        finished_submission_text = submission_text_graded_by_instructor(
            location)
        if types_to_check_for == "ML" and submission_state_to_check_for == SubmissionState.finished:
            to_be_graded = to_be_graded.filter(
                grader__status_code=GraderStatus.success).order_by(
                    'grader__confidence')

        to_be_graded_count = to_be_graded.count()
        log.debug("Looking for  location {0} and got count {1}".format(
            location, to_be_graded_count))

        for i in xrange(0, to_be_graded_count):
            #In some cases, this causes a model query error without the try/except block due to the checked out state
            try:
                to_be_graded_obj = to_be_graded[i]
            except Exception:
                return False, 0
            if to_be_graded_obj is not None and to_be_graded_obj.student_response not in finished_submission_text:
                to_be_graded_obj.state = SubmissionState.being_graded
                to_be_graded_obj.next_grader_type = "IN"
                to_be_graded_obj.save()
                found = True
                sub_id = to_be_graded_obj.id

                #Insert timing initialization code
                initialize_timing(sub_id)

                return found, sub_id

    #If nothing is found, return false
    return False, 0