Example #1
0
def reset_ml_subs_to_in():
    """
    Reset submissions marked ML to instructor if there are not enough instructor submissions to grade
    This happens if the instructor skips too many submissions
    """
    counter = 0
    unique_locations = [
        x['location']
        for x in list(Submission.objects.values('location').distinct())
    ]
    for location in unique_locations:
        subs_graded, subs_pending = staff_grading_util.count_submissions_graded_and_pending_instructor(
            location)
        subs_pending_total = Submission.objects.filter(
            location=location,
            state=SubmissionState.waiting_to_be_graded,
            preferred_grader_type="ML").order_by(
                '-date_created')[:settings.MIN_TO_USE_ML]
        if ((subs_graded + subs_pending) < settings.MIN_TO_USE_ML
                and subs_pending_total.count() > subs_pending):
            for sub in subs_pending_total:
                if sub.next_grader_type == "ML" and sub.get_unsuccessful_graders(
                ).count() == 0:
                    staff_grading_util.set_ml_grading_item_back_to_instructor(
                        sub)
                    counter += 1
                if (counter + subs_graded +
                        subs_pending) > settings.MIN_TO_USE_ML:
                    break
    if counter > 0:
        statsd.increment(
            "open_ended_assessment.grading_controller.expire_submissions.reset_ml_subs_to_in",
            tags=["counter:{0}".format(counter)])
        log.debug("Reset {0} submission from ML to IN".format(counter))
Example #2
0
def reset_ml_subs_to_in():
    """
    Reset submissions marked ML to instructor if there are not enough instructor submissions to grade
    This happens if the instructor skips too many submissions
    """
    counter = 0
    unique_locations = [x['location'] for x in list(Submission.objects.values('location').distinct())]
    for location in unique_locations:
        subs_graded, subs_pending = staff_grading_util.count_submissions_graded_and_pending_instructor(location)
        subs_pending_total = Submission.objects.filter(
            location=location,
            state=SubmissionState.waiting_to_be_graded,
            preferred_grader_type="ML"
        ).order_by('-date_created')[:settings.MIN_TO_USE_ML]
        if (subs_graded + subs_pending) < settings.MIN_TO_USE_ML and subs_pending_total.count() > subs_pending:
            for sub in subs_pending_total:
                if sub.next_grader_type == "ML" and sub.get_unsuccessful_graders().count() == 0:
                    staff_grading_util.set_ml_grading_item_back_to_instructor(sub)
                    counter += 1
                if (counter + subs_graded + subs_pending) > settings.MIN_TO_USE_ML:
                    break
    if counter > 0:
        statsd.increment("open_ended_assessment.grading_controller.expire_submissions.reset_ml_subs_to_in",
            tags=["counter:{0}".format(counter)])
        log.debug("Reset {0} submission from ML to IN".format(counter))
Example #3
0
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        #Run some basic sanity checks on submission.  Also check to see if student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        timing_functions.initialize_timing(sub.id)
        success, check_dict = basic_check_util.simple_quality_check(sub.student_response,
            sub.initial_display, sub.student_id, sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(sub.student_response))

        #add additional tags needed to create a grader object
        check_dict = grader_util.add_additional_tags_to_dict(check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(sub.rubric)
            log.debug(max_rubric_scores)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [0 for i in xrange(0,len(max_rubric_scores))]
                log.debug(check_dict)

        #Create and handle the grader, and return
        grader_util.create_and_handle_grader_object(check_dict)

        #If the checks result in a score of 0 (out of 1), then the submission fails basic sanity checks
        #Return to student and don't process further
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        #Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor, subs_pending_instructor = staff_grading_util.count_submissions_graded_and_pending_instructor(
            sub.location)

        #TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY, sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)


        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_ML and success:
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            #Ensures that there will be some calibration essays before peer grading begins!
            #Calibration essays can be added using command line utility, or through normal instructor grading.
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_PEER:
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        #Do duplicate checks
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(sub.student_response, sub.location, sub.student_id, sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True
        statsd.increment("open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
                ])

        sub.save()
        log.debug("Submission object created successfully!")

    except:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
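A short caller sketch may help show the contract: handle_submission returns True both when the submission is routed onward and when it fails the basic checks and is returned to the student, and False only on an unexpected error. The process_incoming wrapper below is invented for illustration and assumes the same module-level log object used above.

# Hypothetical wrapper -- process_incoming is not part of the controller code.
def process_incoming(sub):
    """Hand a freshly pulled Submission to handle_submission and log the result."""
    if handle_submission(sub):
        # True covers both "queued for grading" and "failed basic checks,
        # returned to the student"; inspect sub.state to tell them apart.
        log.debug("Handled submission {0}; state={1}, next grader={2}".format(
            sub.id, sub.state, sub.next_grader_type))
        return True
    log.error("Could not handle submission {0}".format(sub.id))
    return False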
Example #4
0
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        #Run some basic sanity checks on submission.  Also check to see if student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        timing_functions.initialize_timing(sub.id)
        success, check_dict = basic_check_util.simple_quality_check(sub.student_response,
            sub.initial_display, sub.student_id, sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(sub.student_response))

        #add additional tags needed to create a grader object
        check_dict = grader_util.add_additional_tags_to_dict(check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(sub.rubric)
            log.debug(max_rubric_scores)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [0 for i in xrange(0,len(max_rubric_scores))]
                log.debug(check_dict)

        #Create and handle the grader, and return
        grader_util.create_and_handle_grader_object(check_dict)

        #If the checks result in a score of 0 (out of 1), then the submission fails basic sanity checks
        #Return to student and don't process further
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        #Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor, subs_pending_instructor = staff_grading_util.count_submissions_graded_and_pending_instructor(
            sub.location)

        #TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY, sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)


        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_ML and success:
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            #Ensures that there will be some calibration essays before peer grading begins!
            #Calibration essays can be added using command line utility, or through normal instructor grading.
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_PEER:
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        #Do duplicate checks
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(sub.student_response, sub.location, sub.student_id, sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True
        statsd.increment("open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
                ])

        sub.save()
        log.debug("Submission object created successfully!")

    except:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
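The TODO in the body asks for the grader-assignment branching to be pulled out. One possible shape is sketched below; choose_next_grader_type is a hypothetical helper name, not part of the existing codebase, and it simply mirrors the if/elif chain above while assuming the same settings module.

# Sketch of the abstraction hinted at by the TODO.  choose_next_grader_type is
# hypothetical; it reproduces the branching from handle_submission.
def choose_next_grader_type(grader_type, instructor_count, ml_ready):
    """Return the next grader type ("ML", "PE", or "IN"), or None if invalid."""
    if grader_type == "ML":
        return "ML" if (instructor_count >= settings.MIN_TO_USE_ML and ml_ready) else "IN"
    if grader_type == "PE":
        return "PE" if instructor_count >= settings.MIN_TO_USE_PEER else "IN"
    if grader_type == "IN":
        return "IN"
    return None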