Example #1
def get_submission_ml(request):
    """
    Gets a submission for the ML grader
    Input:
        Get request with no parameters
    """
    unique_locations = [x["location"] for x in list(Submission.objects.values("location").distinct())]
    for location in unique_locations:
        subs_graded_by_instructor = staff_grading_util.finished_submissions_graded_by_instructor(location).count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(location)
        if subs_graded_by_instructor >= settings.MIN_TO_USE_ML and success:
            to_be_graded = Submission.objects.filter(
                location=location, state=SubmissionState.waiting_to_be_graded, next_grader_type="ML"
            )
            if to_be_graded.count() > 0:
                to_be_graded = to_be_graded[0]
                if to_be_graded is not None:
                    to_be_graded.state = SubmissionState.being_graded
                    to_be_graded.save()

                    # Insert timing initialization code
                    initialize_timing(to_be_graded)

                    return util._success_response({"submission_id": to_be_graded.id}, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._error_response("Nothing to grade.", _INTERFACE_VERSION)
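All of the examples on this page check a submission out for grading (by flipping its state to being_graded) and then call initialize_timing before returning. The helper itself is never shown in these snippets; the sketch below is a plausible reading rather than the project's actual implementation, and the Timing model with student_id, location, and start_time fields is hypothetical. Note that Examples 1 and 2 pass the Submission object itself while the later examples pass a bare id, so the sketch accepts either.

from django.utils import timezone

from controller.models import Submission, Timing  # Timing model is assumed, not confirmed


def initialize_timing(submission_or_id):
    """Record the moment a submission is checked out for grading.

    Sketch only: the real helper lives in timing_functions and its
    fields may differ from the hypothetical ones used here.
    """
    sub = submission_or_id
    if not isinstance(sub, Submission):
        sub = Submission.objects.get(id=sub)
    timing = Timing(
        student_id=sub.student_id,
        location=sub.location,
        start_time=timezone.now(),  # the grading clock starts now
    )
    timing.save()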
Example #2
def get_submission_ml(request):
    """
    Gets a submission for the ML grader
    Input:
        Get request with no parameters
    """
    unique_locations = [x['location'] for x in list(Submission.objects.values('location').distinct())]
    for location in unique_locations:
        subs_graded_by_instructor = staff_grading_util.finished_submissions_graded_by_instructor(location).count()
        success = ml_grading_util.check_for_all_model_and_rubric_success(location)
        if subs_graded_by_instructor >= settings.MIN_TO_USE_ML and success:
            to_be_graded = Submission.objects.filter(
                location=location,
                state=SubmissionState.waiting_to_be_graded,
                next_grader_type="ML",
            )
            if to_be_graded.count() > 0:
                to_be_graded = to_be_graded[0]
                if to_be_graded is not None:
                    to_be_graded.state = SubmissionState.being_graded
                    to_be_graded.save()

                    # Insert timing initialization code
                    initialize_timing(to_be_graded)

                    return util._success_response({'submission_id': to_be_graded.id}, _INTERFACE_VERSION)

    util.log_connection_data()
    return util._error_response("Nothing to grade.", _INTERFACE_VERSION)
Example #3
def get_single_instructor_grading_item_for_location_with_options(location, check_for_ml=True, types_to_check_for=None,
                                                                 submission_state_to_check_for=SubmissionState.waiting_to_be_graded):
    """
    Returns a single instructor grading item for a given location
    Input:
        Problem location, and boolean check_for_ml, which dictates whether
        submissions should still be returned to the instructor once an ML model
        has been trained for this location.  If True, submissions for which an
        ML model has already been trained are not returned.
    Output:
        Boolean found/not-found, and the id of a valid submission (0 if none is found).
    """

    if not types_to_check_for:
        types_to_check_for="IN"

    log.debug("Looking for  location {0}, state {1}, next_grader_type {2}".format(location,
        submission_state_to_check_for, types_to_check_for))

    subs_graded = finished_submissions_graded_by_instructor(location).count()
    subs_pending = submissions_pending_instructor(location, state_in=[SubmissionState.being_graded]).count()
    success= ml_grading_util.check_for_all_model_and_rubric_success(location)

    if ((subs_graded + subs_pending) < settings.MIN_TO_USE_ML or not success) or not check_for_ml:
        to_be_graded = Submission.objects.filter(
            location=location,
            state=submission_state_to_check_for,
            next_grader_type=types_to_check_for,
        )

        # Order by confidence if we are looking for finished ML submissions
        finished_submission_text = submission_text_graded_by_instructor(location)
        if types_to_check_for == "ML" and submission_state_to_check_for == SubmissionState.finished:
            to_be_graded = to_be_graded.filter(grader__status_code=GraderStatus.success).order_by('grader__confidence')

        to_be_graded_count = to_be_graded.count()
        log.debug("Looking for location {0} and got count {1}".format(location, to_be_graded_count))

        for i in xrange(0, to_be_graded_count):
            # In some cases, this causes a model query error without the try/except block due to the checked out state
            try:
                to_be_graded_obj = to_be_graded[i]
            except Exception:
                return False, 0
            if to_be_graded_obj is not None and to_be_graded_obj.student_response not in finished_submission_text:
                to_be_graded_obj.state = SubmissionState.being_graded
                to_be_graded_obj.next_grader_type="IN"
                to_be_graded_obj.save()
                found = True
                sub_id = to_be_graded_obj.id

                # Insert timing initialization code
                initialize_timing(sub_id)

                return found, sub_id

        # If nothing is found, return False
    return False, 0
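The gating condition near the top of this function reads more easily as a predicate: a submission is served to the instructor when ML is not yet usable for the location (too few graded-plus-pending examples, or no successfully trained model and rubric), or when the caller disables the ML check outright. A hedged restatement, with a helper name of our own choosing:

def should_route_to_instructor(subs_graded, subs_pending, ml_ready, check_for_ml, min_to_use_ml):
    """Equivalent to the inline condition above: serve the instructor queue
    when ML is not usable for this location, or when the ML check is off."""
    ml_usable = (subs_graded + subs_pending) >= min_to_use_ml and ml_ready
    return not ml_usable or not check_for_ml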
Example #4
def get_submission_instructor(request):
    """
    Gets a submission for the Instructor grading view
    """
    try:
        course_id = util._value_or_default(request.GET["course_id"], None)
    except KeyError:
        return util._error_response("'get_submission' requires parameter 'course_id'", _INTERFACE_VERSION)

    found, sub_id = staff_grading_util.get_single_instructor_grading_item(course_id)

    if not found:
        return util._error_response("Nothing to grade.", _INTERFACE_VERSION)

    # Insert timing initialization code
    initialize_timing(sub_id)

    util.log_connection_data()
    return util._success_response({"submission_id": sub_id}, _INTERFACE_VERSION)
Example #5
def get_submission_instructor(request):
    """
    Gets a submission for the Instructor grading view
    """
    try:
        course_id = util._value_or_default(request.GET['course_id'], None)
    except KeyError:
        return util._error_response("'get_submission' requires parameter 'course_id'", _INTERFACE_VERSION)

    found, sub_id = staff_grading_util.get_single_instructor_grading_item(course_id)

    if not found:
        return util._error_response("Nothing to grade.", _INTERFACE_VERSION)

    # Insert timing initialization code
    initialize_timing(sub_id)

    util.log_connection_data()
    return util._success_response({'submission_id': sub_id}, _INTERFACE_VERSION)
Example #6
def get_submission_peer(request):
    """
    Gets a submission for the Peer grading view
    """
    try:
        location = util._value_or_default(request.GET["location"], None)
        grader_id = util._value_or_default(request.GET["grader_id"], None)
    except KeyError:
        return util._error_response("'get_submission' requires parameters 'location', 'grader_id'", _INTERFACE_VERSION)

    found, sub_id = peer_grading_util.get_single_peer_grading_item(location, grader_id)

    if not found:
        return util._error_response("Nothing to grade.", _INTERFACE_VERSION)

    # Insert timing initialization code
    initialize_timing(sub_id)

    util.log_connection_data()
    return util._success_response({"submission_id": sub_id}, _INTERFACE_VERSION)
Example #7
def get_submission_peer(request):
    """
    Gets a submission for the Peer grading view
    """
    try:
        location = util._value_or_default(request.GET['location'], None)
        grader_id = util._value_or_default(request.GET['grader_id'], None)
    except KeyError:
        return util._error_response("'get_submission' requires parameters 'location', 'grader_id'", _INTERFACE_VERSION)

    found, sub_id = peer_grading_util.get_single_peer_grading_item(location, grader_id)

    if not found:
        return util._error_response("Nothing to grade.", _INTERFACE_VERSION)

    # Insert timing initialization code
    initialize_timing(sub_id)

    util.log_connection_data()
    return util._success_response({'submission_id': sub_id}, _INTERFACE_VERSION)
Example #8
def get_single_peer_grading_item(location, grader_id):
    """
    Gets peer grading for a given location and grader.
    Returns one submission id corresponding to the location and the grader.
    Input:
        location - problem location.
        grader_id - student id of the peer grader
    Returns:
        found - Boolean indicating whether or not something to grade was found
        sub_id - If found, the id of a submission to grade
    """
    found = False
    sub_id = 0
    to_be_graded = peer_grading_submissions_pending_for_location(
        location, grader_id)
    # Do some checks to ensure that there are actually items to grade
    if to_be_graded is not None:
        to_be_graded_length = to_be_graded.count()
        if to_be_graded_length > 0:
            course_id = to_be_graded[0].course_id
            submissions_to_grade = (to_be_graded.filter(
                grader__status_code=GraderStatus.success,
                grader__grader_type__in=[
                    "PE", "BC"
                ]).exclude(grader__grader_id=grader_id).annotate(
                    num_graders=Count('grader')).values(
                        "num_graders", "id").order_by("num_graders")[:50])

            if submissions_to_grade is not None:
                submission_grader_counts = [
                    p['num_graders'] for p in submissions_to_grade
                ]
                #log.debug("Submissions to grade with graders: {0} {1}".format(submission_grader_counts, submissions_to_grade))

                submission_ids = [p['id'] for p in submissions_to_grade]

                student_profile_success, profile_dict = utilize_student_metrics.get_student_profile(
                    grader_id, course_id)
                # Ensure that student hasn't graded this submission before!
                # Also ensures that all submissions are searched through if student has graded the minimum one
                fallback_sub_id = None
                for i in xrange(0, len(submission_ids)):
                    #log.debug("Looping through graders, on {0}".format(i))
                    minimum_index = submission_grader_counts.index(
                        min(submission_grader_counts))
                    grade_item = Submission.objects.get(
                        id=int(submission_ids[minimum_index]))
                    previous_graders = [
                        p.grader_id
                        for p in grade_item.get_successful_peer_graders()
                    ]
                    if grader_id not in previous_graders:
                        found = True
                        sub_id = grade_item.id

                        # Remember the first eligible submission as a fallback
                        if fallback_sub_id is None:
                            fallback_sub_id = grade_item.id

                        if not student_profile_success:
                            initialize_timing(sub_id)
                            grade_item.state = SubmissionState.being_graded
                            grade_item.save()
                            return found, sub_id
                        else:
                            success, similarity_score = utilize_student_metrics.get_similarity_score(
                                profile_dict, grade_item.student_id, course_id)
                            log.debug(similarity_score)
                            if similarity_score <= settings.PEER_GRADER_MIN_SIMILARITY_FOR_MATCHING:
                                initialize_timing(sub_id)
                                grade_item.state = SubmissionState.being_graded
                                grade_item.save()
                                return found, sub_id
                    else:
                        if len(submission_ids) > 1:
                            submission_ids.pop(minimum_index)
                            submission_grader_counts.pop(minimum_index)
                if found:
                    initialize_timing(fallback_sub_id)
                    grade_item = Submission.objects.get(id=fallback_sub_id)
                    grade_item.state = SubmissionState.being_graded
                    grade_item.save()
                    return found, fallback_sub_id

    return found, sub_id
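The selection loop in this function keeps two parallel lists, submission_ids and submission_grader_counts, repeatedly trying the candidate with the fewest graders and popping both lists in step when a candidate is rejected. Stripped of the model access and the similarity check, the core pattern looks like this (standalone sketch, names ours):

def pick_least_graded(submission_ids, grader_counts, is_eligible):
    """Return the eligible id with the fewest graders, or None.

    Mirrors the loop above: always try the current minimum, and drop a
    rejected candidate from both parallel lists so the next-smallest
    count is tried on the following pass.
    """
    ids = list(submission_ids)
    counts = list(grader_counts)
    while ids:
        i = counts.index(min(counts))
        if is_eligible(ids[i]):
            return ids[i]
        ids.pop(i)
        counts.pop(i)
    return None


# Submissions 11 and 13 were already graded by this student, so the
# least-graded remaining candidate is 12.
print(pick_least_graded([11, 12, 13], [0, 1, 2], lambda sid: sid not in (11, 13)))  # -> 12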
Example #9
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        # Run some basic sanity checks on submission.  Also check to see if student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        timing_functions.initialize_timing(sub.id)
        success, check_dict = basic_check_util.simple_quality_check(sub.student_response,
            sub.initial_display, sub.student_id, sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(sub.student_response))

        # Add additional tags needed to create a grader object
        check_dict = grader_util.add_additional_tags_to_dict(check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(sub.rubric)
            log.debug(max_rubric_scores)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [0 for i in xrange(0, len(max_rubric_scores))]
                log.debug(check_dict)

        # Create and handle the grader, and return
        grader_util.create_and_handle_grader_object(check_dict)

        # If the checks result in a score of 0 (out of 1), then the submission fails basic sanity checks
        # Return to student and don't process further
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        # Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor, subs_pending_instructor = staff_grading_util.count_submissions_graded_and_pending_instructor(
            sub.location)

        # TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY, sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)

        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_ML and success:
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            # Ensures that there will be some calibration essays before peer grading begins!
            # Calibration essays can be added using command line utility, or through normal instructor grading.
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_PEER:
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        # Do duplicate checks
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(sub.student_response, sub.location, sub.student_id, sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True
        statsd.increment("open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
                ])

        sub.save()
        log.debug("Submission object created successfully!")

    except Exception:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
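Two things are worth noting here. First, transaction.commit_unless_managed() dates this code to Django 1.5 or earlier; the call was deprecated in Django 1.6 and later removed. Second, the grader-dispatch block in the middle reduces to a small decision: the preferred grader type from the settings file wins, but ML and PE both fall back to IN until enough instructor-graded examples exist. A condensed restatement (pure function, names ours):

def choose_next_grader_type(preferred, graded_plus_pending, ml_ready, min_for_ml, min_for_peer):
    """Condensed restatement of the dispatch above.  Returns None for an
    unrecognized grader type, where the original logs and returns False."""
    if preferred == "ML":
        return "ML" if graded_plus_pending >= min_for_ml and ml_ready else "IN"
    if preferred == "PE":
        # Calibration essays must exist before peer grading can begin.
        return "PE" if graded_plus_pending >= min_for_peer else "IN"
    if preferred == "IN":
        return "IN"
    return None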
Example #10
def get_single_peer_grading_item(location, grader_id):
    """
    Gets peer grading for a given location and grader.
    Returns one submission id corresponding to the location and the grader.
    Input:
        location - problem location.
        grader_id - student id of the peer grader
    Returns:
        found - Boolean indicating whether or not something to grade was found
        sub_id - If found, the id of a submission to grade
    """
    found = False
    sub_id = 0
    to_be_graded = peer_grading_submissions_pending_for_location(location, grader_id)
    # Do some checks to ensure that there are actually items to grade
    if to_be_graded is not None:
        to_be_graded_length = to_be_graded.count()
        if to_be_graded_length > 0:
            course_id = to_be_graded[0].course_id
            submissions_to_grade = (to_be_graded
                                    .filter(grader__status_code=GraderStatus.success, grader__grader_type__in=["PE", "BC"])
                                    .exclude(grader__grader_id=grader_id)
                                    .annotate(num_graders=Count('grader'))
                                    .values("num_graders", "id")
                                    .order_by("num_graders")[:50])

            if submissions_to_grade is not None:
                submission_grader_counts = [p['num_graders'] for p in submissions_to_grade]
                #log.debug("Submissions to grade with graders: {0} {1}".format(submission_grader_counts, submissions_to_grade))

                submission_ids = [p['id'] for p in submissions_to_grade]

                student_profile_success, profile_dict = utilize_student_metrics.get_student_profile(grader_id, course_id)
                # Ensure that student hasn't graded this submission before!
                # Also ensures that all submissions are searched through if student has graded the minimum one
                fallback_sub_id = None
                for i in xrange(0, len(submission_ids)):
                    #log.debug("Looping through graders, on {0}".format(i))
                    minimum_index = submission_grader_counts.index(min(submission_grader_counts))
                    grade_item = Submission.objects.get(id=int(submission_ids[minimum_index]))
                    previous_graders = [p.grader_id for p in grade_item.get_successful_peer_graders()]
                    if grader_id not in previous_graders:
                        found = True
                        sub_id = grade_item.id

                        # Remember the first eligible submission as a fallback
                        if fallback_sub_id is None:
                            fallback_sub_id = grade_item.id

                        if not student_profile_success:
                            initialize_timing(sub_id)
                            grade_item.state = SubmissionState.being_graded
                            grade_item.save()
                            return found, sub_id
                        else:
                            success, similarity_score = utilize_student_metrics.get_similarity_score(profile_dict, grade_item.student_id, course_id)
                            log.debug(similarity_score)
                            if similarity_score <= settings.PEER_GRADER_MIN_SIMILARITY_FOR_MATCHING:
                                initialize_timing(sub_id)
                                grade_item.state = SubmissionState.being_graded
                                grade_item.save()
                                return found, sub_id
                    else:
                        if len(submission_ids) > 1:
                            submission_ids.pop(minimum_index)
                            submission_grader_counts.pop(minimum_index)
                if found:
                    initialize_timing(fallback_sub_id)
                    grade_item = Submission.objects.get(id=fallback_sub_id)
                    grade_item.state = SubmissionState.being_graded
                    grade_item.save()
                    return found, fallback_sub_id

    return found, sub_id
Example #11
def get_single_instructor_grading_item_for_location_with_options(
        location,
        check_for_ml=True,
        types_to_check_for=None,
        submission_state_to_check_for=SubmissionState.waiting_to_be_graded):
    """
    Returns a single instructor grading item for a given location
    Input:
        Problem location, and boolean check_for_ml, which dictates whether
        submissions should still be returned to the instructor once an ML model
        has been trained for this location.  If True, submissions for which an
        ML model has already been trained are not returned.
    Output:
        Boolean found/not-found, and the id of a valid submission (0 if none is found).
    """

    if not types_to_check_for:
        types_to_check_for = "IN"

    log.debug(
        "Looking for  location {0}, state {1}, next_grader_type {2}".format(
            location, submission_state_to_check_for, types_to_check_for))

    subs_graded = finished_submissions_graded_by_instructor(location).count()
    subs_pending = submissions_pending_instructor(
        location, state_in=[SubmissionState.being_graded]).count()
    success = ml_grading_util.check_for_all_model_and_rubric_success(location)

    if ((subs_graded + subs_pending) < settings.MIN_TO_USE_ML
            or not success) or not check_for_ml:
        to_be_graded = Submission.objects.filter(
            location=location,
            state=submission_state_to_check_for,
            next_grader_type=types_to_check_for,
        )

        # Order by confidence if we are looking for finished ML submissions
        finished_submission_text = submission_text_graded_by_instructor(
            location)
        if types_to_check_for == "ML" and submission_state_to_check_for == SubmissionState.finished:
            to_be_graded = to_be_graded.filter(
                grader__status_code=GraderStatus.success).order_by(
                    'grader__confidence')

        to_be_graded_count = to_be_graded.count()
        log.debug("Looking for  location {0} and got count {1}".format(
            location, to_be_graded_count))

        for i in xrange(0, to_be_graded_count):
            # In some cases, this causes a model query error without the try/except block due to the checked out state
            try:
                to_be_graded_obj = to_be_graded[i]
            except Exception:
                return False, 0
            if to_be_graded_obj is not None and to_be_graded_obj.student_response not in finished_submission_text:
                to_be_graded_obj.state = SubmissionState.being_graded
                to_be_graded_obj.next_grader_type = "IN"
                to_be_graded_obj.save()
                found = True
                sub_id = to_be_graded_obj.id

                # Insert timing initialization code
                initialize_timing(sub_id)

                return found, sub_id

        # If nothing is found, return False
    return False, 0
Example #12
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        # Run some basic sanity checks on submission.  Also check to see if student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        timing_functions.initialize_timing(sub.id)
        success, check_dict = basic_check_util.simple_quality_check(sub.student_response,
            sub.initial_display, sub.student_id, sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(sub.student_response))

        # Add additional tags needed to create a grader object
        check_dict = grader_util.add_additional_tags_to_dict(check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(sub.rubric)
            log.debug(max_rubric_scores)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [0 for i in xrange(0, len(max_rubric_scores))]
                log.debug(check_dict)

        # Create and handle the grader, and return
        grader_util.create_and_handle_grader_object(check_dict)

        # If the checks result in a score of 0 (out of 1), then the submission fails basic sanity checks
        # Return to student and don't process further
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        # Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor, subs_pending_instructor = staff_grading_util.count_submissions_graded_and_pending_instructor(
            sub.location)

        # TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY, sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)

        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_ML and success:
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            # Ensures that there will be some calibration essays before peer grading begins!
            # Calibration essays can be added using command line utility, or through normal instructor grading.
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_PEER:
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        # Do duplicate checks
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(sub.student_response, sub.location, sub.student_id, sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        statsd.increment("open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
                ])

        sub.save()
        log.debug("Submission object created successfully!")

    except Exception:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True