Example #1
def validate_rubric_scores(rubric_scores, rubric_scores_complete, sub):
    """
    Checks that saved rubric scores are complete, integer-valued, and within
    the per-target maximums defined by the submission's rubric.
    Input:
        rubric_scores - list of per-target scores (strings or ints)
        rubric_scores_complete - flag; only the string "True" is accepted
        sub - A Submission object from controller.models

    Output:
        (success, error_message) tuple; error_message is "" on success
    """
    success = False
    if rubric_scores_complete != "True":
        return success, "Rubric scores complete is not true: {0}".format(rubric_scores_complete)

    success, targets = rubric_functions.generate_targets_from_rubric(sub.rubric)
    if not success:
        return success, "Cannot generate targets from rubric xml: {0}".format(sub.rubric)

    if not isinstance(rubric_scores, list):
        return success, "Rubric Scores is not a list: {0}".format(rubric_scores)

    if len(rubric_scores) != len(targets):
        return success, "Number of scores saved does not equal number of targets.  Targets: {0} Rubric Scores: {1}".format(targets, rubric_scores)

    for i in xrange(0, len(rubric_scores)):
        try:
            rubric_scores[i] = int(rubric_scores[i])
        except (ValueError, TypeError):
            return success, "Cannot parse score {0} into int".format(rubric_scores[i])

        if rubric_scores[i] < 0 or rubric_scores[i] > targets[i]:
            return success, "Score {0} under 0 or over max score {1}".format(rubric_scores[i], targets[i])
    success = True
    return success , ""
Example #2
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        #Run some basic sanity checks on submission.  Also check to see if student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        timing_functions.initialize_timing(sub.id)
        success, check_dict = basic_check_util.simple_quality_check(sub.student_response,
            sub.initial_display, sub.student_id, sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(sub.student_response))

        #add additional tags needed to create a grader object
        check_dict = grader_util.add_additional_tags_to_dict(check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(sub.rubric)
            log.debug(max_rubric_scores)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [0 for i in xrange(0,len(max_rubric_scores))]
                log.debug(check_dict)

        #Create and handle the grader, and return
        grader_util.create_and_handle_grader_object(check_dict)

        #If the checks result in a score of 0 (out of 1), then the submission fails basic sanity checks
        #Return to student and don't process further
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        #Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor, subs_pending_instructor = staff_grading_util.count_submissions_graded_and_pending_instructor(
            sub.location)

        #TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY, sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)


        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
            if ((subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_ML) and success:
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            #Ensures that there will be some calibration essays before peer grading begins!
            #Calibration essays can be added using command line utility, or through normal instructor grading.
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_PEER:
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        #Do duplicate checks
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(sub.student_response, sub.location, sub.student_id, sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True
        statsd.increment("open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
                ])

        sub.save()
        log.debug("Submission object created successfully!")

    except Exception:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
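
The TODO in Example #2 asks for the grader-assignment branching to be abstracted out. One possible shape for that helper, sketched as an assumption rather than project code (the name choose_next_grader_type and the models_ready argument are hypothetical), is a pure function over the instructor-graded counts and the configured thresholds:

def choose_next_grader_type(grader_type, graded_count, pending_count,
                            min_for_ml, min_for_peer, models_ready):
    # Hypothetical refactor of the branching in handle_submission:
    # ML requires enough instructor-graded examples plus trained models/rubrics,
    # PE requires enough instructor-graded examples to seed calibration essays,
    # and IN is both an explicit setting and the fallback for the other two.
    instructor_total = graded_count + pending_count
    if grader_type == "ML":
        return "ML" if (instructor_total >= min_for_ml and models_ready) else "IN"
    if grader_type == "PE":
        return "PE" if instructor_total >= min_for_peer else "IN"
    if grader_type == "IN":
        return "IN"
    raise ValueError("Invalid grader type: {0}".format(grader_type))

# e.g. with a MIN_TO_USE_ML of 100, 40 graded + 10 pending is not enough, so fall back to "IN".
print(choose_next_grader_type("ML", 40, 10, 100, 20, True))  # "IN"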
Example #3
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.
    Input:
        sub - A Submission object from controller.models

    Output:
        True/False status code
    """
    try:
        #Run some basic sanity checks on submission.  Also check to see if student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        success, check_dict = basic_check_util.simple_quality_check(
            sub.student_response, sub.initial_display, sub.student_id,
            sub.skip_basic_checks)
        if not success:
            log.exception("could not run basic checks on {0}".format(
                sub.student_response))

        #add additional tags needed to create a grader object
        check_dict = grader_util.add_additional_tags_to_dict(
            check_dict, sub.id)
        if check_dict['score'] == 0:
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(
                sub.rubric)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [
                    0 for i in xrange(0, len(max_rubric_scores))
                ]

        #Create and handle the grader, and return
        grader_util.create_and_handle_grader_object(check_dict)

        #If the checks result in a score of 0 (out of 1), then the submission fails basic sanity checks
        #Return to student and don't process further
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded

        sl = staff_grading_util.StaffLocation(sub.location)
        #Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor, subs_pending_instructor = (
            sl.graded_count(), sl.pending_count())

        #TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY,
                                            sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)

        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(
                sub.location)
            if (((subs_graded_by_instructor + subs_pending_instructor) >=
                 settings.MIN_TO_USE_ML) and success):
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            #Ensures that there will be some calibration essays before peer grading begins!
            #Calibration essays can be added using command line utility, or through normal instructor grading.
            if ((subs_graded_by_instructor + subs_pending_instructor) >=
                    settings.MIN_TO_USE_PEER):
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            log.exception("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        #Do duplicate checks
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(
            sub.student_response, sub.location, sub.student_id,
            sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True
        statsd.increment(
            "open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
            ])

        sub.save()

    except Exception:
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
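
Both versions of handle_submission keep the contract described in their docstrings: True when the submission was routed (or rejected by basic checks) and False on any failure, with errors logged rather than raised. A hedged sketch of how a caller might rely on that boolean (the helper and loop below are illustrative assumptions, not part of the project):

import logging

log = logging.getLogger(__name__)

def route_pending_submissions(pending_subs, handle):
    # Illustration only: pending_subs is any iterable of Submission objects and
    # handle is a callable such as handle_submission that returns True/False.
    failed_ids = []
    for sub in pending_subs:
        if not handle(sub):
            log.error("Could not handle submission %s", getattr(sub, "id", None))
            failed_ids.append(getattr(sub, "id", None))
    return failed_ids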