def put_result(request):
    """
    Used by external interfaces to post grading results back to the controller.

    Expects an HTTP POST whose body contains every key listed in the loop
    below.  Validates grader type, grader status, and score; looks up the
    target submission; validates any rubric scores; then hands everything to
    grader_util.create_and_handle_grader_object.

    Returns a util._success_response(...) dict on success, otherwise a
    util._error_response(...) dict describing the first validation failure.
    """
    if request.method != 'POST':
        return util._error_response("'put_result' must use HTTP POST", _INTERFACE_VERSION)

    post_data = request.POST.dict().copy()

    # All of these keys must be present in the POST body.
    for tag in ['feedback', 'submission_id', 'grader_type', 'status', 'confidence',
                'grader_id', 'score', 'errors', 'rubric_scores_complete', 'rubric_scores']:
        # dict.has_key() was removed in Python 3; `in` works on both 2 and 3.
        if tag not in post_data:
            return util._error_response("Failed to find needed key {0}.".format(tag), _INTERFACE_VERSION)

    # List comprehension below just gets all available grader types ['ML', 'IN', etc.]
    if post_data['grader_type'] not in [i[0] for i in GRADER_TYPE]:
        return util._error_response("Invalid grader type {0}.".format(post_data['grader_type']), _INTERFACE_VERSION)

    # List comprehension below gets all available status codes ['F', 'S']
    if post_data['status'] not in [i[0] for i in STATUS_CODES]:
        # BUGFIX: the original message had no {0} placeholder, so the offending
        # status value was silently dropped from the error text.
        return util._error_response("Invalid grader status {0}.".format(post_data['status']), _INTERFACE_VERSION)

    try:
        post_data['score'] = int(post_data['score'])
    except (TypeError, ValueError):
        # Narrowed from `except Exception`: these are the only errors int() raises here.
        return util._error_response("Can't parse score {0} into an int.".format(post_data['score']), _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=int(post_data['submission_id']))
    except Exception:
        # Covers both a non-integer id and a missing Submission row (DoesNotExist).
        return util._error_response(
            "Submission id {0} is not valid.".format(post_data.get('submission_id', "NA")),
            _INTERFACE_VERSION,
        )

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.get('rubric_scores', [])
    try:
        rubric_scores = json.loads(rubric_scores)
    except (TypeError, ValueError):
        # Not valid JSON -- leave the raw value and let validate_rubric_scores reject it.
        pass

    success, error_message = grader_util.validate_rubric_scores(rubric_scores, rubric_scores_complete, sub)
    if not success:
        return util._error_response(
            error_message,
            _INTERFACE_VERSION,
        )

    post_data['rubric_scores'] = rubric_scores
    post_data['rubric_scores_complete'] = rubric_scores_complete

    success, header = grader_util.create_and_handle_grader_object(post_data)
    if not success:
        return util._error_response("Could not save grader.", _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response({'message': "Saved successfully."}, _INTERFACE_VERSION)
def put_result(request):
    """
    Used by external interfaces to post grading results back to the controller.

    Validates the POSTed fields (presence, grader type, status code, integer
    score), resolves the Submission, checks rubric scores, and delegates
    persistence to grader_util.create_and_handle_grader_object.

    Returns a util._success_response(...) or util._error_response(...) dict.
    """
    if request.method != 'POST':
        return util._error_response("'put_result' must use HTTP POST", _INTERFACE_VERSION)

    post_data = request.POST.dict().copy()

    # Reject the request unless every required key is present.
    for tag in ['feedback', 'submission_id', 'grader_type', 'status', 'confidence',
                'grader_id', 'score', 'errors', 'rubric_scores_complete', 'rubric_scores']:
        # dict.has_key() was removed in Python 3; `in` works on both 2 and 3.
        if tag not in post_data:
            return util._error_response("Failed to find needed key {0}.".format(tag), _INTERFACE_VERSION)

    # List comprehension below just gets all available grader types ['ML', 'IN', etc.]
    if post_data['grader_type'] not in [i[0] for i in GRADER_TYPE]:
        return util._error_response("Invalid grader type {0}.".format(post_data['grader_type']), _INTERFACE_VERSION)

    # List comprehension below gets all available status codes ['F', 'S']
    if post_data['status'] not in [i[0] for i in STATUS_CODES]:
        # BUGFIX: the original message had no {0} placeholder, so the offending
        # status value was silently dropped from the error text.
        return util._error_response("Invalid grader status {0}.".format(post_data['status']), _INTERFACE_VERSION)

    try:
        post_data['score'] = int(post_data['score'])
    except (TypeError, ValueError):
        # BUGFIX: was a bare `except:`, which also traps SystemExit/KeyboardInterrupt.
        return util._error_response("Can't parse score {0} into an int.".format(post_data['score']), _INTERFACE_VERSION)

    try:
        sub = Submission.objects.get(id=int(post_data['submission_id']))
    except Exception:
        # BUGFIX: was a bare `except:`.  Covers both a non-integer id and a
        # missing Submission row (DoesNotExist).
        return util._error_response(
            "Submission id {0} is not valid.".format(post_data.get('submission_id', "NA")),
            _INTERFACE_VERSION,
        )

    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.get('rubric_scores', [])
    try:
        rubric_scores = json.loads(rubric_scores)
    except (TypeError, ValueError):
        # BUGFIX: was a bare `except: pass`.  Non-JSON input is left untouched
        # and rejected below by validate_rubric_scores.
        pass

    success, error_message = grader_util.validate_rubric_scores(rubric_scores, rubric_scores_complete, sub)
    if not success:
        return util._error_response(
            error_message,
            _INTERFACE_VERSION,
        )

    post_data['rubric_scores'] = rubric_scores
    post_data['rubric_scores_complete'] = rubric_scores_complete

    success, header = grader_util.create_and_handle_grader_object(post_data)
    if not success:
        return util._error_response("Could not save grader.", _INTERFACE_VERSION)

    util.log_connection_data()
    return util._success_response({'message': "Saved successfully."}, _INTERFACE_VERSION)
def handle_submission(sub):
    """
    Handle a new submission: decide what the next grader should be and save it.

    Input:
        sub - A Submission object from controller.models
    Output:
        True/False status code
    """
    try:
        # Run some basic sanity checks on the submission.  Also check to see
        # if the student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        timing_functions.initialize_timing(sub.id)

        success, check_dict = basic_check_util.simple_quality_check(
            sub.student_response, sub.initial_display, sub.student_id, sub.skip_basic_checks)
        if not success:
            # BUGFIX: log.exception outside an except handler attaches a bogus
            # "NoneType: None" traceback; log.error is the correct call here.
            log.error("could not run basic checks on {0}".format(sub.student_response))

        # Add additional tags needed to create a grader object.
        check_dict = grader_util.add_additional_tags_to_dict(check_dict, sub.id)

        if check_dict['score'] == 0:
            # Failed basic checks: fill in zero rubric scores so the grader
            # object is complete.
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(sub.rubric)
            log.debug(max_rubric_scores)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [0 for i in xrange(len(max_rubric_scores))]
        log.debug(check_dict)

        # Create and handle the grader.
        grader_util.create_and_handle_grader_object(check_dict)

        # If the checks result in a score of 0 (out of 1), then the submission
        # fails basic sanity checks.  Return to student, don't process further.
        if check_dict['score'] == 0:
            return True

        sub.state = SubmissionState.waiting_to_be_graded

        # Assign whether grader should be ML or IN based on number of graded examples.
        subs_graded_by_instructor, subs_pending_instructor = \
            staff_grading_util.count_submissions_graded_and_pending_instructor(sub.location)

        # TODO: abstract out logic for assigning which grader to go with.
        grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY, sub.grader_settings)
        grader_settings = grader_util.get_grader_settings(grader_settings_path)

        if grader_settings['grader_type'] == "ML":
            success = ml_grading_util.check_for_all_model_and_rubric_success(sub.location)
            if ((subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_ML) and success:
                sub.next_grader_type = "ML"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "PE":
            # Ensures that there will be some calibration essays before peer
            # grading begins!  Calibration essays can be added using a command
            # line utility, or through normal instructor grading.
            if (subs_graded_by_instructor + subs_pending_instructor) >= settings.MIN_TO_USE_PEER:
                sub.next_grader_type = "PE"
            else:
                sub.next_grader_type = "IN"
        elif grader_settings['grader_type'] == "IN":
            sub.next_grader_type = "IN"
        else:
            # BUGFIX: this is a configuration error, not an exception in
            # flight -- log.error instead of log.exception.
            log.error("Invalid grader type specified in settings file.")
            return False

        sub.preferred_grader_type = grader_settings['grader_type']

        # Do duplicate checks.
        is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(
            sub.student_response, sub.location, sub.student_id, sub.preferred_grader_type)
        sub.is_duplicate = is_duplicate
        sub.is_plagiarized = is_plagiarized
        sub.duplicate_submission_id = duplicate_id
        sub.has_been_duplicate_checked = True

        statsd.increment(
            "open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
            tags=[
                "duplicate:{0}".format(is_duplicate),
                "is_plagiarized:{0}".format(is_plagiarized)
            ])

        sub.save()
        log.debug("Submission object created successfully!")
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.  This is the function's error
        # boundary -- log the traceback and report failure to the caller.
        log.exception("Submission creation failed!")
        return False

    transaction.commit_unless_managed()
    return True
def handle_submission(sub):
    """
    Handles a new submission.  Decides what the next grader should be and saves it.

    Input:
        sub - A Submission object from controller.models
    Output:
        True/False status code
    """
    try:
        # Run some basic sanity checks on submission.  Also check to see if
        # student is banned, and fail them if they are.
        sub.next_grader_type = "BC"
        sub.save()
        transaction.commit_unless_managed()
        success, check_dict = basic_check_util.simple_quality_check(
            sub.student_response, sub.initial_display, sub.student_id,
            sub.skip_basic_checks)
        if not success:
            # NOTE(review): log.exception outside an except handler logs a
            # bogus traceback -- presumably log.error was intended; confirm.
            log.exception("could not run basic checks on {0}".format(
                sub.student_response))

        # Add additional tags needed to create a grader object.
        check_dict = grader_util.add_additional_tags_to_dict(
            check_dict, sub.id)

        if check_dict['score'] == 0:
            # Submission failed basic checks: record a complete set of zero
            # rubric scores (one per rubric target) on the grader object.
            success, max_rubric_scores = rubric_functions.generate_targets_from_rubric(
                sub.rubric)
            if success:
                check_dict['rubric_scores_complete'] = True
                check_dict['rubric_scores'] = [
                    0 for i in xrange(0, len(max_rubric_scores))
                ]

        # Create and handle the grader, and return.
        grader_util.create_and_handle_grader_object(check_dict)

        # If the checks result in a score of 0 (out of 1), then the submission
        # fails basic sanity checks.
        # Return to student and don't process further.
        if check_dict['score'] == 0:
            return True
        else:
            sub.state = SubmissionState.waiting_to_be_graded
            sl = staff_grading_util.StaffLocation(sub.location)
            # Assign whether grader should be ML or IN based on number of
            # graded examples.
            subs_graded_by_instructor, subs_pending_instructor = sl.graded_count(
            ), sl.pending_count()

            # TODO: abstract out logic for assigning which grader to go with.
            grader_settings_path = os.path.join(settings.GRADER_SETTINGS_DIRECTORY, sub.grader_settings)
            grader_settings = grader_util.get_grader_settings(grader_settings_path)

            if grader_settings['grader_type'] == "ML":
                # Only route to ML once enough instructor-graded examples
                # exist AND the ML models/rubric are ready for this location.
                success = ml_grading_util.check_for_all_model_and_rubric_success(
                    sub.location)
                if (((subs_graded_by_instructor + subs_pending_instructor) >=
                     settings.MIN_TO_USE_ML) and success):
                    sub.next_grader_type = "ML"
                else:
                    sub.next_grader_type = "IN"
            elif grader_settings['grader_type'] == "PE":
                # Ensures that there will be some calibration essays before
                # peer grading begins!  Calibration essays can be added using
                # command line utility, or through normal instructor grading.
                if ((subs_graded_by_instructor + subs_pending_instructor) >=
                        settings.MIN_TO_USE_PEER):
                    sub.next_grader_type = "PE"
                else:
                    sub.next_grader_type = "IN"
            elif grader_settings['grader_type'] == "IN":
                sub.next_grader_type = "IN"
            else:
                # Unrecognized grader type in the settings file -- bail out
                # before the final transaction commit.
                log.exception("Invalid grader type specified in settings file.")
                return False
            sub.preferred_grader_type = grader_settings['grader_type']

            # Do duplicate checks.
            is_duplicate, is_plagiarized, duplicate_id = grader_util.check_is_duplicate_and_plagiarized(
                sub.student_response, sub.location, sub.student_id,
                sub.preferred_grader_type)
            sub.is_duplicate = is_duplicate
            sub.is_plagiarized = is_plagiarized
            sub.duplicate_submission_id = duplicate_id
            sub.has_been_duplicate_checked = True

            # Emit metrics on duplicate/plagiarism detection outcomes.
            statsd.increment(
                "open_ended_assessment.grading_controller.controller.xqueue_interface.handle_submission.duplicates",
                tags=[
                    "duplicate:{0}".format(is_duplicate),
                    "is_plagiarized:{0}".format(is_plagiarized)
                ])
            sub.save()
    except Exception:
        # Error boundary for the whole handler: log traceback, report failure.
        log.exception("Submission creation failed!")
        return False
    transaction.commit_unless_managed()
    return True