def save_grade(request):
    """
    Save an instructor ("IN") grade for a submission.

    Supports POST requests with the following arguments:
        course_id: int
        grader_id: int
        submission_id: int
        score: int
        feedback: string

    Returns json dict with keys:
        version: int
        success: bool
        error: string, present if not success
    """
    if request.method != "POST":
        # BUG FIX: the message previously said "GET", contradicting the
        # check above — only POST is accepted.
        return util._error_response("Request needs to be POST", _INTERFACE_VERSION)

    course_id = request.POST.get('course_id')
    grader_id = request.POST.get('grader_id')
    submission_id = request.POST.get('submission_id')
    score = request.POST.get('score')
    feedback = request.POST.get('feedback')
    skipped = request.POST.get('skipped') == "True"
    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])
    is_submission_flagged = request.POST.get('submission_flagged', False)
    # The flag arrives as a string from the form post; normalize to bool.
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = is_submission_flagged.lower() == 'true'

    if (  # These have to be truthy
            not (course_id and grader_id and submission_id) or
            # These have to be non-None
            score is None or feedback is None):
        return util._error_response("required_parameter_missing", _INTERFACE_VERSION)

    if skipped:
        # Grader skipped the item: hand it back to the preferred grader
        # queue instead of recording a grade.
        success, sub = staff_grading_util.set_instructor_grading_item_back_to_preferred_grader(submission_id)
        if not success:
            # On failure `sub` carries the error message.
            return util._error_response(sub, _INTERFACE_VERSION)
        return util._success_response({}, _INTERFACE_VERSION)

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "grade_save_error", _INTERFACE_VERSION,
            data={"msg": "Expected integer score. Got {0}".format(score)})

    try:
        sub = Submission.objects.get(id=submission_id)
    except Exception:
        return util._error_response(
            "grade_save_error", _INTERFACE_VERSION,
            data={"msg": "Submission id {0} is not valid.".format(submission_id)}
        )

    # The rubric is stored on the earliest submission for this location.
    first_sub_for_location = Submission.objects.filter(
        location=sub.location).order_by('date_created')[0]
    rubric = first_sub_for_location.rubric
    rubric_success, parsed_rubric = rubric_functions.parse_rubric(rubric)

    if rubric_success:
        # Only validate rubric scores when the location has a parseable rubric.
        success, error_message = grader_util.validate_rubric_scores(
            rubric_scores, rubric_scores_complete, sub)
        if not success:
            return util._error_response(
                "grade_save_error", _INTERFACE_VERSION,
                data={"msg": error_message}
            )

    d = {
        'submission_id': submission_id,
        'score': score,
        'feedback': feedback,
        'grader_id': grader_id,
        'grader_type': 'IN',
        # Humans always succeed (if they grade at all)...
        'status': GraderStatus.success,
        # ...and they're always confident too.
        'confidence': 1.0,
        # And they don't make errors.
        'errors': "",
        'rubric_scores_complete': rubric_scores_complete,
        'rubric_scores': rubric_scores,
        'is_submission_flagged': is_submission_flagged,
    }

    success, header = grader_util.create_and_handle_grader_object(d)
    if not success:
        return util._error_response("grade_save_error", _INTERFACE_VERSION,
                                    data={'msg': 'Internal error'})

    util.log_connection_data()
    return util._success_response({}, _INTERFACE_VERSION)
def save_grade(request):
    """
    Save an instructor ("IN") grade for a submission.

    Supports POST requests with the following arguments:
        course_id: int
        grader_id: int
        submission_id: int
        score: int
        feedback: string

    Returns json dict with keys:
        version: int
        success: bool
        error: string, present if not success
    """
    if request.method != "POST":
        # BUG FIX: the message previously said "GET", contradicting the
        # check above — only POST is accepted.
        return util._error_response("Request needs to be POST", _INTERFACE_VERSION)

    course_id = request.POST.get('course_id')
    grader_id = request.POST.get('grader_id')
    submission_id = request.POST.get('submission_id')
    score = request.POST.get('score')
    feedback = request.POST.get('feedback')
    skipped = request.POST.get('skipped') == "True"
    rubric_scores_complete = request.POST.get('rubric_scores_complete', False)
    rubric_scores = request.POST.getlist('rubric_scores', [])
    is_submission_flagged = request.POST.get('submission_flagged', False)
    # The flag arrives as a string from the form post; normalize to bool.
    if isinstance(is_submission_flagged, basestring):
        is_submission_flagged = is_submission_flagged.lower() == 'true'

    if (  # These have to be truthy
            not (course_id and grader_id and submission_id) or
            # These have to be non-None
            score is None or feedback is None):
        return util._error_response("required_parameter_missing", _INTERFACE_VERSION)

    if skipped:
        log.debug(submission_id)
        # Grader skipped the item: hand it back to ML grading instead of
        # recording a grade.
        success, sub = staff_grading_util.set_instructor_grading_item_back_to_ml(
            submission_id)
        if not success:
            # On failure `sub` carries the error message.
            return util._error_response(sub, _INTERFACE_VERSION)
        return util._success_response({}, _INTERFACE_VERSION)

    try:
        score = int(score)
    except ValueError:
        return util._error_response(
            "grade_save_error", _INTERFACE_VERSION,
            data={"msg": "Expected integer score. Got {0}".format(score)})

    try:
        sub = Submission.objects.get(id=submission_id)
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt. Narrowed to Exception.
    except Exception:
        return util._error_response(
            "grade_save_error", _INTERFACE_VERSION,
            data={
                "msg": "Submission id {0} is not valid.".format(submission_id)
            })

    # The rubric is stored on the earliest submission for this location.
    first_sub_for_location = Submission.objects.filter(
        location=sub.location).order_by('date_created')[0]
    rubric = first_sub_for_location.rubric
    rubric_success, parsed_rubric = rubric_functions.parse_rubric(rubric)

    if rubric_success:
        # Only validate rubric scores when the location has a parseable rubric.
        success, error_message = grader_util.validate_rubric_scores(
            rubric_scores, rubric_scores_complete, sub)
        if not success:
            return util._error_response("grade_save_error",
                                        _INTERFACE_VERSION,
                                        data={"msg": error_message})

    d = {
        'submission_id': submission_id,
        'score': score,
        'feedback': feedback,
        'grader_id': grader_id,
        'grader_type': 'IN',
        # Humans always succeed (if they grade at all)...
        'status': GraderStatus.success,
        # ...and they're always confident too.
        'confidence': 1.0,
        # And they don't make errors.
        'errors': "",
        'rubric_scores_complete': rubric_scores_complete,
        'rubric_scores': rubric_scores,
        'is_submission_flagged': is_submission_flagged,
    }

    success, header = grader_util.create_and_handle_grader_object(d)
    if not success:
        return util._error_response("grade_save_error",
                                    _INTERFACE_VERSION,
                                    data={'msg': 'Internal error'})

    util.log_connection_data()
    return util._success_response({}, _INTERFACE_VERSION)
def handle_single_item(controller_session):
    """
    Fetch one submission from the grading controller, run the ML grader on
    it (overall score plus per-rubric-item scores), and POST the resulting
    grade back to the controller.

    Returns True if an item was successfully fetched from the controller,
    False otherwise. NOTE(review): the return value reflects only the
    fetch, not whether grading or the result post-back succeeded.
    """
    sub_get_success, content = get_item_from_controller(controller_session)
    #Grade and handle here
    if sub_get_success:
        # Commit so the subsequent queries see the freshest DB state.
        transaction.commit()
        sub = Submission.objects.get(id=int(content['submission_id']))
        sl = staff_grading_util.StaffLocation(sub.location)
        subs_graded_by_instructor = sl.graded()
        # The rubric comes from the earliest instructor-graded submission.
        first_sub = subs_graded_by_instructor.order_by('date_created')[0]
        parsed_rubric=rubric_functions.parse_rubric(first_sub.rubric)

        #strip out unicode and other characters in student response
        #Needed, or grader may potentially fail
        #TODO: Handle unicode in student responses properly
        student_response = sub.student_response.encode('ascii', 'ignore')

        #Get the latest created model for the given location
        transaction.commit()
        # One location suffix per trained model: index 0 is the overall
        # score; later indexes are individual rubric items (see the
        # m == 0 handling in the loop below).
        location_suffixes=ml_grading_util.generate_rubric_location_suffixes(subs_graded_by_instructor, grading = True)

        if len(location_suffixes)>0:
            rubric_scores_complete=True
            rubric_scores=[]
            for m in xrange(0,len(location_suffixes)):
                suffix = location_suffixes[m]
                success, created_model=ml_grading_util.get_latest_created_model(sub.location + suffix)

                if not success:
                    log.error("Could not identify a valid created model!")
                    if m==0:
                        # NOTE(review): this failure path never assigns
                        # final_results, so grader_dict below would raise
                        # NameError if the first model lookup fails — confirm.
                        results= RESULT_FAILURE_DICT
                        formatted_feedback="error"
                        status=GraderStatus.failure
                        statsd.increment("open_ended_assessment.grading_controller.call_ml_grader",
                                         tags=["success:False"])
                else:
                    #Create grader path from location in submission
                    grader_path = os.path.join(settings.ML_MODEL_PATH,created_model.model_relative_path)
                    model_stored_in_s3=created_model.model_stored_in_s3

                    # First attempt: load the model via its relative path.
                    success, grader_data=load_model_file(created_model,use_full_path=False)
                    if success:
                        results = grade.grade(grader_data, student_response)
                    else:
                        results=RESULT_FAILURE_DICT

                    #If the above fails, try using the full path in the created_model object
                    if not results['success'] and not created_model.model_stored_in_s3:
                        grader_path=created_model.model_full_path
                        try:
                            success, grader_data=load_model_file(created_model,use_full_path=True)
                            if success:
                                results = grade.grade(grader_data, student_response)
                            else:
                                results=RESULT_FAILURE_DICT
                        except Exception:
                            error_message="Could not find a valid model file."
                            log.exception(error_message)
                            results=RESULT_FAILURE_DICT

                    log.info("ML Grader: Success: {0} Errors: {1}".format(results['success'], results['errors']))

                    statsd.increment("open_ended_assessment.grading_controller.call_ml_grader",
                                     tags=["success:{0}".format(results['success']), 'location:{0}'.format(sub.location)])

                    #Set grader status according to success/fail
                    if results['success']:
                        status = GraderStatus.success
                    else:
                        status = GraderStatus.failure

                    if m==0:
                        # Suffix 0 carries the overall score.
                        final_results=results
                    elif results['success']==False:
                        # A failed rubric-item grade marks the rubric incomplete.
                        rubric_scores_complete = False
                    else:
                        rubric_scores.append(int(results['score']))

            if len(rubric_scores)==0:
                rubric_scores_complete=False

            # NOTE(review): 'score' uses final_results (overall, m == 0) but
            # 'feedback'/'confidence'/'errors' use `results` from the LAST
            # loop iteration — verify this mix is intentional.
            grader_dict = {
                'score': int(final_results['score']),
                'feedback': json.dumps(results['feedback']),
                'status': status,
                # Hardcoded id — presumably a reserved ML-grader account; confirm.
                'grader_id': 1,
                'grader_type': "ML",
                'confidence': results['confidence'],
                'submission_id': sub.id,
                'errors' : ' '.join(results['errors']),
                'rubric_scores_complete' : rubric_scores_complete,
                'rubric_scores' : json.dumps(rubric_scores),
            }

            #Create grader object in controller by posting back results
            # NOTE(review): `created`/`msg` are never checked — a failed
            # post-back is silently dropped.
            created, msg = util._http_post(
                controller_session,
                urlparse.urljoin(settings.GRADING_CONTROLLER_INTERFACE['url'],
                                 project_urls.ControllerURLs.put_result),
                grader_dict,
                settings.REQUESTS_TIMEOUT,
            )
    else:
        log.error("Error getting item from controller or no items to get.")
        statsd.increment("open_ended_assessment.grading_controller.call_ml_grader",
                         tags=["success:False"])

    util.log_connection_data()
    return sub_get_success
def handle_single_item(controller_session):
    """
    Fetch one submission from the grading controller, run the ML grader on
    it (overall score plus per-rubric-item scores), and POST the resulting
    grade back to the controller.

    Returns True if an item was successfully fetched from the controller,
    False otherwise. NOTE(review): the return value reflects only the
    fetch, not whether grading or the result post-back succeeded.
    """
    sub_get_success, content = get_item_from_controller(controller_session)
    #Grade and handle here
    if sub_get_success:
        # Commit so the subsequent queries see the freshest DB state.
        transaction.commit()
        sub = Submission.objects.get(id=int(content['submission_id']))
        sl = staff_grading_util.StaffLocation(sub.location)
        subs_graded_by_instructor = sl.graded()
        # The rubric comes from the earliest instructor-graded submission.
        first_sub = subs_graded_by_instructor.order_by('date_created')[0]
        parsed_rubric = rubric_functions.parse_rubric(first_sub.rubric)

        #strip out unicode and other characters in student response
        #Needed, or grader may potentially fail
        #TODO: Handle unicode in student responses properly
        student_response = sub.student_response.encode('ascii', 'ignore')

        #Get the latest created model for the given location
        transaction.commit()
        # One location suffix per trained model: index 0 is the overall
        # score; later indexes are individual rubric items (see the
        # m == 0 handling in the loop below).
        location_suffixes = ml_grading_util.generate_rubric_location_suffixes(
            subs_graded_by_instructor, grading=True)

        if len(location_suffixes) > 0:
            rubric_scores_complete = True
            rubric_scores = []
            for m in xrange(0, len(location_suffixes)):
                suffix = location_suffixes[m]
                success, created_model = ml_grading_util.get_latest_created_model(
                    sub.location + suffix)

                if not success:
                    log.error("Could not identify a valid created model!")
                    if m == 0:
                        # NOTE(review): this failure path never assigns
                        # final_results, so grader_dict below would raise
                        # NameError if the first model lookup fails — confirm.
                        results = RESULT_FAILURE_DICT
                        formatted_feedback = "error"
                        status = GraderStatus.failure
                        statsd.increment(
                            "open_ended_assessment.grading_controller.call_ml_grader",
                            tags=["success:False"])
                else:
                    #Create grader path from location in submission
                    grader_path = os.path.join(settings.ML_MODEL_PATH,
                                               created_model.model_relative_path)
                    model_stored_in_s3 = created_model.model_stored_in_s3

                    # First attempt: load the model via its relative path.
                    success, grader_data = load_model_file(created_model,
                                                           use_full_path=False)
                    if success:
                        results = grade.grade(grader_data, student_response)
                    else:
                        results = RESULT_FAILURE_DICT

                    #If the above fails, try using the full path in the created_model object
                    if not results[
                            'success'] and not created_model.model_stored_in_s3:
                        grader_path = created_model.model_full_path
                        try:
                            success, grader_data = load_model_file(
                                created_model, use_full_path=True)
                            if success:
                                results = grade.grade(grader_data,
                                                      student_response)
                            else:
                                results = RESULT_FAILURE_DICT
                        except Exception:
                            error_message = "Could not find a valid model file."
                            log.exception(error_message)
                            results = RESULT_FAILURE_DICT

                    log.info("ML Grader: Success: {0} Errors: {1}".format(
                        results['success'], results['errors']))

                    statsd.increment(
                        "open_ended_assessment.grading_controller.call_ml_grader",
                        tags=[
                            "success:{0}".format(results['success']),
                            'location:{0}'.format(sub.location)
                        ])

                    #Set grader status according to success/fail
                    if results['success']:
                        status = GraderStatus.success
                    else:
                        status = GraderStatus.failure

                    if m == 0:
                        # Suffix 0 carries the overall score.
                        final_results = results
                    elif results['success'] == False:
                        # A failed rubric-item grade marks the rubric incomplete.
                        rubric_scores_complete = False
                    else:
                        rubric_scores.append(int(results['score']))

            if len(rubric_scores) == 0:
                rubric_scores_complete = False

            # NOTE(review): 'score' uses final_results (overall, m == 0) but
            # 'feedback'/'confidence'/'errors' use `results` from the LAST
            # loop iteration — verify this mix is intentional.
            grader_dict = {
                'score': int(final_results['score']),
                'feedback': json.dumps(results['feedback']),
                'status': status,
                # Hardcoded id — presumably a reserved ML-grader account; confirm.
                'grader_id': 1,
                'grader_type': "ML",
                'confidence': results['confidence'],
                'submission_id': sub.id,
                'errors': ' '.join(results['errors']),
                'rubric_scores_complete': rubric_scores_complete,
                'rubric_scores': json.dumps(rubric_scores),
            }

            #Create grader object in controller by posting back results
            # NOTE(review): `created`/`msg` are never checked — a failed
            # post-back is silently dropped.
            created, msg = util._http_post(
                controller_session,
                urlparse.urljoin(settings.GRADING_CONTROLLER_INTERFACE['url'],
                                 project_urls.ControllerURLs.put_result),
                grader_dict,
                settings.REQUESTS_TIMEOUT,
            )
    else:
        log.error("Error getting item from controller or no items to get.")
        statsd.increment(
            "open_ended_assessment.grading_controller.call_ml_grader",
            tags=["success:False"])

    util.log_connection_data()
    return sub_get_success