def grade(self, submission):
    if isinstance(submission, basestring):
        return grade.grade(self.model_data, submission)
    else:
        return grade.grade_generic(
            self.model_data,
            submission.get('numeric_features', []),
            submission.get('textual_features', []))
def grade(self, submission):
    if isinstance(submission, str):
        return grade.grade(self.model_data, submission)
    else:
        return grade.grade_generic(
            self.model_data,
            submission.get('numeric_values', []),
            submission.get('textual_values', []))
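# Usage sketch for the dispatch above (illustrative; the Grader wrapper class
# and model_data payload are assumptions, while the two accepted submission
# shapes come directly from the isinstance check):
#
#     grader = Grader(model_data)
#     grader.grade("A plain-text essay response.")          # -> grade.grade
#     grader.grade({'numeric_values': [3, 1.5],
#                   'textual_values': ['a short answer']})  # -> grade.grade_generic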
def handle_single_essay(essay):
    #Needed to ensure that the DB is not wrapped in a transaction and pulls old data
    transaction.commit_unless_managed()

    #strip out unicode and other characters in student response
    #Needed, or grader may potentially fail
    #TODO: Handle unicode in student responses properly
    student_response = essay.essay_text.encode('ascii', 'ignore')

    #Gets both the max scores for each target and the number of targets
    target_max_scores = json.loads(essay.problem.max_target_scores)
    target_counts = len(target_max_scores)

    target_scores = []
    for m in xrange(0, target_counts):
        #Gets latest model for a given problem and target
        success, created_model = ml_grading_util.get_latest_created_model(essay.problem, m)

        if not success:
            error_message = "Could not identify a valid created model!"
            log.error(error_message)
            return False, error_message

        #Create grader path from location in submission
        grader_path = os.path.join(settings.ML_MODEL_PATH, created_model.model_relative_path)
        #Indicates whether the model is stored locally or in the cloud
        model_stored_in_s3 = created_model.model_stored_in_s3

        #Try to load the model file
        success, grader_data = load_model_file(created_model, use_full_path=False)
        if success:
            #Send to ML grading algorithm to be graded
            results = grade.grade(grader_data, student_response)
        else:
            results = RESULT_FAILURE_DICT

        #If the above fails, try using the full path in the created_model object
        if not results['success'] and not created_model.model_stored_in_s3:
            #Before, we used the relative path to load. Possible that the full path may work
            grader_path = created_model.model_full_path
            try:
                success, grader_data = load_model_file(created_model, use_full_path=True)
                if success:
                    results = grade.grade(grader_data, student_response)
                else:
                    results = RESULT_FAILURE_DICT
            except Exception:
                error_message = "Could not find a valid model file."
                log.exception(error_message)
                results = RESULT_FAILURE_DICT

        if m == 0:
            final_results = results

        if not results['success']:
            error_message = "Unsuccessful grading: {0}".format(results)
            log.error(error_message)
            return False, error_message

        target_scores.append(int(results['score']))

    grader_dict = {
        'essay': essay,
        'target_scores': json.dumps(target_scores),
        'grader_type': GraderTypes.machine,
        'feedback': '',
        'annotated_text': '',
        'premium_feedback_scores': json.dumps([]),
        'success': final_results['success'],
        'confidence': final_results['confidence'],
    }

    #Create grader object in controller by posting back results
    essay_grade = EssayGrade(**grader_dict)
    essay_grade.save()

    #Update the essay so that it doesn't keep trying to re-grade
    essay.has_been_ml_graded = True
    essay.save()

    transaction.commit_unless_managed()
    return True, "Successfully scored!"
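# A possible shared helper for the load-then-fallback pattern used in the
# graders here; this is an illustrative sketch, not part of the repo, and the
# name load_model_with_fallback is an assumption.
def load_model_with_fallback(created_model):
    """Try the relative model path first, then the full path for local models."""
    success, grader_data = load_model_file(created_model, use_full_path=False)
    if success:
        return True, grader_data
    #S3-backed models have no local full path to fall back to
    if not created_model.model_stored_in_s3:
        try:
            return load_model_file(created_model, use_full_path=True)
        except Exception:
            log.exception("Could not find a valid model file.")
    return False, None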
def handle_single_item(controller_session):
    sub_get_success, content = get_item_from_controller(controller_session)
    #Grade and handle here
    if sub_get_success:
        transaction.commit()
        sub = Submission.objects.get(id=int(content['submission_id']))
        sl = staff_grading_util.StaffLocation(sub.location)
        subs_graded_by_instructor = sl.graded()
        first_sub = subs_graded_by_instructor.order_by('date_created')[0]
        parsed_rubric = rubric_functions.parse_rubric(first_sub.rubric)

        #strip out unicode and other characters in student response
        #Needed, or grader may potentially fail
        #TODO: Handle unicode in student responses properly
        student_response = sub.student_response.encode('ascii', 'ignore')

        #Get the latest created model for the given location
        transaction.commit()
        location_suffixes = ml_grading_util.generate_rubric_location_suffixes(
            subs_graded_by_instructor, grading=True)

        if len(location_suffixes) > 0:
            rubric_scores_complete = True
            rubric_scores = []
            for m in xrange(0, len(location_suffixes)):
                suffix = location_suffixes[m]
                success, created_model = ml_grading_util.get_latest_created_model(
                    sub.location + suffix)

                if not success:
                    log.error("Could not identify a valid created model!")
                    if m == 0:
                        results = RESULT_FAILURE_DICT
                        formatted_feedback = "error"
                        status = GraderStatus.failure
                        statsd.increment(
                            "open_ended_assessment.grading_controller.call_ml_grader",
                            tags=["success:False"])
                else:
                    #Create grader path from location in submission
                    grader_path = os.path.join(settings.ML_MODEL_PATH,
                                               created_model.model_relative_path)
                    model_stored_in_s3 = created_model.model_stored_in_s3

                    success, grader_data = load_model_file(created_model,
                                                           use_full_path=False)
                    if success:
                        results = grade.grade(grader_data, student_response)
                    else:
                        results = RESULT_FAILURE_DICT

                    #If the above fails, try using the full path in the created_model object
                    if not results['success'] and not created_model.model_stored_in_s3:
                        grader_path = created_model.model_full_path
                        try:
                            success, grader_data = load_model_file(created_model,
                                                                   use_full_path=True)
                            if success:
                                results = grade.grade(grader_data, student_response)
                            else:
                                results = RESULT_FAILURE_DICT
                        except Exception:
                            error_message = "Could not find a valid model file."
                            log.exception(error_message)
                            results = RESULT_FAILURE_DICT

                log.info("ML Grader: Success: {0} Errors: {1}".format(
                    results['success'], results['errors']))
                statsd.increment(
                    "open_ended_assessment.grading_controller.call_ml_grader",
                    tags=["success:{0}".format(results['success']),
                          'location:{0}'.format(sub.location)])

                #Set grader status according to success/fail
                if results['success']:
                    status = GraderStatus.success
                else:
                    status = GraderStatus.failure

                if m == 0:
                    final_results = results
                elif not results['success']:
                    rubric_scores_complete = False
                else:
                    rubric_scores.append(int(results['score']))

            if len(rubric_scores) == 0:
                rubric_scores_complete = False

            grader_dict = {
                'score': int(final_results['score']),
                #Report feedback/confidence/errors from the primary (first) result,
                #consistent with the score above
                'feedback': json.dumps(final_results['feedback']),
                'status': status,
                'grader_id': 1,
                'grader_type': "ML",
                'confidence': final_results['confidence'],
                'submission_id': sub.id,
                'errors': ' '.join(final_results['errors']),
                'rubric_scores_complete': rubric_scores_complete,
                'rubric_scores': json.dumps(rubric_scores),
            }

            #Create grader object in controller by posting back results
            created, msg = util._http_post(
                controller_session,
                urlparse.urljoin(settings.GRADING_CONTROLLER_INTERFACE['url'],
                                 project_urls.ControllerURLs.put_result),
                grader_dict,
                settings.REQUESTS_TIMEOUT,
            )
    else:
        log.error("Error getting item from controller or no items to get.")
        statsd.increment(
            "open_ended_assessment.grading_controller.call_ml_grader",
            tags=["success:False"])

    util.log_connection_data()
    return sub_get_success
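# The keys read out of `results` above imply a shape for RESULT_FAILURE_DICT.
# The exact values below are assumptions for illustration; only the key names
# (success, score, feedback, errors, confidence) come from the code itself.
#
#     RESULT_FAILURE_DICT = {
#         'success': False,
#         'score': 0,
#         'feedback': '',
#         'errors': ['Error grading submission.'],
#         'confidence': 0,
#     }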
def handle_single_essay(essay):
    # Needed to ensure that the DB is not wrapped in a transaction and pulls old data
    transaction.commit()

    # strip out unicode and other characters in student response
    # Needed, or grader may potentially fail
    # TODO: Handle unicode in student responses properly
    student_response = essay.essay_text.encode('ascii', 'ignore')

    # Gets both the max scores for each target and the number of targets
    target_max_scores = json.loads(essay.problem.max_target_scores)
    target_counts = len(target_max_scores)

    target_scores = []
    for m in xrange(0, target_counts):
        # Gets latest model for a given problem and target
        success, created_model = ml_grading_util.get_latest_created_model(
            essay.problem, m)

        if not success:
            formatted_feedback = "error"
            transaction.commit()
            return False, formatted_feedback

        # Try to load the model file
        success, grader_data = load_model_file(created_model,
                                               use_full_path=False)
        if success:
            # Send to ML grading algorithm to be graded
            results = grade.grade(grader_data, student_response)
        else:
            results = RESULT_FAILURE_DICT

        # If the above fails, try using the full path in the created_model object
        if not results['success'] and not created_model.model_stored_in_s3:
            try:
                success, grader_data = load_model_file(created_model,
                                                       use_full_path=True)
                if success:
                    results = grade.grade(grader_data, student_response)
                else:
                    results = RESULT_FAILURE_DICT
            except Exception:
                error_message = "Could not find a valid model file."
                log.exception(error_message)
                results = RESULT_FAILURE_DICT

        if m == 0:
            final_results = results

        if not results['success']:
            error_message = "Unsuccessful grading: {0}".format(results)
            log.error(error_message)
            transaction.commit()
            return False, error_message

        target_scores.append(int(results['score']))

    grader_dict = {
        'essay': essay,
        'target_scores': json.dumps(target_scores),
        'grader_type': GraderTypes.machine,
        'feedback': '',
        'annotated_text': '',
        'premium_feedback_scores': json.dumps([]),
        'success': final_results['success'],
        'confidence': final_results['confidence'],
    }

    # Create grader object in controller by posting back results
    essay_grade = EssayGrade(**grader_dict)
    essay_grade.save()

    # Update the essay so that it doesn't keep trying to re-grade
    essay.has_been_ml_graded = True
    essay.save()

    # copy permissions from the essay to the essaygrade
    helpers.copy_permissions(essay, Essay, essay_grade, EssayGrade)

    transaction.commit()
    return True, "Successfully scored!"
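# A minimal driver sketch, assuming the has_been_ml_graded flag set above can
# be filtered on; the queryset and batch size are illustrative, not from the
# repo.
#
#     for essay in Essay.objects.filter(has_been_ml_graded=False)[:100]:
#         scored, message = handle_single_essay(essay)
#         if not scored:
#             log.warning("Could not score essay {0}: {1}".format(essay.id, message))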