def schedule_training(self, data, suffix=''):  # pylint: disable=W0613
    """
    Schedule a new training task for example-based grading.

    Args:
        data (dict): Unused; required by the JSON handler signature.
        suffix (str): Unused.

    Returns:
        dict: Always has 'success' (bool) and a user-facing 'msg'; on
        success it also carries the 'workflow_uuid' of the new workflow.
    """
    assessment_module = self.get_assessment_module('example-based-assessment')
    item_info = self.get_student_item_dict()

    # Guard clause: nothing to schedule if the step isn't configured here.
    if not assessment_module:
        return {
            'success': False,
            'msg': self._(u"Example Based Assessment is not configured for this location.")
        }

    try:
        uuid = ai_api.train_classifiers(
            create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
            convert_training_examples_list_to_dict(assessment_module["examples"]),
            item_info.get('course_id'),
            item_info.get('item_id'),
            assessment_module["algorithm_id"]
        )
    except AIError as err:
        return {
            'success': False,
            'msg': self._(u"An error occurred scheduling classifier training: {error}".format(error=err))
        }

    return {
        'success': True,
        'workflow_uuid': uuid,
        'msg': self._(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=uuid))
    }
def _validate_assessment_examples(rubric_dict, assessments): """ Validate assessment training examples. Args: rubric_dict (dict): The serialized rubric model. assessments (list of dict): List of assessment dictionaries. Returns: tuple (is_valid, msg) where is_valid is a boolean indicating whether the assessment is semantically valid and msg describes any validation errors found. """ for asmnt in assessments: if asmnt['name'] == 'student-training': examples = convert_training_examples_list_to_dict(asmnt['examples']) # Delegate to the student training API to validate the # examples against the rubric. errors = validate_training_examples(rubric_dict, examples) if errors: return False, "\n".join(errors) return True, u''
def validate_assessment_examples(rubric_dict, assessments, _):
    """
    Validate assessment training examples.

    Args:
        rubric_dict (dict): The serialized rubric model.
        assessments (list of dict): List of assessment dictionaries.
        _ (function): The service function used to get the appropriate i18n text

    Returns:
        tuple (is_valid, msg) where is_valid is a boolean indicating whether
        the assessment is semantically valid and msg describes any validation
        errors found.
    """
    for assessment in assessments:
        # Skip every step other than learner training.
        if assessment['name'] != 'student-training':
            continue

        training_examples = convert_training_examples_list_to_dict(assessment['examples'])

        # Must have at least one training example
        if not training_examples:
            return False, _(
                u"Learner training must have at least one training example."
            )

        # Delegate to the student training API to validate the
        # examples against the rubric.
        errors = validate_training_examples(rubric_dict, training_examples)
        if errors:
            return False, "; ".join(errors)

    return True, u''
def _validate_assessment_examples(rubric_dict, assessments): """ Validate assessment training examples. Args: rubric_dict (dict): The serialized rubric model. assessments (list of dict): List of assessment dictionaries. Returns: tuple (is_valid, msg) where is_valid is a boolean indicating whether the assessment is semantically valid and msg describes any validation errors found. """ for asmnt in assessments: if asmnt['name'] == 'student-training': examples = convert_training_examples_list_to_dict(asmnt['examples']) # Delegate to the student training API to validate the # examples against the rubric. errors = validate_training_examples(rubric_dict, examples) if errors: return False, "\n".join(errors) return True, u''
def validate_assessment_examples(rubric_dict, assessments, _):
    """
    Validate assessment training examples.

    Args:
        rubric_dict (dict): The serialized rubric model.
        assessments (list of dict): List of assessment dictionaries.
        _ (function): The service function used to get the appropriate i18n text

    Returns:
        tuple (is_valid, msg) where is_valid is a boolean indicating whether
        the assessment is semantically valid and msg describes any validation
        errors found.
    """
    for step in assessments:
        if step['name'] == 'student-training':
            example_dict = convert_training_examples_list_to_dict(step['examples'])

            # Must have at least one training example
            if len(example_dict) == 0:
                message = _(
                    u"Learner training must have at least one training example."
                )
                return False, message

            # Delegate to the student training API to validate the
            # examples against the rubric.
            found_errors = validate_training_examples(rubric_dict, example_dict)
            if found_errors:
                return False, "; ".join(found_errors)

    return True, u''
def schedule_training(self, data, suffix=''):  # pylint: disable=W0613
    """
    Schedule a new training task for example-based grading.

    Args:
        data (dict): Not used; part of the handler signature.
        suffix (str): Not used.

    Returns:
        dict: 'success' flag plus a translated 'msg'; includes
        'workflow_uuid' when scheduling succeeded.
    """
    example_based = self.get_assessment_module('example-based-assessment')
    student_item = self.get_student_item_dict()

    if example_based:
        try:
            workflow_uuid = ai_api.train_classifiers(
                create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
                convert_training_examples_list_to_dict(example_based["examples"]),
                student_item.get('course_id'),
                student_item.get('item_id'),
                example_based["algorithm_id"],
            )
            return {
                'success': True,
                'workflow_uuid': workflow_uuid,
                'msg': self._(u"Training scheduled with new Workflow UUID: {uuid}".format(uuid=workflow_uuid)),
            }
        except AIError as err:
            return {
                'success': False,
                'msg': self._(u"An error occurred scheduling classifier training: {error}".format(error=err)),
            }

    return {
        'success': False,
        'msg': self._(u"Example Based Assessment is not configured for this location."),
    }
def training_path_and_context(self):
    """
    Return the template path and context used to render the student training step.

    Returns:
        tuple of `(path, context)` where `path` is the path to the template and
            `context` is a dict.
    """
    # Retrieve the status of the workflow.
    # If no submissions have been created yet, the status will be None.
    workflow_status = self.get_workflow_info().get('status')
    problem_closed, reason, start_date, due_date = self.is_closed(
        step="student-training")

    context = {}
    template = 'openassessmentblock/student_training/student_training_unavailable.html'

    # add allow_latex field to the context
    context['allow_latex'] = self.allow_latex

    # No submission yet: the step is unavailable.
    if not workflow_status:
        return template, context

    # If the student has completed the training step, then show that the step is complete.
    # We put this condition first so that if a student has completed the step, it *always*
    # shows as complete.
    # We're assuming here that the training step always precedes the other assessment steps
    # (peer/self) -- we may need to make this more flexible later.
    if workflow_status == 'cancelled':
        template = 'openassessmentblock/student_training/student_training_cancelled.html'
    elif workflow_status and workflow_status != "training":
        template = 'openassessmentblock/student_training/student_training_complete.html'

    # If the problem is closed, then do not allow students to access the training step
    elif problem_closed and reason == 'start':
        context['training_start'] = start_date
        template = 'openassessmentblock/student_training/student_training_unavailable.html'
    elif problem_closed and reason == 'due':
        context['training_due'] = due_date
        template = 'openassessmentblock/student_training/student_training_closed.html'

    # If we're on the training step, show the student an example
    # We do this last so we can avoid querying the student training API if possible.
    else:
        training_module = self.get_assessment_module('student-training')
        if not training_module:
            return template, context

        # NOTE(review): assumes due_date and DISTANT_FUTURE are comparable
        # datetime values returned by is_closed -- confirm that contract.
        if due_date < DISTANT_FUTURE:
            context['training_due'] = due_date

        # Report progress in the student training workflow (completed X out of Y)
        context['training_num_available'] = len(
            training_module["examples"])
        context[
            'training_num_completed'] = student_training.get_num_completed(
                self.submission_uuid)
        context[
            'training_num_current'] = context['training_num_completed'] + 1

        # Retrieve the example essay for the student to submit
        # This will contain the essay text, the rubric, and the options the instructor selected.
        examples = convert_training_examples_list_to_dict(
            training_module["examples"])
        example = student_training.get_training_example(
            self.submission_uuid, {
                'prompt': self.prompt,
                'criteria': self.rubric_criteria_with_labels
            }, examples)
        if example:
            context['training_essay'] = create_submission_dict(
                {'answer': example['answer']}, self.prompts)
            context['training_rubric'] = {
                'criteria': example['rubric']['criteria'],
                'points_possible': example['rubric']['points_possible']
            }
            template = 'openassessmentblock/student_training/student_training.html'
        else:
            # The training API returned nothing; surface an error template
            # rather than rendering a blank step.
            logger.error(
                "No training example was returned from the API for student "
                "with Submission UUID {}".format(self.submission_uuid))
            template = "openassessmentblock/student_training/student_training_error.html"
    return template, context
def training_path_and_context(self):
    """
    Return the template path and context used to render the student training step.

    Returns:
        tuple of `(path, context)` where `path` is the path to the template and
            `context` is a dict.
    """
    # Workflow status is None until the learner has made a submission.
    status = self.get_workflow_info().get('status')
    closed, closed_reason, start, due = self.is_closed(step="student-training")

    path = 'openassessmentblock/student_training/student_training_unavailable.html'
    ctx = {'allow_latex': self.allow_latex}

    if not status:
        return path, ctx

    # Completed/cancelled states win over everything else so that a learner
    # who has finished the step always sees it as complete.  We assume the
    # training step precedes the other assessment steps (peer/self).
    if status == 'cancelled':
        return 'openassessmentblock/student_training/student_training_cancelled.html', ctx
    if status != "training":
        return 'openassessmentblock/student_training/student_training_complete.html', ctx

    # A closed problem blocks access to the training step.
    if closed and closed_reason == 'start':
        ctx['training_start'] = start
        return 'openassessmentblock/student_training/student_training_unavailable.html', ctx
    if closed and closed_reason == 'due':
        ctx['training_due'] = due
        return 'openassessmentblock/student_training/student_training_closed.html', ctx

    # On the training step: fetch an example for the learner.  Done last so
    # we only query the student training API when necessary.
    training_module = self.get_assessment_module('student-training')
    if not training_module:
        return path, ctx

    if due < DISTANT_FUTURE:
        ctx['training_due'] = due

    # Progress within the training workflow (completed X out of Y).
    ctx['training_num_available'] = len(training_module["examples"])
    ctx['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
    ctx['training_num_current'] = ctx['training_num_completed'] + 1

    # The example essay contains the essay text, the rubric, and the options
    # the instructor selected.
    examples = convert_training_examples_list_to_dict(training_module["examples"])
    example = student_training.get_training_example(
        self.submission_uuid,
        {
            'prompt': self.prompt,
            'criteria': self.rubric_criteria_with_labels
        },
        examples
    )
    if example:
        ctx['training_essay'] = create_submission_dict({'answer': example['answer']}, self.prompts)
        ctx['training_rubric'] = {
            'criteria': example['rubric']['criteria'],
            'points_possible': example['rubric']['points_possible']
        }
        return 'openassessmentblock/student_training/student_training.html', ctx

    logger.error(
        "No training example was returned from the API for student "
        "with Submission UUID {}".format(self.submission_uuid)
    )
    return "openassessmentblock/student_training/student_training_error.html", ctx