def is_grading_workflow_complete(workflow_uuid):
    """
    Check whether the grading workflow is complete.

    Args:
        workflow_uuid (str): The UUID of the grading workflow

    Returns:
        bool

    Raises:
        AIGradingRequestError: No workflow exists with the given UUID.
        AIGradingInternalError: An unexpected database error occurred.
    """
    # Error-message templates, formatted lazily only on the failure paths.
    not_found_msg = (
        u"Could not retrieve grading workflow "
        u"with uuid {uuid} to check whether it's complete."
    )
    db_error_msg = (
        u"An unexpected error occurred while checking "
        u"the grading workflow with uuid {uuid} for completeness"
    )
    try:
        return AIGradingWorkflow.is_workflow_complete(workflow_uuid)
    except AIGradingWorkflow.DoesNotExist:
        raise AIGradingRequestError(not_found_msg.format(uuid=workflow_uuid))
    except DatabaseError:
        raise AIGradingInternalError(db_error_msg.format(uuid=workflow_uuid))
def is_grading_workflow_complete(workflow_uuid):
    """
    Check whether the grading workflow is complete.

    Args:
        workflow_uuid (str): The UUID of the grading workflow

    Returns:
        bool

    Raises:
        AIGradingRequestError: If the workflow does not exist.
        AIGradingInternalError: On an unexpected database failure.
    """
    try:
        complete = AIGradingWorkflow.is_workflow_complete(workflow_uuid)
    except AIGradingWorkflow.DoesNotExist:
        # Caller asked about a workflow we have no record of.
        raise AIGradingRequestError(
            u"Could not retrieve grading workflow "
            u"with uuid {uuid} to check whether it's complete.".format(uuid=workflow_uuid)
        )
    except DatabaseError:
        # Anything else from the DB layer is an internal error.
        raise AIGradingInternalError(
            u"An unexpected error occurred while checking "
            u"the grading workflow with uuid {uuid} for completeness".format(uuid=workflow_uuid)
        )
    else:
        return complete
def handle(self, *args, **options):
    """
    Execute the command.

    Args:
        course_id (unicode): The ID of the course to create submissions/workflows in.
        item_id (unicode): The ID of the problem in the course.
        num_submissions (int): The number of submissions/workflows to create.
        algorithm_id (unicode): The ID of the ML algorithm to use ("fake" or "ease")

    Raises:
        CommandError
    """
    # All four positional arguments are required.
    if len(args) < 4:
        raise CommandError(u"Usage: simulate_ai_grading_error {}".format(
            self.args))

    # Parse arguments (byte-string argv entries decoded to unicode)
    course_id = args[0].decode('utf-8')
    item_id = args[1].decode('utf-8')
    num_submissions = int(args[2])
    algorithm_id = args[3].decode('utf-8')

    # Create the rubric model from the class-level rubric definition
    rubric = rubric_from_dict(self.RUBRIC)

    # Train classifiers: one classifier per rubric criterion,
    # each trained from the class-level examples for that criterion.
    print u"Training classifiers using {algorithm_id}...".format(
        algorithm_id=algorithm_id)
    algorithm = AIAlgorithm.algorithm_for_id(algorithm_id)
    classifier_data = {
        criterion_name: algorithm.train_classifier(example)
        for criterion_name, example in self.EXAMPLES.iteritems()
    }
    print u"Successfully trained classifiers."

    # Create the classifier set that the workflows will reference
    classifier_set = AIClassifierSet.create_classifier_set(
        classifier_data, rubric, algorithm_id, course_id, item_id)
    print u"Successfully created classifier set with id {}".format(
        classifier_set.pk)

    # Create submissions and grading workflows.  Each workflow gets the
    # classifier set but no grading task is ever scheduled, so the
    # workflows are deliberately left incomplete (this command simulates
    # an AI grading error).
    for num in range(num_submissions):
        student_item = {
            'course_id': course_id,
            'item_id': item_id,
            'item_type': 'openassessment',
            # Unique student ID per submission
            'student_id': "{base}_{num}".format(base=self.STUDENT_ID, num=num)
        }
        submission = sub_api.create_submission(student_item, self.ANSWER)
        workflow = AIGradingWorkflow.start_workflow(
            submission['uuid'], self.RUBRIC, algorithm_id)
        workflow.classifier_set = classifier_set
        workflow.save()
        print u"{num}: Created incomplete grading workflow with UUID {uuid}".format(
            num=num, uuid=workflow.uuid)
def handle(self, *args, **options):
    """
    Execute the command.

    Args:
        course_id (unicode): The ID of the course to create submissions/workflows in.
        item_id (unicode): The ID of the problem in the course.
        num_submissions (int): The number of submissions/workflows to create.
        algorithm_id (unicode): The ID of the ML algorithm to use ("fake" or "ease")

    Raises:
        CommandError
    """
    # All four positional arguments are required.
    if len(args) < 4:
        raise CommandError(u"Usage: simulate_ai_grading_error {}".format(self.args))

    # Parse arguments (byte-string argv entries decoded to unicode)
    course_id = args[0].decode('utf-8')
    item_id = args[1].decode('utf-8')
    num_submissions = int(args[2])
    algorithm_id = args[3].decode('utf-8')

    # Create the rubric model from the class-level rubric definition
    rubric = rubric_from_dict(self.RUBRIC)

    # Train classifiers: one classifier per rubric criterion,
    # each trained from the class-level examples for that criterion.
    print u"Training classifiers using {algorithm_id}...".format(algorithm_id=algorithm_id)
    algorithm = AIAlgorithm.algorithm_for_id(algorithm_id)
    classifier_data = {
        criterion_name: algorithm.train_classifier(example)
        for criterion_name, example in self.EXAMPLES.iteritems()
    }
    print u"Successfully trained classifiers."

    # Create the classifier set that the workflows will reference
    classifier_set = AIClassifierSet.create_classifier_set(
        classifier_data, rubric, algorithm_id, course_id, item_id
    )
    print u"Successfully created classifier set with id {}".format(classifier_set.pk)

    # Create submissions and grading workflows.  Each workflow gets the
    # classifier set but no grading task is ever scheduled, so the
    # workflows are deliberately left incomplete (this command simulates
    # an AI grading error).
    for num in range(num_submissions):
        student_item = {
            'course_id': course_id,
            'item_id': item_id,
            'item_type': 'openassessment',
            # Unique student ID per submission
            'student_id': "{base}_{num}".format(base=self.STUDENT_ID, num=num)
        }
        submission = sub_api.create_submission(student_item, self.ANSWER)
        workflow = AIGradingWorkflow.start_workflow(
            submission['uuid'], self.RUBRIC, algorithm_id
        )
        workflow.classifier_set = classifier_set
        workflow.save()
        print u"{num}: Created incomplete grading workflow with UUID {uuid}".format(
            num=num, uuid=workflow.uuid
        )
def _assert_complete(self, training_done=None, grading_done=None):
    """
    Assert that training and grading workflows have the expected
    completion status.  Shared assertion helper for several unit tests.

    Args:
        training_done (bool): Expected value of "no incomplete training
            workflows remain"; skipped when None.
        grading_done (bool): Expected value of "no incomplete grading
            workflows remain"; skipped when None.
    """
    # Fetch both incomplete-workflow generators up front,
    # mirroring the query order the tests rely on.
    training_incomplete = AITrainingWorkflow.get_incomplete_workflows(
        course_id=COURSE_ID, item_id=ITEM_ID
    )
    grading_incomplete = AIGradingWorkflow.get_incomplete_workflows(
        course_id=COURSE_ID, item_id=ITEM_ID
    )

    # An empty generator means every workflow of that kind is done.
    if training_done is not None:
        self.assertEqual(
            self._is_empty_generator(training_incomplete), training_done
        )
    if grading_done is not None:
        self.assertEqual(
            self._is_empty_generator(grading_incomplete), grading_done
        )
def setUp(self):
    """
    Create a submission and grading workflow.
    """
    # Submission under test
    new_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
    self.submission_uuid = new_submission['uuid']

    # Grading workflow tracking that submission
    grading_workflow = AIGradingWorkflow.start_workflow(
        self.submission_uuid, RUBRIC, ALGORITHM_ID
    )
    self.workflow_uuid = grading_workflow.uuid

    # Attach trained classifiers to the workflow
    rubric_model = rubric_from_dict(RUBRIC)
    grading_workflow.classifier_set = AIClassifierSet.create_classifier_set(
        self.CLASSIFIERS,
        rubric_model,
        ALGORITHM_ID,
        STUDENT_ITEM.get('course_id'),
        STUDENT_ITEM.get('item_id'),
    )
    grading_workflow.save()
def setUp(self):
    """
    Build the shared fixtures: a submission, a grading workflow for it,
    and a classifier set associated with that workflow.
    """
    # Create the submission the tests will grade
    created = sub_api.create_submission(STUDENT_ITEM, ANSWER)
    self.submission_uuid = created['uuid']

    # Start a grading workflow for the submission
    wf = AIGradingWorkflow.start_workflow(self.submission_uuid, RUBRIC, ALGORITHM_ID)
    self.workflow_uuid = wf.uuid

    # Build a classifier set for the rubric and associate it
    serialized_rubric = rubric_from_dict(RUBRIC)
    cls_set = AIClassifierSet.create_classifier_set(
        self.CLASSIFIERS,
        serialized_rubric,
        ALGORITHM_ID,
        STUDENT_ITEM.get('course_id'),
        STUDENT_ITEM.get('item_id'),
    )
    wf.classifier_set = cls_set
    wf.save()
def _assert_complete(self, training_done=None, grading_done=None):
    """
    Assert the completion status of training and grading workflows.
    Serves as a shared assertion for a number of unit tests.

    Args:
        training_done (bool): Expected "all training workflows finished"
            flag; check is skipped when None.
        grading_done (bool): Expected "all grading workflows finished"
            flag; check is skipped when None.
    """
    # Evaluate both incomplete-workflow queries first (preserving the
    # original query order), then run whichever assertions were requested.
    pending = [
        (
            AITrainingWorkflow.get_incomplete_workflows(
                course_id=COURSE_ID, item_id=ITEM_ID
            ),
            training_done,
        ),
        (
            AIGradingWorkflow.get_incomplete_workflows(
                course_id=COURSE_ID, item_id=ITEM_ID
            ),
            grading_done,
        ),
    ]
    for workflow_gen, expected in pending:
        if expected is not None:
            # Empty generator <=> no incomplete workflows remain.
            self.assertEqual(self._is_empty_generator(workflow_gen), expected)
def on_init(submission_uuid, rubric=None, algorithm_id=None):
    """
    Submit a response for AI assessment.

    This will:
        (a) create a workflow (database record) to track the grading task
        (b) if classifiers exist for the rubric, schedule an asynchronous grading task.

    Args:
        submission_uuid (str): The UUID of the submission to assess.

    Keyword Arguments:
        rubric (dict): Serialized rubric model.
        algorithm_id (unicode): Use only classifiers trained with the specified algorithm.

    Returns:
        grading_workflow_uuid (str): The UUID of the grading workflow.
            Usually the caller of `submit()` won't need this (since the workers
            are parameterized by grading workflow UUID), but it's useful for testing.

    Raises:
        AIGradingRequestError
        AIGradingInternalError

    Example Usage:

    >>> on_init('74a9d63e8a5fea369fd391d07befbd86ae4dc6e2', rubric, 'ease')
    '10df7db776686822e501b05f452dc1e4b9141fe5'
    """
    # Validate keyword arguments up front; both are required in practice.
    if rubric is None:
        raise AIGradingRequestError(u'No rubric provided')
    if algorithm_id is None:
        raise AIGradingRequestError(u'No algorithm ID provided')

    try:
        workflow = AIGradingWorkflow.start_workflow(submission_uuid, rubric, algorithm_id)
    except (sub_api.SubmissionNotFoundError, sub_api.SubmissionRequestError) as ex:
        # The submission itself could not be retrieved: caller error.
        msg = (
            u"An error occurred while retrieving the "
            u"submission with UUID {uuid}: {ex}"
        ).format(uuid=submission_uuid, ex=ex)
        raise AIGradingRequestError(msg)
    except InvalidRubric as ex:
        # The serialized rubric the caller passed in was malformed.
        msg = (
            u"An error occurred while parsing the serialized "
            u"rubric {rubric}: {ex}"
        ).format(rubric=rubric, ex=ex)
        raise AIGradingRequestError(msg)
    except (sub_api.SubmissionInternalError, DatabaseError) as ex:
        # Unexpected infrastructure failure: log with traceback and wrap.
        msg = (
            u"An unexpected error occurred while submitting an "
            u"essay for AI grading: {ex}"
        ).format(ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    # If we find classifiers for this rubric/algorithm
    # then associate the classifiers with the workflow
    # and schedule a grading task.
    # Otherwise, the task will need to be scheduled later,
    # once the classifiers have been trained.
    if workflow.classifier_set is not None:
        try:
            grading_tasks.grade_essay.apply_async(args=[workflow.uuid])
            logger.info((
                u"Scheduled grading task for AI grading workflow with UUID {workflow_uuid} "
                u"(submission UUID = {sub_uuid}, algorithm ID = {algorithm_id})"
            ).format(workflow_uuid=workflow.uuid, sub_uuid=submission_uuid, algorithm_id=algorithm_id))
            return workflow.uuid
        except (DatabaseError,) + ANTICIPATED_CELERY_ERRORS as ex:
            msg = (
                u"An unexpected error occurred while scheduling the "
                u"AI grading task for the submission with UUID {uuid}: {ex}"
            ).format(uuid=submission_uuid, ex=ex)
            logger.exception(msg)
            raise AIGradingInternalError(msg)
    else:
        logger.info((
            u"Cannot schedule a grading task for AI grading workflow with UUID {workflow_uuid} "
            u"because no classifiers are available for the rubric associated with submission {sub_uuid} "
            u"for the algorithm {algorithm_id}"
        ).format(workflow_uuid=workflow.uuid, sub_uuid=submission_uuid, algorithm_id=algorithm_id))
        # FIX: previously this branch fell off the end and returned None,
        # contradicting the documented contract that the workflow UUID is
        # returned.  The workflow was still created, so return its UUID.
        return workflow.uuid