Example 1
def create_assessment(grading_workflow_uuid, criterion_scores):
    """
    Create an AI assessment (complete the AI grading task).

    Args:
        grading_workflow_uuid (str): The UUID of the grading workflow.
        criterion_scores (dict): Dictionary mapping criteria names to integer scores.

    Returns:
        None

    Raises:
        AIGradingRequestError
        AIGradingInternalError

    """
    try:
        workflow = AIGradingWorkflow.objects.get(uuid=grading_workflow_uuid)
    except AIGradingWorkflow.DoesNotExist:
        msg = (u"Could not retrieve the AI grading workflow with uuid {}"
               ).format(grading_workflow_uuid)
        raise AIGradingRequestError(msg)
    except DatabaseError as ex:
        msg = (u"An unexpected error occurred while retrieving the "
               u"AI grading workflow with uuid {uuid}: {ex}").format(
                   uuid=grading_workflow_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    # Optimization: if the workflow has already been marked complete
    # (perhaps the task was picked up by multiple workers),
    # then we don't need to do anything.
    # Otherwise, create the assessment and mark the workflow complete.
    try:
        if not workflow.is_complete:
            workflow.complete(criterion_scores)
            logger.info((
                u"Created assessment for AI grading workflow with UUID {workflow_uuid} "
                u"(algorithm ID {algorithm_id})").format(
                    workflow_uuid=workflow.uuid,
                    algorithm_id=workflow.algorithm_id))
        else:
            msg = u"Grading workflow with UUID {} is already marked complete".format(
                workflow.uuid)
            logger.info(msg)
    except DatabaseError as ex:
        msg = (u"An unexpected error occurred while creating the assessment "
               u"for AI grading workflow with uuid {uuid}: {ex}").format(
                   uuid=grading_workflow_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    # Fire a signal to update the workflow API
    # This will allow students to receive a score if they're
    # waiting on an AI assessment.
    # The signal receiver is responsible for catching and logging
    # all exceptions that may occur when updating the workflow.
    from openassessment.assessment.signals import assessment_complete_signal
    assessment_complete_signal.send(sender=None,
                                    submission_uuid=workflow.submission_uuid)
Example 2
def get_grading_task_params(grading_workflow_uuid):
    """
    Retrieve the classifier set and algorithm ID
    associated with a particular grading workflow.

    Args:
        grading_workflow_uuid (str): The UUID of the grading workflow.

    Returns:
        dict with keys:
            * essay_text (unicode): The text of the essay submission.
            * classifier_set (dict): Maps criterion names to serialized classifiers.
            * valid_scores (dict): Maps criterion names to a list of valid scores for that criterion.
            * algorithm_id (unicode): ID of the algorithm used to perform training.

    Raises:
        AIGradingRequestError
        AIGradingInternalError

    """
    try:
        workflow = AIGradingWorkflow.objects.get(uuid=grading_workflow_uuid)
    except AIGradingWorkflow.DoesNotExist:
        msg = (u"Could not retrieve the AI grading workflow with uuid {}"
               ).format(grading_workflow_uuid)
        raise AIGradingRequestError(msg)
    except DatabaseError as ex:
        msg = (u"An unexpected error occurred while retrieving the "
               u"AI grading workflow with uuid {uuid}: {ex}").format(
                   uuid=grading_workflow_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    classifier_set = workflow.classifier_set
    # Tasks shouldn't be scheduled until a classifier set exists, but off
    # the happy path this can still happen.  Log the missing dependency as
    # an exception, then raise an error to kill the celery task running
    # this code.
    if classifier_set is None:
        msg = (
            u"AI grading workflow with UUID {} has no classifier set, but was scheduled for grading"
        ).format(grading_workflow_uuid)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    try:
        return {
            'essay_text': workflow.essay_text,
            'classifier_set': classifier_set.classifier_data_by_criterion,
            'algorithm_id': workflow.algorithm_id,
            'valid_scores': classifier_set.valid_scores_by_criterion,
        }
    except (DatabaseError, ClassifierSerializeError, IncompleteClassifierSet,
            ValueError, IOError, HTTPException) as ex:
        msg = (u"An unexpected error occurred while retrieving "
               u"classifiers for the grading workflow with UUID {uuid}: {ex}"
               ).format(uuid=grading_workflow_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)
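A sketch of how a worker might consume these parameters; `score_essay` is a hypothetical stand-in for the actual ML scoring call, not part of this API:

params = get_grading_task_params(grading_workflow_uuid)
# Score each criterion with its serialized classifier
# (score_essay is an assumed helper, shown for illustration only).
criterion_scores = {
    name: score_essay(params['essay_text'], classifier, params['algorithm_id'])
    for name, classifier in params['classifier_set'].items()
}
create_assessment(grading_workflow_uuid, criterion_scores)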
Example 3
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): Maps rubric criterion names to the median score from
            the most recent example-based assessment.

    Raises:
        AIGradingInternalError: If an error occurs while retrieving
            the assessment scores.
    """
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=AI_ASSESSMENT_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = u"Error getting example-based assessment scores for {}".format(submission_uuid)
        logger.exception(error_message)
        raise AIGradingInternalError(error_message)
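The shape of the returned dictionary, with illustrative criterion names and values:

scores = get_assessment_scores_by_criteria(submission_uuid)
# e.g. {'ideas': 2, 'clarity': 1}  (names and scores are illustrative)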
Example 4
def is_grading_workflow_complete(workflow_uuid):
    """
    Check whether the grading workflow is complete.

    Args:
        workflow_uuid (str): The UUID of the grading workflow

    Returns:
        bool

    Raises:
        AIGradingRequestError
        AIGradingInternalError

    """
    try:
        return AIGradingWorkflow.is_workflow_complete(workflow_uuid)
    except AIGradingWorkflow.DoesNotExist:
        msg = (u"Could not retrieve grading workflow "
               u"with uuid {uuid} to check whether it's complete.").format(
                   uuid=workflow_uuid)
        raise AIGradingRequestError(msg)
    except DatabaseError:
        msg = (
            u"An unexpected error occurred while checking "
            u"the grading workflow with uuid {uuid} for completeness").format(
                uuid=workflow_uuid)
        logger.exception(msg)
        raise AIGradingInternalError(msg)
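A caller might use this check to decide whether a grading task still needs to be scheduled; the rescheduling context below is an assumption:

# Sketch: skip workflows that are already complete before re-queueing.
if not is_grading_workflow_complete(workflow_uuid):
    grading_tasks.grade_essay.apply_async(args=[workflow_uuid])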
Example 5
def reschedule_unfinished_tasks(course_id=None,
                                item_id=None,
                                task_type=u"grade"):
    """
    Check for unfinished tasks (both grading and training) and reschedule them.
    Optionally restrict by course/item ID and task type. Default use case is to
    only reschedule the unfinished grade tasks. Applied use case (with button in
    staff mixin) is to call without argument, and to reschedule grades only.

    Keyword Arguments:
        course_id (unicode): Restrict to unfinished tasks in a particular course.
            NOTE: both course_id and item_id must be provided.
        item_id (unicode): Restrict to unfinished tasks for a particular item in a course.
        task_type (unicode): Either "grade" or "train".  Restrict to unfinished tasks of this type.
            If task_type is None, both training and grading tasks will be rescheduled,
            in that order.

    Raises:
        AIGradingInternalError
        AITrainingInternalError
        AIReschedulingRequestError
    """

    if course_id is None or item_id is None:
        msg = u"Rescheduling tasks was not possible because course_id or item_id was not provided."
        logger.error(msg)
        raise AIReschedulingRequestError(msg)

    # Reschedules all of the training tasks
    if task_type == u"train" or task_type is None:
        try:
            training_tasks.reschedule_training_tasks.apply_async(
                args=[course_id, item_id])
        except ANTICIPATED_CELERY_ERRORS as ex:
            msg = (
                u"Rescheduling training tasks for course {cid} and item {iid} failed with exception: {ex}"
            ).format(cid=course_id, iid=item_id, ex=ex)
            logger.exception(msg)
            raise AITrainingInternalError(msg)

    # Reschedules all of the grading tasks
    if task_type == u"grade" or task_type is None:
        try:
            grading_tasks.reschedule_grading_tasks.apply_async(
                args=[course_id, item_id])
        except ANTICIPATED_CELERY_ERRORS as ex:
            msg = (
                u"Rescheduling grading tasks for course {cid} and item {iid} failed with exception: {ex}"
            ).format(cid=course_id, iid=item_id, ex=ex)
            logger.exception(msg)
            raise AIGradingInternalError(msg)
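A minimal call, with illustrative course and item identifiers (both are required despite being keyword arguments):

reschedule_unfinished_tasks(
    course_id=u"example/course/id",   # illustrative course ID
    item_id=u"example-item-id",       # illustrative item ID
    task_type=u"grade",
)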
Example 6
    def test_automatic_grade_error(self):
        # Create some submissions which will not succeed. No classifiers yet exist.
        for _ in range(10):
            submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
            ai_api.on_init(submission['uuid'],
                           rubric=RUBRIC,
                           algorithm_id=ALGORITHM_ID)

        # Check that there are unresolved grading workflows
        self._assert_complete(training_done=True, grading_done=False)

        patched_method = 'openassessment.assessment.worker.training.reschedule_grading_tasks.apply_async'
        with mock.patch(patched_method) as mocked_reschedule_grading:
            mocked_reschedule_grading.side_effect = AIGradingInternalError(
                "Kablewey.")
            with self.assertRaises(AIGradingInternalError):
                ai_api.train_classifiers(RUBRIC, EXAMPLES, COURSE_ID, ITEM_ID,
                                         ALGORITHM_ID)
Example 7
def get_latest_assessment(submission_uuid):
    """
    Retrieve the latest AI assessment for a submission.

    Args:
        submission_uuid (str): The UUID of the submission being assessed.

    Returns:
        dict: The serialized assessment model
        or None if no assessments are available

    Raises:
        AIGradingInternalError

    Example usage:

    >>> get_latest_assessment('10df7db776686822e501b05f452dc1e4b9141fe5')
    {
        'points_earned': 6,
        'points_possible': 12,
        'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
        'scorer': u"ease",
        'feedback': u''
    }

    """
    try:
        assessments = Assessment.objects.filter(
            submission_uuid=submission_uuid,
            score_type=AI_ASSESSMENT_TYPE,
        )[:1]
    except DatabaseError as ex:
        msg = (
            u"An error occurred while retrieving AI graded assessments "
            u"for the submission with UUID {uuid}: {ex}"
        ).format(uuid=submission_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    if len(assessments) > 0:
        return full_assessment_dict(assessments[0])
    else:
        return None
Example 8
def get_classifier_set_info(rubric_dict, algorithm_id, course_id, item_id):
    """
    Get information about the classifier set available for a particular problem.
    This is the classifier set that would be selected to grade essays for the problem.

    Args:
        rubric_dict (dict): The serialized rubric model.
        algorithm_id (unicode): The algorithm to use for classification.
        course_id (unicode): The course identifier for the current problem.
        item_id (unicode): The item identifier for the current problem.

    Returns:
        dict with keys 'created_at', 'algorithm_id', 'course_id', and 'item_id',
        or None if no classifier set is available.
        Note that the course ID and item ID might differ from the current problem's
        if a classifier set from a different problem with a similar rubric
        is the best available match.

    Raises:
        AIGradingRequestError
        AIGradingInternalError

    """
    try:
        rubric = rubric_from_dict(rubric_dict)
        classifier_set = AIClassifierSet.most_recent_classifier_set(
            rubric, algorithm_id, course_id, item_id
        )
        if classifier_set is not None:
            return {
                'created_at': classifier_set.created_at,
                'algorithm_id': classifier_set.algorithm_id,
                'course_id': classifier_set.course_id,
                'item_id': classifier_set.item_id
            }
        else:
            return None
    except InvalidRubric:
        msg = u"Could not retrieve classifier set info: the rubric definition was not valid."
        logger.exception(msg)
        raise AIGradingRequestError(msg)
    except DatabaseError as ex:
        msg = u"An unexpected error occurred while retrieving classifier set info: {ex}".format(ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)
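A usage sketch, assuming rubric_dict, course_id, and item_id are already in scope; 'ease' is the algorithm ID used in the other examples:

info = get_classifier_set_info(rubric_dict, 'ease', course_id, item_id)
if info is not None:
    # The matched classifier set may come from a different course/item.
    logger.info(u"Classifier set trained at {}".format(info['created_at']))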
Example 9
def on_init(submission_uuid, rubric=None, algorithm_id=None):
    """
    Submit a response for AI assessment.
    This will:
        (a) create a workflow (database record) to track the grading task
        (b) if classifiers exist for the rubric, schedule an asynchronous grading task.

    Args:
        submission_uuid (str): The UUID of the submission to assess.

    Keyword Arguments:
        rubric (dict): Serialized rubric model.
        algorithm_id (unicode): Use only classifiers trained with the specified algorithm.

    Returns:
        grading_workflow_uuid (str): The UUID of the grading workflow.
            Usually the caller of `on_init()` won't need this (since the workers
            are parameterized by grading workflow UUID), but it's
            useful for testing.

    Raises:
        AIGradingRequestError
        AIGradingInternalError

    Example Usage:

    >>> on_init('74a9d63e8a5fea369fd391d07befbd86ae4dc6e2', rubric, 'ease')
    '10df7db776686822e501b05f452dc1e4b9141fe5'

    """
    if rubric is None:
        raise AIGradingRequestError(u'No rubric provided')

    if algorithm_id is None:
        raise AIGradingRequestError(u'No algorithm ID provided')

    try:
        workflow = AIGradingWorkflow.start_workflow(submission_uuid, rubric, algorithm_id)
    except (sub_api.SubmissionNotFoundError, sub_api.SubmissionRequestError) as ex:
        msg = (
            u"An error occurred while retrieving the "
            u"submission with UUID {uuid}: {ex}"
        ).format(uuid=submission_uuid, ex=ex)
        raise AIGradingRequestError(msg)
    except InvalidRubric as ex:
        msg = (
            u"An error occurred while parsing the serialized "
            u"rubric {rubric}: {ex}"
        ).format(rubric=rubric, ex=ex)
        raise AIGradingRequestError(msg)
    except (sub_api.SubmissionInternalError, DatabaseError) as ex:
        msg = (
            u"An unexpected error occurred while submitting an "
            u"essay for AI grading: {ex}"
        ).format(ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    # If we find classifiers for this rubric/algorithm
    # then associate the classifiers with the workflow
    # and schedule a grading task.
    # Otherwise, the task will need to be scheduled later,
    # once the classifiers have been trained.
    if workflow.classifier_set is not None:
        try:
            grading_tasks.grade_essay.apply_async(args=[workflow.uuid])
            logger.info((
                u"Scheduled grading task for AI grading workflow with UUID {workflow_uuid} "
                u"(submission UUID = {sub_uuid}, algorithm ID = {algorithm_id})"
            ).format(workflow_uuid=workflow.uuid, sub_uuid=submission_uuid, algorithm_id=algorithm_id))
            return workflow.uuid
        except (DatabaseError,) + ANTICIPATED_CELERY_ERRORS as ex:
            msg = (
                u"An unexpected error occurred while scheduling the "
                u"AI grading task for the submission with UUID {uuid}: {ex}"
            ).format(uuid=submission_uuid, ex=ex)
            logger.exception(msg)
            raise AIGradingInternalError(msg)
    else:
        logger.info((
            u"Cannot schedule a grading task for AI grading workflow with UUID {workflow_uuid} "
            u"because no classifiers are available for the rubric associated with submission {sub_uuid} "
            u"for the algorithm {algorithm_id}"
        ).format(workflow_uuid=workflow.uuid, sub_uuid=submission_uuid, algorithm_id=algorithm_id))
Example 10
    def test_retrieve_params_error(self, mock_call):
        mock_call.side_effect = AIGradingInternalError("Test error")
        with self.assert_retry(grade_essay, AIGradingInternalError):
            grade_essay(self.workflow_uuid)