Esempio n. 1
0
    def test_errors(self, error, mock_call):
        """The signal receiver must log, not propagate, errors from the update."""
        # Set up a workflow for this test's submission
        workflow_api.create_workflow(self.submission_uuid, ['self'])

        # Configure the patched call to raise; sending the signal should
        # exercise the receiver's error handling rather than blow up here.
        mock_call.side_effect = error("OH NO!")
        assessment_complete_signal.send(
            sender=None,
            submission_uuid=self.submission_uuid
        )
Esempio n. 2
0
def create_assessment(grading_workflow_uuid, criterion_scores):
    """
    Complete the AI grading task by creating an assessment for a workflow.

    Args:
        grading_workflow_uuid (str): The UUID of the grading workflow.
        criterion_scores (dict): Dictionary mapping criteria names to integer scores.

    Returns:
        None

    Raises:
        AIGradingRequestError
        AIGradingInternalError

    """
    # Look up the workflow.  A missing workflow is treated as a caller
    # error, while a database failure is an internal error.
    try:
        wf = AIGradingWorkflow.objects.get(uuid=grading_workflow_uuid)
    except AIGradingWorkflow.DoesNotExist:
        raise AIGradingRequestError(
            u"Could not retrieve the AI grading workflow with uuid {}".format(grading_workflow_uuid)
        )
    except DatabaseError as ex:
        msg = (
            u"An unexpected error occurred while retrieving the "
            u"AI grading workflow with uuid {uuid}: {ex}"
        ).format(uuid=grading_workflow_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    # If another worker already completed this workflow (the task may be
    # picked up more than once), there is nothing left to do; otherwise
    # create the assessment and mark the workflow complete.
    try:
        if wf.is_complete:
            logger.info(
                u"Grading workflow with UUID {} is already marked complete".format(wf.uuid)
            )
        else:
            wf.complete(criterion_scores)
            logger.info(
                (
                    u"Created assessment for AI grading workflow with UUID {workflow_uuid} "
                    u"(algorithm ID {algorithm_id})"
                ).format(workflow_uuid=wf.uuid, algorithm_id=wf.algorithm_id)
            )
    except DatabaseError as ex:
        msg = (
            u"An unexpected error occurred while creating the assessment "
            u"for AI grading workflow with uuid {uuid}: {ex}"
        ).format(uuid=grading_workflow_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    # Notify the workflow API so students waiting on an AI assessment can
    # receive their score.  The signal receiver is responsible for catching
    # and logging any exception raised while updating the workflow.
    from openassessment.assessment.signals import assessment_complete_signal
    assessment_complete_signal.send(sender=None, submission_uuid=wf.submission_uuid)
Esempio n. 3
0
def create_assessment(grading_workflow_uuid, criterion_scores):
    """
    Create an AI assessment (complete the AI grading task).

    Args:
        grading_workflow_uuid (str): The UUID of the grading workflow.
        criterion_scores (dict): Dictionary mapping criteria names to integer scores.

    Returns:
        None

    Raises:
        AIGradingRequestError: The workflow with the given UUID does not exist.
        AIGradingInternalError: A database error occurred while retrieving
            or completing the workflow.

    """
    # Retrieve the workflow; distinguish "not found" (caller error)
    # from unexpected database failures (internal error).
    try:
        workflow = AIGradingWorkflow.objects.get(uuid=grading_workflow_uuid)
    except AIGradingWorkflow.DoesNotExist:
        msg = (u"Could not retrieve the AI grading workflow with uuid {}"
               ).format(grading_workflow_uuid)
        raise AIGradingRequestError(msg)
    except DatabaseError as ex:
        msg = (u"An unexpected error occurred while retrieving the "
               u"AI grading workflow with uuid {uuid}: {ex}").format(
                   uuid=grading_workflow_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    # Optimization: if the workflow has already been marked complete
    # (perhaps the task was picked up by multiple workers),
    # then we don't need to do anything.
    # Otherwise, create the assessment and mark the workflow complete.
    try:
        if not workflow.is_complete:
            workflow.complete(criterion_scores)
            logger.info((
                u"Created assessment for AI grading workflow with UUID {workflow_uuid} "
                u"(algorithm ID {algorithm_id})").format(
                    workflow_uuid=workflow.uuid,
                    algorithm_id=workflow.algorithm_id))
        else:
            msg = u"Grading workflow with UUID {} is already marked complete".format(
                workflow.uuid)
            logger.info(msg)
    except DatabaseError as ex:
        msg = (u"An unexpected error occurred while creating the assessment "
               u"for AI grading workflow with uuid {uuid}: {ex}").format(
                   uuid=grading_workflow_uuid, ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)

    # Fire a signal to update the workflow API
    # This will allow students to receive a score if they're
    # waiting on an AI assessment.
    # The signal receiver is responsible for catching and logging
    # all exceptions that may occur when updating the workflow.
    from openassessment.assessment.signals import assessment_complete_signal
    assessment_complete_signal.send(sender=None,
                                    submission_uuid=workflow.submission_uuid)
Esempio n. 4
0
    def test_errors(self, error, mock_call):
        """Errors raised during the signal update are caught and logged."""
        # Create the workflow the signal receiver will try to update
        workflow_api.create_workflow(self.submission_uuid, ['self'])

        # Make the patched workflow update raise; the receiver is
        # expected to swallow and log the exception.
        mock_call.side_effect = error("OH NO!")
        assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)
Esempio n. 5
0
    def test_update_signal_updates_workflow(self):
        """Sending the completion signal triggers a workflow model update."""
        # A workflow must exist for the submission before the signal fires
        workflow_api.create_workflow(self.submission_uuid, ['self'])

        # Replace the update method with a spy so we can observe the call
        patcher = mock.patch.object(AssessmentWorkflow, 'update_from_assessments')
        with patcher as update_spy:

            # Fire the signal that should drive the workflow update
            assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)

            # The receiver should have updated the workflow exactly once
            update_spy.assert_called_once_with(None)
Esempio n. 6
0
    def test_update_signal_updates_workflow(self):
        """Sending the completion signal updates the assessment workflow."""
        # Start a workflow for the submission
        workflow_api.create_workflow(self.submission_uuid, ['self'])

        # Spy on the workflow update call so we can verify it happens
        with mock.patch.object(AssessmentWorkflow,
                               'update_from_assessments') as mock_update:

            # Send a signal to update the workflow
            assessment_complete_signal.send(
                sender=None, submission_uuid=self.submission_uuid)

            # Verify that the workflow model update was called exactly once
            mock_update.assert_called_once_with(None)
Esempio n. 7
0
 def test_update_signal_no_submission_uuid(self):
     """Sending the signal without a submission UUID must not raise."""
     # Omit the submission UUID entirely; the receiver is expected
     # to catch and log the resulting error itself.
     assessment_complete_signal.send(sender=None)
Esempio n. 8
0
 def test_update_signal_no_workflow(self):
     """Sending the signal for a submission with no workflow is non-fatal."""
     # No workflow was created for this submission, so the receiver
     # should catch and log the lookup failure instead of raising.
     assessment_complete_signal.send(
         sender=None, submission_uuid=self.submission_uuid)
Esempio n. 9
0
 def test_update_signal_no_submission_uuid(self):
     """The receiver handles a signal sent with no submission UUID."""
     # Try to send the signal without specifying a submission UUID
     # The receiver should catch and log the exception, not propagate it
     assessment_complete_signal.send(sender=None)
Esempio n. 10
0
 def test_update_signal_no_workflow(self):
     """The receiver handles a signal for a submission with no workflow."""
     # Without defining a workflow, send the signal
     # The receiver should catch and log the exception, not propagate it
     assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)