Example #1
    def test_start_workflow_no_individual_submissions(self):
        submission = dict(self.MOCK_TEAM_SUBMISSION)
        submission['submission_uuids'] = []
        with self.assertRaises(AssessmentWorkflowInternalError):
            with self.mock_submissions_api_get(submission):
                TeamAssessmentWorkflow.start_workflow(
                    self.team_submission_uuid)
Example #2
def _get_workflow_model(team_submission_uuid):
    """
    Returns the `TeamAssessmentWorkflow` model associated with the
    given `team_submission_uuid`.

    Raises:
        AssessmentWorkflowRequestError if `team_submission_uuid` is not a string
        AssessmentWorkflowNotFoundError if no workflow matches the given UUID
        AssessmentWorkflowInternalError if the lookup fails unexpectedly
    """
    if not isinstance(team_submission_uuid, str):
        raise AssessmentWorkflowRequestError("team_submission_uuid must be a string")

    try:
        team_workflow = TeamAssessmentWorkflow.get_by_team_submission_uuid(team_submission_uuid)
    except Exception as exc:
        err_msg = (
            "Could not get team assessment workflow with team_submission_uuid {uuid} due to error: {exc}"
        ).format(uuid=team_submission_uuid, exc=exc)
        logger.exception(err_msg)
        raise AssessmentWorkflowInternalError(err_msg) from exc

    if team_workflow is None:
        err_msg = (
            "No team assessment workflow matching team_submission_uuid {uuid}"
        ).format(uuid=team_submission_uuid)
        raise AssessmentWorkflowNotFoundError(err_msg)

    return team_workflow
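
For context, a minimal sketch of how a caller might distinguish the three documented exceptions. The `resolve_team_workflow` helper is purely illustrative (not part of the API) and assumes the same module context as the example above (models, exceptions, logger already imported).

def resolve_team_workflow(team_submission_uuid):
    """Illustrative only: map the documented lookup errors onto simple outcomes."""
    # AssessmentWorkflowInternalError is deliberately left to propagate.
    try:
        return _get_workflow_model(team_submission_uuid)
    except AssessmentWorkflowRequestError:
        # Malformed input (non-string UUID); let the caller see the original error.
        raise
    except AssessmentWorkflowNotFoundError:
        # No workflow exists yet for this team submission; treat as "not started".
        return None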
Example #3
def create_workflow(team_submission_uuid):
    """
    A team submission should only be assessable by staff.  Therefore, unlike
    the analogous `create_workflow()` method for individual submissions,
    we don't accept `steps` or `on_init_params` as parameters to this function,
    since those are only used to indicate which assessment steps (e.g. "peer", "self")
    are to be included in the workflow.

    Raises:
        AssessmentWorkflowInternalError on error
    """
    try:
        team_workflow = TeamAssessmentWorkflow.start_workflow(team_submission_uuid)
        logger.info((
            "Started team assessment workflow for "
            "team submission UUID {uuid}"
        ).format(uuid=team_submission_uuid))
        return team_workflow
    except Exception as exc:
        err_msg = (
            "An unexpected error occurred while creating "
            "the workflow for team submission UUID {uuid}"
        ).format(uuid=team_submission_uuid)
        logger.exception(err_msg)
        raise AssessmentWorkflowInternalError(err_msg) from exc
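
A hedged usage sketch, assuming the module context above and an existing team submission; the UUID below is a placeholder, not real data.

# Placeholder UUID for illustration only.
team_submission_uuid = '11111111-2222-3333-4444-555555555555'

team_workflow = create_workflow(team_submission_uuid)
# Per the tests elsewhere in this module, a new team workflow starts in the
# 'teams' status and carries a reference (individual) submission UUID.
print(team_workflow.status, team_workflow.submission_uuid)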
Example #4
    def test_update_from_assessments_old_and_new_points_equal(
            self, mock_set_team_score):
        """ There is already an equal score recorded in the submissions API """
        submissions_api_fake_score = {
            'annotations': [{
                'annotation_type': TeamAssessmentWorkflow.STAFF_ANNOTATION_TYPE,
            }],
            'points_earned': 9,
        }
        assessment_api_fake_score = {
            "points_earned": 9,
            "points_possible": 10,
            "contributing_assessments": ['assessment_1_id'],
            "staff_id": 'staff_id',
        }
        with self.mock_submissions_api_get():
            workflow = TeamAssessmentWorkflow.start_workflow(
                self.team_submission_uuid)

        self.mock_assessment_api.assessment_is_finished.return_value = True
        self._update_from_assessments(workflow, submissions_api_fake_score,
                                      assessment_api_fake_score)
        workflow.refresh_from_db()

        self.assertEqual(workflow.status, TeamAssessmentWorkflow.STATUS.done)
        self.assertEqual(workflow._team_staff_step.assessment_completed_at,
                         now())  # pylint: disable=protected-access
        mock_set_team_score.assert_not_called()
Example #5
    def test_get_steps_multiple_step_error(self):
        with self.mock_submissions_api_get():
            workflow = TeamAssessmentWorkflow.start_workflow(self.team_submission_uuid)
        AssessmentWorkflowStepFactory.create(workflow=workflow)
        workflow.refresh_from_db()
        with self.assertRaises(AssessmentWorkflowInternalError):
            workflow._get_steps()  # pylint: disable=protected-access
Example #6
    def test_get_steps(self):
        with self.mock_submissions_api_get():
            workflow = TeamAssessmentWorkflow.start_workflow(
                self.team_submission_uuid)
        steps = workflow._get_steps()  # pylint: disable=protected-access
        self.assertEqual(len(steps), 1)
        self.assertEqual(steps[0].name,
                         TeamAssessmentWorkflow.TEAM_STAFF_STEP_NAME)
Example #7
    def test_get_steps_wrong_type(self):
        with self.mock_submissions_api_get():
            workflow = TeamAssessmentWorkflow.start_workflow(self.team_submission_uuid)
        step = workflow._get_steps()[0]  # pylint: disable=protected-access
        step.name = 'peer'
        step.save()
        with self.assertRaises(AssessmentWorkflowInternalError):
            workflow._get_steps()  # pylint: disable=protected-access
Example #8
def is_workflow_cancelled(team_submission_uuid):
    """
    Check whether the team assessment workflow for the given team submission is cancelled.

    Returns False if no workflow exists or the lookup raises an AssessmentWorkflowError.
    """
    try:
        workflow = TeamAssessmentWorkflow.get_by_team_submission_uuid(team_submission_uuid)
        return workflow.is_cancelled if workflow else False
    except AssessmentWorkflowError:
        return False
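
A tiny, hypothetical guard built on this check, assuming the module context above; note that lookup failures deliberately read as "not cancelled" rather than raising.

def should_offer_for_staff_grading(team_submission_uuid):
    # Hypothetical helper: cancelled workflows are excluded from the staff grading pool.
    return not is_workflow_cancelled(team_submission_uuid)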
Example #9
def cancel_workflow(team_submission_uuid, comments, cancelled_by_id):
    """
    Add an entry in AssessmentWorkflowCancellation table for a TeamAssessmentWorkflow.

    A TeamAssessmentWorkflow which has been cancelled is no longer included in the
    staff grading pool.

    Team workflows follow the same cancellation flow as individual workflows,
    but operate on the reference submission.
    """
    try:
        submission_uuid = _get_workflow_model(
            team_submission_uuid).submission_uuid
        TeamAssessmentWorkflow.cancel_workflow(
            submission_uuid, comments, cancelled_by_id,
            TeamAssessmentWorkflow.REQUIREMENTS)
    except Exception as exc:
        err_msg = (
            "Could not cancel team assessment workflow with team_submission_uuid {uuid} due to error: {exc}"
        ).format(uuid=team_submission_uuid, exc=exc)
        logger.exception(err_msg)
        raise AssessmentWorkflowInternalError(err_msg) from exc
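
A hedged sketch of the cancel-then-verify flow, assuming the module context above; the comment text and staff id are placeholders.

cancel_workflow(
    team_submission_uuid,
    comments="Cancelled at the team's request",  # placeholder reason
    cancelled_by_id='staff_user_id',             # placeholder staff id
)
# After cancellation the workflow reports itself as cancelled and drops out
# of the staff grading pool.
assert is_workflow_cancelled(team_submission_uuid)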
Example #10
    def test_start_workflow(self):
        with self.mock_submissions_api_get():
            team_workflow = TeamAssessmentWorkflow.start_workflow(self.team_submission_uuid)
        self.assertEqual(team_workflow.team_submission_uuid, self.team_submission_uuid)
        self.assertIn(team_workflow.submission_uuid, self.submission_uuids)
        self.assertEqual(team_workflow.status, TeamAssessmentWorkflow.STATUS.teams)
        self.assertEqual(team_workflow.course_id, self.course_id)
        self.assertEqual(team_workflow.item_id, self.item_id)

        step_names = [step.name for step in team_workflow.steps.all()]
        self.assertEqual(step_names, ['teams'])

        self.mock_assessment_api.on_init.assert_called_once()
Example #11
    def collect_ora2_responses(cls, course_id, desired_statuses=None):
        """
        Get information about all ora2 blocks in the course with response count for each step

        Args:
            course_id (string) - the course id of the course whose data we would like to return
            desired_statuses (list) - statuses to return in the result dict for each ora item

        Returns:
            A dict in the format:

            {
             'block-v1:test-org+cs101+2017_TEST+type@openassessment+block@fb668396b505470e914bad8b3178e9e7':
                 {'training': 0, 'self': 0, 'done': 2, 'peer': 1, 'staff': 0, 'total': 3},
             'block-v1:test-org+cs101+2017_TEST+type@openassessment+block@90b4edff50bc47d9ba037a3180c44e97':
                 {'training': 0, 'self': 2, 'done': 0, 'peer': 0, 'staff': 2, 'total': 4},
             ...
            }

        """

        all_valid_ora_statuses = set()
        all_valid_ora_statuses.update(AssessmentWorkflow().STATUS_VALUES)
        all_valid_ora_statuses.update(TeamAssessmentWorkflow().STATUS_VALUES)

        if desired_statuses:
            statuses = [
                st for st in all_valid_ora_statuses if st in desired_statuses
            ]
        else:
            statuses = all_valid_ora_statuses

        items = AssessmentWorkflow.objects.filter(
            course_id=course_id, status__in=statuses,
        ).values('item_id', 'status')

        result = defaultdict(lambda: {status: 0 for status in statuses})
        for item in items:
            item_id = item['item_id']
            status = item['status']
            result[item_id]['total'] = result[item_id].get('total', 0) + 1
            if status in statuses:
                result[item_id][status] += 1

        return result
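
A hedged usage sketch; the class that owns `collect_ora2_responses` is not shown above, so `WorkflowReports` is a placeholder name, and the course id is illustrative. Passing `desired_statuses` restricts both the per-status counters and the 'total' count to workflows in those statuses.

# 'WorkflowReports' stands in for whatever class defines collect_ora2_responses.
counts = WorkflowReports.collect_ora2_responses(
    course_id='course-v1:test-org+cs101+2017_TEST',  # illustrative course id
    desired_statuses=['staff', 'done'],
)
for item_id, per_status in counts.items():
    # e.g. {'staff': 2, 'done': 1, 'total': 3}
    print(item_id, per_status)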
Example #12
    def test_update_from_assessments(self, old_score_points_earned,
                                     mock_set_team_score):
        """
        There is no score recorded in the submissions API, or the recorded score differs from
        the one we got from the assessment module.
        """
        submissions_api_fake_score = None
        if old_score_points_earned:
            submissions_api_fake_score = {
                'annotations': [{
                    'annotation_type': TeamAssessmentWorkflow.STAFF_ANNOTATION_TYPE,
                }],
                'points_earned': old_score_points_earned,
            }

        assessment_api_fake_score = {
            "points_earned": 9,
            "points_possible": 10,
            "contributing_assessments": ['assessment_1_id'],
            "staff_id": 'staff_id',
        }

        with self.mock_submissions_api_get():
            workflow = TeamAssessmentWorkflow.start_workflow(
                self.team_submission_uuid)

        self.mock_assessment_api.assessment_is_finished.return_value = True
        self._update_from_assessments(workflow, submissions_api_fake_score,
                                      assessment_api_fake_score)

        workflow.refresh_from_db()
        self.assertEqual(workflow.status, TeamAssessmentWorkflow.STATUS.done)
        self.assertEqual(workflow._team_staff_step.assessment_completed_at,
                         now())  # pylint: disable=protected-access
        mock_set_team_score.assert_called_with(
            self.team_submission_uuid,
            9,
            10,
            annotation_creator='staff_id',
            annotation_type=TeamAssessmentWorkflow.STAFF_ANNOTATION_TYPE,
            annotation_reason='A staff member has defined the score for this submission',
        )