Example #1
    def test_cancel_the_assessment_workflow_does_not_exist(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            }
        }

        # Check that the workflow has not been cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(submission["uuid"]))
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Cancelling a workflow for a non-existent submission raises AssessmentWorkflowError.
        with self.assertRaises(workflow_api.AssessmentWorkflowError):
            workflow_api.cancel_workflow(
                submission_uuid="1234567098789",
                comments="Inappropriate language",
                cancelled_by_id=ITEM_2['student_id'],
                assessment_requirements=requirements
            )

        # Status for workflow should not be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(submission["uuid"])
        self.assertNotEqual(workflow.status, 'cancelled')
Example #2
    def collect_ora2_responses(cls, course_id, desired_statuses=None):
        """
        Get information about all ora2 blocks in the course with response count for each step

        Args:
            course_id (string) - the course id of the course whose data we would like to return
            desired_statuses (list) - statuses to return in the result dict for each ora item

        Returns:
            A dict in the format:

            {
             'block-v1:test-org+cs101+2017_TEST+type@openassessment+block@fb668396b505470e914bad8b3178e9e7':
                 {'training': 0, 'self': 0, 'done': 2, 'peer': 1, 'staff': 0, 'total': 3},
             'block-v1:test-org+cs101+2017_TEST+type@openassessment+block@90b4edff50bc47d9ba037a3180c44e97':
                 {'training': 0, 'self': 2, 'done': 0, 'peer': 0, 'staff': 2, 'total': 4},
             ...
            }

        """
        if desired_statuses:
            statuses = [
                st for st in AssessmentWorkflow.STATUS_VALUES
                if st in desired_statuses
            ]
        else:
            statuses = AssessmentWorkflow.STATUS_VALUES

        items = AssessmentWorkflow.objects.filter(course_id=course_id,
                                                  status__in=statuses).values(
                                                      'item_id', 'status')

        result = defaultdict(lambda: {status: 0 for status in statuses})
        for item in items:
            item_id = item['item_id']
            status = item['status']
            result[item_id]['total'] = result[item_id].get('total', 0) + 1
            if status in statuses:
                result[item_id][status] += 1

        return result
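
A minimal usage sketch (not part of the original module), assuming collect_ora2_responses is exposed as a classmethod on AssessmentWorkflow, as its cls argument suggests; the course id and status list below are made-up examples:

# Illustrative only: the course id and desired statuses are assumptions.
counts = AssessmentWorkflow.collect_ora2_responses(
    course_id="course-v1:test-org+cs101+2017_TEST",
    desired_statuses=["peer", "self", "done"],
)
for item_id, status_counts in counts.items():
    print("{}: {}".format(item_id, status_counts))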
Example #3
    def test_cancel_the_assessment_workflow(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            }
        }

        # Check the workflow is not cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(submission["uuid"]))

        # Check the status is not cancelled.
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Check that points_earned is not 0.
        self.assertNotEqual(workflow['score'], 0)

        # Cancel the workflow for submission.
        workflow_api.cancel_workflow(
            submission_uuid=submission["uuid"],
            comments="Inappropriate language",
            cancelled_by_id=ITEM_2['student_id'],
            assessment_requirements=requirements
        )

        # Check workflow is cancelled.
        self.assertTrue(workflow_api.is_workflow_cancelled(submission["uuid"]))

        # Status for workflow should be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(submission["uuid"])
        self.assertEqual(workflow.status, 'cancelled')

        # With 0 points earned, the workflow score should be None.
        self.assertEqual(workflow.score, None)
Example #4
    def test_cancel_the_assessment_workflow_does_not_exist(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {"peer": {"must_grade": 1, "must_be_graded_by": 1}}

        # Check that the workflow has not been cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(
            submission["uuid"]))
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Cancelling a workflow for a non-existent submission raises AssessmentWorkflowError.
        with self.assertRaises(workflow_api.AssessmentWorkflowError):
            workflow_api.cancel_workflow(submission_uuid="1234567098789",
                                         comments="Inappropriate language",
                                         cancelled_by_id=ITEM_2['student_id'],
                                         assessment_requirements=requirements)

        # Status for workflow should not be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(
            submission["uuid"])
        self.assertNotEqual(workflow.status, 'cancelled')

def main():
    import datetime
    import itertools
    from django.contrib.auth.models import User
    from django.db import transaction
    from opaque_keys.edx.keys import CourseKey
    from openassessment.assessment.models import Assessment, AssessmentPart, StaffWorkflow
    from openassessment.workflow.models import AssessmentWorkflow, AssessmentWorkflowStep
    from student.models import anonymous_id_for_user, user_by_anonymous_id
    from submissions.models import Score, ScoreSummary, ScoreAnnotation, Submission

    old_scores = Score.objects.filter(submission__isnull=True, reset=False).order_by('id')
    updated_count = 0
    for score in old_scores:
        try:
            with transaction.atomic():
                # ScoreSummary is updated on Score saves, but for this script we don't want that.
                # The correct way would be to disconnect the post_save signal, but since the
                # receiver function is defined inside the class, we can't reference it (see the
                # sketch after this script). The workaround here is to prefetch the score summary
                # and re-save it afterwards to restore its original field values.
                score_summary = ScoreSummary.objects.get(student_item=score.student_item)

                # Update old override with submission from the score preceding it.
                # If none exists, look for it in the submissions table.
                preceding_score = Score.objects.filter(
                    student_item=score.student_item,
                    created_at__lte=score.created_at,
                    submission__isnull=False,
                ).order_by('-created_at')[:1]
                if preceding_score.count():
                    submission = preceding_score.get().submission
                else:
                    submission_qset = Submission.objects.filter(student_item=score.student_item)
                    if submission_qset.count() > 1:
                        raise Exception("MULTIPLE SUBMISSIONS FOR STUDENT_ITEM {}".format(score.student_item))
                    else:
                        submission = submission_qset[:1].get()
                score.submission = submission
                score.save()

                # Timestamp the reset score 1 second before the override for convenience when sorting in the db
                override_date = score.created_at - datetime.timedelta(seconds=1)
                # Create reset score
                Score.objects.create(
                    student_item=score.student_item,
                    submission=None,
                    points_earned=0,
                    points_possible=0,
                    created_at=override_date,
                    reset=True,
                )

                # Restore original score summary values
                score_summary.save()

                # Fetch staff id from score course for ScoreAnnotation
                course_id = CourseKey.from_string(score.student_item.course_id)
                staff = User.objects.filter(
                    courseaccessrole__role__in=['instructor', 'staff'],
                    courseaccessrole__course_id=course_id,
                )[:1].get()
                staff_id = anonymous_id_for_user(staff, course_id, save=False)

                # Create ScoreAnnotation
                score_annotation = ScoreAnnotation(
                    score=score,
                    annotation_type="staff_defined",
                    creator=staff_id,
                    reason="A staff member has defined the score for this submission",
                )
                score_annotation.save()

                # ORA2 Table Updates...
                # Fetch rubric from an existing assessment
                assessment = Assessment.objects.filter(submission_uuid=submission.uuid)[:1].get()
                rubric = assessment.rubric

                staff_assessment = Assessment.create(
                    rubric=rubric,
                    scorer_id=staff_id,
                    submission_uuid=submission.uuid,
                    score_type="ST",
                    scored_at=override_date,
                )

                # Fake criterion selections
                rubric_index = rubric.index
                assessment_parts = []

                criteria_without_options = rubric_index.find_criteria_without_options()
                criteria_with_options = set(rubric_index._criteria_index.values()) - criteria_without_options
                ordered_criteria = sorted(criteria_with_options, key=lambda criterion: criterion.order_num)
                criteria_options = [c.options.all() for c in ordered_criteria]
                # Just take the first combination of options which add up to the override point score
                for selection in itertools.product(*criteria_options):
                    total = sum(option.points for option in selection)
                    if total == score.points_earned:
                        for option in selection:
                            assessment_parts.append({
                                'criterion': option.criterion,
                                'option': option
                            })
                        break

                # Default to the first option for each criterion if no matching sum is found
                if not assessment_parts:
                    print "NO CLEAN SUM FOR SUBMISSION " + submission.uuid
                    for options in criteria_options:
                        assessment_parts.append({
                            'criterion': options[0].criterion,
                            'option': options[0],
                        })

                # Add feedback-only criteria
                for criterion in criteria_without_options:
                    assessment_parts.append({
                        'criterion': criterion,
                        'option': None
                    })

                AssessmentPart.objects.bulk_create([
                    AssessmentPart(
                        assessment=staff_assessment,
                        criterion=assessment_part['criterion'],
                        option=assessment_part['option'],
                        feedback=u""
                    )
                    for assessment_part in assessment_parts
                ])

                try:
                    staff_workflow = StaffWorkflow.objects.get(submission_uuid=submission.uuid)
                    staff_workflow.assessment = staff_assessment.id
                    staff_workflow.grading_completed_at = override_date
                except StaffWorkflow.DoesNotExist:
                    staff_workflow = StaffWorkflow(
                        scorer_id=staff_id,
                        course_id=score.student_item.course_id,
                        item_id=score.student_item.item_id,
                        submission_uuid=submission.uuid,
                        created_at=override_date,
                        grading_completed_at=override_date,
                        assessment=staff_assessment.id,
                    )
                staff_workflow.save()

                workflow = AssessmentWorkflow.get_by_submission_uuid(submission.uuid)
                try:
                    staff_step = workflow.steps.get(name='staff')
                    staff_step.assessment_completed_at = score.created_at
                    staff_step.submitter_completed_at = score.created_at
                    staff_step.save()
                except AssessmentWorkflowStep.DoesNotExist:
                    for step in workflow.steps.all():
                        step.assessment_completed_at = score.created_at
                        step.submitter_completed_at = score.created_at
                        step.order_num += 1
                        step.save()
                    workflow.steps.add(
                        AssessmentWorkflowStep(
                            name='staff',
                            order_num=0,
                            assessment_completed_at=score.created_at,
                            submitter_completed_at=score.created_at,
                        )
                    )

                # Update workflow status to done if it wasn't subsequently cancelled
                if workflow.status != 'cancelled':
                    workflow.status = 'done'
                    workflow.save()

            updated_count += 1
            user = user_by_anonymous_id(score.student_item.student_id)
            print(
                "Successfully updated score {} for user {} with email {} in course {} for item: {}".format(
                    score.id,
                    user.username,
                    user.email,
                    score.student_item.course_id,
                    score.student_item.item_id,
                )
            )
        except Exception as err:
            print("An error occurred updating score {}: {}".format(score.id, err))
            print("Please update this score manually and retry script.")
            break

    print("Script finished, number of scores updated: {}.".format(updated_count))