Example 1
    def test_staff_debug_student_info_peer_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
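        # get_submission_to_assess assigns Tim's submission to Bob, which
        # peer_api requires before create_assessment. In the call below, the
        # empty dict and empty string are criterion feedback and overall
        # feedback, and the trailing 1 is the number of required peer grades.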
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'], dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Now Bob should be fully populated in the student info view.
        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
        self.assertIsNone(context['self_assessment'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example 2
    def test_staff_debug_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        path, context = xblock.get_student_info_path_and_context(request)
        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example 3
    def test_cancelled_submission_peer_assessment_render_path(self, xblock):
        # Test that peer assessment path should be oa_peer_cancelled.html for a cancelled submission.
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            },
        }

        workflow_api.cancel_workflow(
            submission_uuid=submission['uuid'],
            comments="Inappropriate language",
            cancelled_by_id=bob_item['student_id'],
            assessment_requirements=requirements
        )

        xblock.submission_uuid = submission["uuid"]
        path, context = xblock.peer_path_and_context(False)
        self.assertEquals("openassessmentblock/peer/oa_peer_cancelled.html", path)
Example 4
    def _create_student_and_submission(student, answer, date=None):
        new_student_item = STUDENT_ITEM.copy()
        new_student_item["student_id"] = student
        submission = sub_api.create_submission(new_student_item, answer, date)
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"])
        return submission, new_student_item
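Note that this helper passes no step list to create_workflow; as the final example below shows, a workflow created without explicit steps starts at the 'peer' step.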
Example 5
    def test_many_submissions(self):
        # Create a lot of submissions
        num_submissions = 234
        for index in range(num_submissions):
            student_item = {
                'student_id': "test_user_{}".format(index),
                'course_id': 'test_course',
                'item_id': 'test_item',
                'item_type': 'openassessment',
            }
            submission_text = "test submission {}".format(index)
            submission = sub_api.create_submission(student_item, submission_text)
            workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

        # Generate a CSV file for the submissions
        output_streams = self._output_streams(['submission'])
        writer = CsvWriter(output_streams)
        writer.write_to_csv('test_course')

        # Parse the generated CSV
        content = output_streams['submission'].getvalue()
        rows = content.split('\n')

        # Remove the first row (header) and last row (blank line)
        rows = rows[1:-1]

        # Check that we have the right number of rows
        self.assertEqual(len(rows), num_submissions)
Example 6
    def test_staff_area_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")))
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1",
                          context['submission']['answer']['parts'][0]['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_area/student_info.html",
                          path)
Example 7
    def test_staff_debug_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2"))
        )
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example 8
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        # Commonly chosen options for assessments
        options_selected = {
            "Ideas": "Good",
            "Content": "Poor",
        }

        criterion_feedback = {
            "Ideas": "Dear diary: Lots of creativity from my dream journal last night at 2 AM,",
            "Content": "Not as insightful as I had thought in the wee hours of the morning!"
        }

        overall_feedback = "I think I should tell more people about how important worms are for the ecosystem."

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            options_selected, dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            options_selected,
            criterion_feedback,
            overall_feedback,
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example 9
    def test_cancel_submission_full_flow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        incorrect_submission_uuid = 'abc'
        params = {"submission_uuid": incorrect_submission_uuid, "comments": "Inappropriate language."}
        # Expect a workflow-not-found error.
        resp = self.request(xblock, 'cancel_submission', json.dumps(params), response_format='json')
        self.assertIn("Error finding workflow", resp['msg'])
        self.assertEqual(False, resp['success'])

        # Verify that we can render without error
        params = {"submission_uuid": submission["uuid"], "comments": "Inappropriate language."}
        resp = self.request(xblock, 'cancel_submission', json.dumps(params), response_format='json')
        self.assertIn("The student submission has been removed from peer", resp['msg'])
        self.assertEqual(True, resp['success'])
Example 10
    def test_errors(self, error, mock_call):
        # Start a workflow for the submission
        workflow_api.create_workflow(self.submission_uuid, ['self'])

        # The receiver should catch and log the error
        mock_call.side_effect = error("OH NO!")
        assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)
Example 11
    def _create_submission(item, values, types):
        """ Create a submission and corresponding workflow. """
        submission = sub_api.create_submission(item, values)

        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], types)
        return submission
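A minimal sketch of how a test might call this helper; the STUDENT_ITEM fixture and the step list here are assumptions borrowed from the surrounding examples:

    item = STUDENT_ITEM.copy()
    item["student_id"] = "Bob"
    # Creates the submission, registers the peer workflow entry, and starts
    # the assessment workflow at the first step in the list.
    submission = _create_submission(item, {'text': "Bob Answer"}, ['peer', 'self'])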
Example 12
    def test_staff_debug_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob")

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item,
                                               {'text': "Bob Answer"})
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        path, context = xblock.get_student_info_path_and_context(request)
        self.assertEquals("Bob Answer",
                          context['submission']['answer']['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html",
                          path)
Example 13
    def create_workflow(self, submission_uuid):
        """
        Create a new workflow for a student submission.

        Args:
            submission_uuid (str): The UUID of the submission to associate
                with the workflow.

        Returns:
            None

        """
        steps = self._create_step_list()
        ai_module = self.get_assessment_module('example-based-assessment')
        on_init_params = {
            'ai': {
                'rubric':
                create_rubric_dict(self.prompts,
                                   self.rubric_criteria_with_labels),
                'algorithm_id':
                ai_module["algorithm_id"] if ai_module else None
            }
        }
        workflow_api.create_workflow(submission_uuid,
                                     steps,
                                     on_init_params=on_init_params)
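Here on_init_params carries step-specific setup into workflow creation: the 'ai' entry bundles the rubric and algorithm id so the example-based step can be initialized in the same call, with algorithm_id falling back to None when no example-based module is configured.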
Example 14
    def test_staff_area_student_info_with_cancelled_submission(self, xblock):
        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            },
        }

        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")))
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        workflow_api.cancel_workflow(submission_uuid=submission["uuid"],
                                     comments="Inappropriate language",
                                     cancelled_by_id=bob_item['student_id'],
                                     assessment_requirements=requirements)

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1",
                          context['submission']['answer']['parts'][0]['text'])
        self.assertIsNotNone(context['workflow_cancellation'])
        self.assertEquals("openassessmentblock/staff_area/student_info.html",
                          path)
Example 15
    def test_staff_debug_student_info_with_cancelled_submission(self, xblock):
        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            },
        }

        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2"))
        )
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        workflow_api.cancel_workflow(
            submission_uuid=submission["uuid"],
            comments="Inappropriate language",
            cancelled_by_id=bob_item['student_id'],
            assessment_requirements=requirements
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertIsNotNone(context['workflow_cancellation'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example 16
    def test_need_valid_submission_uuid(self, data):
        # submission doesn't exist
        with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
            workflow = workflow_api.create_workflow("xxxxxxxxxxx", data["steps"])

        # submission_uuid is the wrong type
        with self.assertRaises(workflow_api.AssessmentWorkflowRequestError):
            workflow = workflow_api.create_workflow(123, data["steps"])
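Both failure modes raise AssessmentWorkflowRequestError, so callers can treat a nonexistent and a malformed submission UUID the same way.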
Example 17
    def _create_student_and_submission(student, answer):
        """
        Helper method to create a student and submission for use in tests.
        """
        new_student_item = StaffGraderMixinTestBase._student_item_dict(student)
        submission = sub_api.create_submission(new_student_item, answer)
        workflow_api.create_workflow(submission["uuid"], ['staff'])
        return submission, new_student_item
Example 18
    def test_ai_score_set(self, mock_score, mock_is_finished):
        submission = sub_api.create_submission(ITEM_1, "Ultra Magnus fumble")
        mock_is_finished.return_value = True
        score = {"points_earned": 7, "points_possible": 10}
        mock_score.return_value = score
        workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)
        workflow = workflow_api.get_workflow_for_submission(submission["uuid"], {})
        self.assertEquals(workflow["score"]["points_earned"], score["points_earned"])
        self.assertEquals(workflow["score"]["points_possible"], score["points_possible"])
Example 19
    def test_create_ai_workflow_no_rubric(self, rubric, algorithm_id):
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        on_init_params = {
            'ai': {
                'rubric': rubric,
                'algorithm_id': algorithm_id,
            }
        }
        workflow_api.create_workflow(submission["uuid"], ["ai"], on_init_params)
Example 20
    def _create_submission(self, student_item_dict):
        """
        Creates a submission and initializes a peer grading workflow.
        """
        submission = sub_api.create_submission(student_item_dict, ANSWER)
        submission_uuid = submission['uuid']
        peer_api.on_start(submission_uuid)
        workflow_api.create_workflow(submission_uuid, STEPS)
        return submission
Example 21
    def test_create_ai_workflow_no_rubric(self, rubric, algorithm_id):
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        on_init_params = {
            'ai': {
                'rubric': rubric,
                'algorithm_id': algorithm_id,
            }
        }
        workflow_api.create_workflow(submission["uuid"], ["ai"], on_init_params)
Example 22
    def test_ai_score_set(self, mock_score, mock_is_finished):
        submission = sub_api.create_submission(ITEM_1, ANSWER_2)
        mock_is_finished.return_value = True
        score = {"points_earned": 7, "points_possible": 10}
        mock_score.return_value = score
        workflow_api.create_workflow(submission["uuid"], ["ai"], ON_INIT_PARAMS)
        workflow = workflow_api.get_workflow_for_submission(submission["uuid"], {})
        self.assertEquals(workflow["score"]["points_earned"], score["points_earned"])
        self.assertEquals(workflow["score"]["points_possible"], score["points_possible"])
Example 23
    def test_unable_to_load_api(self):
        submission = sub_api.create_submission({
            "student_id": "test student",
            "course_id": "test course",
            "item_id": "test item",
            "item_type": "openassessment",
        }, "test answer")

        with self.assertRaises(AssessmentWorkflowInternalError):
            workflow_api.create_workflow(submission['uuid'], ['self'])
Example 24
    def test_unable_to_load_api(self):
        submission = sub_api.create_submission({
            "student_id": "test student",
            "course_id": "test course",
            "item_id": "test item",
            "item_type": "openassessment",
        }, "test answer")

        with self.assertRaises(AssessmentWorkflowInternalError):
            workflow_api.create_workflow(submission['uuid'], ['self'], ON_INIT_PARAMS)
Example 25
    def submit_ora_test_data(self, course_id, submissions_config):
        """
        Run the submit action. For each specified submission, create the submission, create an assessment if specified,
        and create a lock if specified.
        """
        for ora_config in submissions_config:
            log.info('Creating test submissions for course %s', course_id)
            for submission_config in ora_config['submissions']:
                log.info("Creating submission for user %s",
                         submission_config['username'])
                student_item = self.student_item(submission_config['username'],
                                                 course_id,
                                                 ora_config['displayName'])
                # Submissions consist of username, a line break, and then some lorem
                text_response = submission_config[
                    'username'] + '\n' + generate_lorem_sentences()
                submission = sub_api.create_submission(
                    student_item, {'parts': [{
                        'text': text_response
                    }]})
                workflow_api.create_workflow(submission['uuid'], ['staff'])
                workflow_api.update_from_assessments(submission['uuid'], None)
                log.info("Created submission %s for user %s",
                         submission['uuid'], submission_config['username'])

                if submission_config['lockOwner']:
                    log.info("Creating lock on submission %s owned by %s",
                             submission['uuid'],
                             submission_config['lockOwner'])
                    SubmissionGradingLock.claim_submission_lock(
                        submission['uuid'], self.username_to_anonymous_user_id[
                            submission_config['lockOwner']])

                if submission_config['gradeData']:
                    grade_data = submission_config['gradeData']
                    log.info(
                        "Creating assessment from user %s for submission %s",
                        grade_data['gradedBy'], submission['uuid'])
                    block = self.display_name_to_block[
                        ora_config['displayName']]
                    rubric_dict = create_rubric_dict(
                        block.prompts, block.rubric_criteria_with_labels)
                    options_selected, criterion_feedback = self.api_format_criteria(
                        grade_data['criteria'], rubric_dict)
                    staff_api.create_assessment(
                        submission['uuid'],
                        self.username_to_anonymous_user_id[
                            grade_data['gradedBy']],
                        options_selected,
                        criterion_feedback,
                        grade_data['overallFeedback'],
                        rubric_dict,
                    )
                    workflow_api.update_from_assessments(
                        submission['uuid'], None)
Example 26
    def create_peer_submissions(self, student_item, peer_names, submission_text):
        """Create len(peer_names) submissions, and return them."""
        returned_subs = []
        for peer in peer_names:
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = peer

            scorer_sub = submissions_api.create_submission(scorer, {'text': submission_text})
            returned_subs.append(scorer_sub)
            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)
        return returned_subs
Example 27
    def test_update_signal_updates_workflow(self):
        # Start a workflow for the submission
        workflow_api.create_workflow(self.submission_uuid, ['self'])

        # Spy on the workflow update call
        with mock.patch.object(AssessmentWorkflow, 'update_from_assessments') as mock_update:

            # Send a signal to update the workflow
            assessment_complete_signal.send(sender=None, submission_uuid=self.submission_uuid)

            # Verify that the workflow model update was called
            mock_update.assert_called_once_with(None)
Example 28
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        # Commonly chosen options for assessments
        options_selected = {
            "Ideas": "Good",
            "Content": "Poor",
        }

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            options_selected, dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            options_selected,
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example 29
    def create_workflow(self, submission_uuid):
        """
        Create a new workflow for a student submission.

        Args:
            submission_uuid (str): The UUID of the submission to associate
                with the workflow.

        Returns:
            None

        """
        steps = self._create_step_list()
        workflow_api.create_workflow(submission_uuid, steps)
Example 30
    def create_workflow(self, submission_uuid):
        """
        Create a new workflow for a student submission.

        Args:
            submission_uuid (str): The UUID of the submission to associate
                with the workflow.

        Returns:
            None

        """
        steps = self._create_step_list()
        workflow_api.create_workflow(submission_uuid, steps, on_init_params={})
Example 31
    def _create_test_user(cls, identifier, user_type, create_submission=True):
        """ Create a TestUser, a namedtuple with a student_id, username, and potentially a submission """
        student_id = f"SWLV_{user_type}_{identifier}_student_id"
        if create_submission:
            student_item = cls._student_item(student_id)
            submission = sub_api.create_submission(student_item, ANSWER)
            workflow_api.create_workflow(submission["uuid"], ['staff'])
        else:
            submission = None
        return TestUser(
            username=f"SWLV_{user_type}_{identifier}_username",
            student_id=student_id,
            submission=submission,
        )
Example 32
    def test_upload(self):
        # Create an S3 bucket using the fake S3 implementation
        conn = boto3.client("s3")
        conn.create_bucket(Bucket=self.BUCKET_NAME)

        # Create some submissions to ensure that we cover
        # the progress indicator code.
        for index in range(50):
            student_item = {
                'student_id': f"test_user_{index}",
                'course_id': self.COURSE_ID,
                'item_id': 'test_item',
                'item_type': 'openassessment',
            }
            submission_text = f"test submission {index}"
            submission = sub_api.create_submission(student_item,
                                                   submission_text)
            workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

        # Create and upload the archive of CSV files
        # This should generate the files even though
        # we don't have any data available.
        cmd = upload_oa_data.Command()
        cmd.handle(self.COURSE_ID.encode('utf-8'), self.BUCKET_NAME)

        # Retrieve the uploaded file from the fake S3 implementation
        self.assertEqual(len(cmd.history), 1)
        bucket = conn.list_buckets()["Buckets"][0]["Name"]
        key = conn.list_objects(Bucket=bucket)["Contents"][0]["Key"]
        contents = BytesIO(
            conn.get_object(Bucket=self.BUCKET_NAME, Key=key)["Body"].read())

        # Expect that the contents contain all the expected CSV files
        with tarfile.open(mode="r:gz", fileobj=contents) as tar:
            file_sizes = {
                member.name: member.size
                for member in tar.getmembers()
            }
            for csv_name in self.CSV_NAMES:
                self.assertIn(csv_name, file_sizes)
                self.assertGreater(file_sizes[csv_name], 0)

        # Expect that we generated a URL for the bucket
        url = cmd.history[0]['url']
        parsed_url = urlparse(url)
        self.assertEqual("https", parsed_url.scheme)
        self.assertIn(parsed_url.netloc,
                      ["s3.eu-west-1.amazonaws.com", "s3.amazonaws.com"])
        self.assertIn(f"/{self.BUCKET_NAME}", parsed_url.path)
Example 33
    def _create_student_and_submission(student, answer, date=None, problem_steps=None):
        """
        Helper method to create a student and submission for use in tests.
        """
        new_student_item = STUDENT_ITEM.copy()
        new_student_item["student_id"] = student
        submission = sub_api.create_submission(new_student_item, answer, date)
        steps = []
        init_params = {}
        if problem_steps:
            steps = problem_steps
        if 'peer' in steps:
            peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], steps, init_params)
        return submission, new_student_item
Example 34
    def test_cancel_the_assessment_workflow_does_not_exist(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            }
        }

        # Check that the workflow is not cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(submission["uuid"]))
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Cancelling a nonexistent workflow raises DoesNotExist.
        with self.assertRaises(workflow_api.AssessmentWorkflowError):
            workflow_api.cancel_workflow(
                submission_uuid="1234567098789",
                comments="Inappropriate language",
                cancelled_by_id=ITEM_2['student_id'],
                assessment_requirements=requirements
            )

        # Status for workflow should not be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(submission["uuid"])
        self.assertNotEqual(workflow.status, 'cancelled')
Example 35
    def test_create_workflow(self, data):
        first_step = data["steps"][0] if data["steps"] else "peer"
        if "ai" in data["steps"]:
            first_step = data["steps"][1] if len(
                data["steps"]) > 1 else "waiting"
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"],
                                                data["steps"])

        workflow_keys = set(workflow.keys())
        self.assertEqual(
            workflow_keys, {
                'submission_uuid', 'status', 'created', 'modified', 'score',
                'assessment_score_priority'
            })
        self.assertEqual(workflow["submission_uuid"], submission["uuid"])
        self.assertEqual(workflow["status"], first_step)

        workflow_from_get = workflow_api.get_workflow_for_submission(
            submission["uuid"], data["requirements"])
        del workflow_from_get['status_details']
        self.assertEqual(workflow, workflow_from_get)

        # Test that the Peer Workflow is, or is not created, based on when peer
        # is a step in the workflow.
        if first_step == "peer":
            peer_workflow = PeerWorkflow.objects.get(
                submission_uuid=submission["uuid"])
            self.assertIsNotNone(peer_workflow)
        else:
            peer_workflows = list(
                PeerWorkflow.objects.filter(
                    submission_uuid=submission["uuid"]))
            self.assertFalse(peer_workflows)
Example 36
    def test_create_workflow(self, data):
        first_step = data["steps"][0] if data["steps"] else "peer"
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])

        workflow_keys = set(workflow.keys())
        self.assertEqual(
            workflow_keys,
            {
                'submission_uuid', 'uuid', 'status', 'created', 'modified', 'score'
            }
        )
        self.assertEqual(workflow["submission_uuid"], submission["uuid"])
        self.assertEqual(workflow["status"], first_step)

        workflow_from_get = workflow_api.get_workflow_for_submission(
            submission["uuid"], data["requirements"]
        )
        del workflow_from_get['status_details']
        self.assertEqual(workflow, workflow_from_get)

        # Test that the Peer Workflow is, or is not created, based on when peer
        # is a step in the workflow.
        if "peer" == first_step:
            peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
            self.assertIsNotNone(peer_workflow)
        else:
            peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
            self.assertFalse(peer_workflows)
Example 37
    def test_cancel_the_assessment_workflow(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {"peer": {"must_grade": 1, "must_be_graded_by": 1}}

        # Check the workflow is not cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(
            submission["uuid"]))

        # Check the status is not cancelled.
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Check that points_earned is not 0.
        self.assertNotEqual(workflow['score'], 0)

        # Cancel the workflow for submission.
        workflow_api.cancel_workflow(submission_uuid=submission["uuid"],
                                     comments="Inappropriate language",
                                     cancelled_by_id=ITEM_2['student_id'],
                                     assessment_requirements=requirements)

        # Check workflow is cancelled.
        self.assertTrue(workflow_api.is_workflow_cancelled(submission["uuid"]))

        # Status for workflow should be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(
            submission["uuid"])
        self.assertEqual(workflow.status, 'cancelled')

        # After cancellation, points_earned should be 0; a score of 0 earned
        # points is stored as None.
        self.assertEqual(workflow.score, None)
Example 38
    def test_get_the_cancelled_workflow(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {"peer": {"must_grade": 1, "must_be_graded_by": 1}}

        # Check the workflow is not cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(
            submission["uuid"]))

        # Check the status is not cancelled.
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Check that points_earned is not 0.
        self.assertNotEqual(workflow['score'], 0)

        cancelled_workflow = workflow_api.get_assessment_workflow_cancellation(
            submission["uuid"])
        self.assertIsNone(cancelled_workflow)

        # Cancel the workflow for submission.
        workflow_api.cancel_workflow(submission_uuid=submission["uuid"],
                                     comments="Inappropriate language",
                                     cancelled_by_id=ITEM_2['student_id'],
                                     assessment_requirements=requirements)

        # Check workflow is cancelled.
        self.assertTrue(workflow_api.is_workflow_cancelled(submission["uuid"]))

        workflow = workflow_api.get_assessment_workflow_cancellation(
            submission["uuid"])
        self.assertIsNotNone(workflow)
Example 39
    def test_unexpected_workflow_get_errors_wrapped(self, data, mock_get):
        with raises(workflow_api.AssessmentWorkflowInternalError):
            mock_get.side_effect = Exception("Kaboom!")
            submission = sub_api.create_submission(ITEM_1, "We talk TV!")
            workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])
            workflow_api.get_workflow_for_submission(workflow["uuid"], {})
Example 40
    def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"):
        """
        Create a submission and workflow with a given status.

        Args:
            student_id (unicode): Student ID for the submission.
            course_id (unicode): Course ID for the submission.
            item_id (unicode): Item ID for the submission
            status (unicode): One of acceptable status values (e.g. "peer", "self", "waiting", "done")

        Kwargs:
            answer (unicode): Submission answer.

        Returns:
            None
        """
        submission = sub_api.create_submission({
            "student_id": student_id,
            "course_id": course_id,
            "item_id": item_id,
            "item_type": "openassessment",
        }, answer)

        workflow = workflow_api.create_workflow(submission['uuid'])
        workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
        workflow_model.status = status
        workflow_model.save()
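Saving the status directly on the model keeps fixture setup simple, at the cost of bypassing the usual step-transition logic that update_from_assessments applies.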
Example 41
    def _create_dummy_submission(self, student_item):
        """
        Create a dummy submission for a student.

        Args:
            student_item (dict): Serialized StudentItem model.

        Returns:
            str: submission UUID
        """
        answer = {'text': "  ".join(loremipsum.get_paragraphs(5))}
        submission = sub_api.create_submission(student_item, answer)
        workflow_api.create_workflow(submission['uuid'], STEPS)
        workflow_api.update_from_assessments(
            submission['uuid'], {'peer': {'must_grade': 1, 'must_be_graded_by': 1}}
        )
        return submission['uuid']
Example 42
    def _setup_override_test(self, xblock, mock_score_data):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, 'Bob'
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item['item_id'] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': 'Bob Answer'})
        peer_api.on_start(submission['uuid'])
        workflow_api.create_workflow(submission['uuid'], ['peer'])

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {'student_id': 'Bob'}
        return request
Example 43
    def test_upload(self):
        # Create an S3 bucket using the fake S3 implementation
        conn = boto.connect_s3()
        conn.create_bucket(self.BUCKET_NAME)

        # Create some submissions to ensure that we cover
        # the progress indicator code.
        for index in range(50):
            student_item = {
                'student_id': "test_user_{}".format(index),
                'course_id': self.COURSE_ID,
                'item_id': 'test_item',
                'item_type': 'openassessment',
            }
            submission_text = "test submission {}".format(index)
            submission = sub_api.create_submission(student_item, submission_text)
            workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

        # Create and upload the archive of CSV files
        # This should generate the files even though
        # we don't have any data available.
        cmd = upload_oa_data.Command()
        cmd.handle(self.COURSE_ID.encode('utf-8'), self.BUCKET_NAME)

        # Retrieve the uploaded file from the fake S3 implementation
        self.assertEqual(len(cmd.history), 1)
        bucket = conn.get_all_buckets()[0]
        key = bucket.get_key(cmd.history[0]['key'])
        contents = StringIO(key.get_contents_as_string())

        # Expect that the contents contain all the expected CSV files
        with tarfile.open(mode="r:gz", fileobj=contents) as tar:
            file_sizes = {
                member.name: member.size
                for member in tar.getmembers()
            }
            for csv_name in self.CSV_NAMES:
                self.assertIn(csv_name, file_sizes)
                self.assertGreater(file_sizes[csv_name], 0)

        # Expect that we generated a URL for the bucket
        url = cmd.history[0]['url']
        self.assertIn("https://{}".format(self.BUCKET_NAME), url)
Example 44
    def create_workflow(self, submission_uuid):
        """
        Create a new workflow for a student submission.

        Args:
            submission_uuid (str): The UUID of the submission to associate
                with the workflow.

        Returns:
            None

        """
        steps = self._create_step_list()
        ai_module = self.get_assessment_module('example-based-assessment')
        on_init_params = {
            'ai': {
                'rubric': create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                'algorithm_id': ai_module["algorithm_id"] if ai_module else None
            }
        }
        workflow_api.create_workflow(submission_uuid, steps, on_init_params=on_init_params)
Example 45
    def create_submission(self, student_item_dict, student_sub):

        # Store the student's response text in a JSON-encodable dict
        # so that later we can add additional response fields.
        student_sub_dict = {'text': student_sub}

        submission = api.create_submission(student_item_dict, student_sub_dict)
        workflow_api.create_workflow(submission["uuid"])
        self.submission_uuid = submission["uuid"]

        # Emit analytics event...
        self.runtime.publish(
            self,
            "openassessmentblock.create_submission",
            {
                "submission_uuid": submission["uuid"],
                "attempt_number": submission["attempt_number"],
                "created_at": submission["created_at"],
                "submitted_at": submission["submitted_at"],
                "answer": submission["answer"],
            }
        )

        return submission
Example 46
    def test_create_workflow(self):
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        workflow = workflow_api.create_workflow(submission["uuid"])

        workflow_keys = set(workflow.keys())
        self.assertEqual(
            workflow_keys,
            {
                'submission_uuid', 'uuid', 'status', 'created', 'modified', 'score'
            }
        )
        self.assertEqual(workflow["submission_uuid"], submission["uuid"])
        self.assertEqual(workflow["status"], "peer")

        workflow_from_get = workflow_api.get_workflow_for_submission(
            submission["uuid"], REQUIREMENTS
        )
        del workflow_from_get['status_details']
        self.assertEqual(workflow, workflow_from_get)