Пример #1
0
 def test_get_team_submission_invalid_uuid(self):
     """
     Verify that team_api.get_team_submission raises TeamSubmissionInternalError
     when called with a string that is not a valid UUID.
     """
     bogus_uuid = 'thisisntauuid'
     expected_message = 'not a valid UUID'
     with self.assertRaisesMessage(TeamSubmissionInternalError, expected_message):
         team_api.get_team_submission(bogus_uuid)
Пример #2
0
 def test_get_team_submission_missing(self):
     """
     Verify that team_api.get_team_submission raises TeamSubmissionNotFoundError
     when no TeamSubmission exists for the given UUID.
     """
     nonexistent_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
     with self.assertRaises(TeamSubmissionNotFoundError):
         team_api.get_team_submission(nonexistent_uuid)
Пример #3
0
    def start_workflow(cls, team_submission_uuid):  # pylint: disable=arguments-differ
        """
        Start a team workflow for the given team submission.

        Looks up the team submission, picks its first individual submission as
        the reference learner submission, creates the workflow in the
        'waiting' status with a single team-staff step, and notifies the team
        staff assessment API via on_init().

        Args:
            team_submission_uuid (str): UUID of the team submission to start
                a workflow for.

        Returns:
            TeamAssessmentWorkflow: the newly created workflow.

        Raises:
            AssessmentWorkflowInternalError: if the team submission contains
                no individual submissions.
        """
        team_submission_dict = sub_team_api.get_team_submission(
            team_submission_uuid)
        try:
            # The workflow tracks a single "reference" learner submission;
            # any team member's submission works, so take the first.
            reference_learner_submission_uuid = team_submission_dict[
                'submission_uuids'][0]
        except IndexError as ex:
            msg = f'No individual submission found for team submission uuid {team_submission_uuid}'
            logger.exception(msg)
            raise AssessmentWorkflowInternalError(msg) from ex

        # Create the workflow in the database
        # For now, set the status to waiting; we'll modify it later
        team_workflow = cls.objects.create(
            team_submission_uuid=team_submission_uuid,
            submission_uuid=reference_learner_submission_uuid,
            status=TeamAssessmentWorkflow.STATUS.waiting,
            course_id=team_submission_dict['course_id'],
            item_id=team_submission_dict['item_id'])
        team_staff_step = AssessmentWorkflowStep.objects.create(
            workflow=team_workflow, name=cls.TEAM_STAFF_STEP_NAME, order_num=0)
        team_workflow.steps.add(team_staff_step)

        team_assessment_api = team_staff_step.api()
        team_assessment_api.on_init(team_submission_uuid)

        return team_workflow
Пример #4
0
def on_init(team_submission_uuid):
    """
    Create a new team staff workflow for a student item and submission.

    Creates a unique team staff workflow for a student item, associated with a
    team submission.

    Note that the staff workflow begins things in on_init() instead of
    on_start(), because staff should be able to access the submission
    regardless of which state the workflow is currently in.

    Args:
        team_submission_uuid (str): The team submission associated with this workflow.

    Returns:
        None

    Raises:
        StaffAssessmentInternalError: Raised when there is an internal error
            creating the Workflow.
    """
    try:
        team_submission = team_submissions_api.get_team_submission(
            team_submission_uuid)
        # get_or_create makes this idempotent: repeated on_init calls for the
        # same team submission won't create duplicate workflows.
        TeamStaffWorkflow.objects.get_or_create(
            course_id=team_submission['course_id'],
            item_id=team_submission['item_id'],
            team_submission_uuid=team_submission_uuid)
    except DatabaseError as exc:
        error_message = (
            "An internal error occurred while creating a new team staff workflow for team submission {}"
        ).format(team_submission_uuid)
        logger.exception(error_message)
        # Chain explicitly so the original DatabaseError is preserved as __cause__.
        raise StaffAssessmentInternalError(error_message) from exc
Пример #5
0
def get_assessment_scores_by_criteria(team_submission_uuid):
    """Get the staff score for each rubric criterion

    Args:
        team_submission_uuid (str): The team submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the staff assessments.

    Raises:
        StaffAssessmentInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.
    """
    try:
        # Get most recently graded assessment for a team submission
        team_submission = team_submissions_api.get_team_submission(
            team_submission_uuid)
        # NOTE(review): relies on the Assessment model's default ordering to
        # make [:1] the most recent assessment — confirm against the model Meta.
        assessments = list(
            Assessment.objects.filter(
                submission_uuid__in=team_submission['submission_uuids'],
                score_type=STAFF_TYPE,
            )[:1])

        scores = Assessment.scores_by_criterion(assessments)
        # Since this is only being sent one score, the median score will be the
        # same as the only score.
        return Assessment.get_median_score_dict(scores)
    except DatabaseError as exc:
        error_message = "Error getting staff assessment scores for {}".format(
            team_submission_uuid)
        logger.exception(error_message)
        # Chain explicitly so the original DatabaseError is preserved as __cause__.
        raise StaffAssessmentInternalError(error_message) from exc
Пример #6
0
    def get_submission_info(self, submission_uuid, _, suffix=''):  # pylint: disable=unused-argument
        """
        Return a dict representation of a submission in the form
        {
            'text': <list of strings representing the raw response for each prompt>
            'files': <list of:>
                {
                    'download_url': <file url>
                    'description': <file description>
                    'name': <file name>
                }
        }
        """
        try:
            # Team assignments store one submission per team; otherwise
            # look up the individual learner's submission.
            lookup = get_team_submission if self.is_team_assignment() else get_submission
            submission = lookup(submission_uuid)
            raw_answer = submission.get('answer')
            answer = OraSubmissionAnswerFactory.parse_submission_raw_answer(raw_answer)
        except SubmissionError as err:
            raise JsonHandlerError(404, str(err)) from err
        except VersionNotFoundException as err:
            raise JsonHandlerError(500, str(err)) from err

        file_entries = [
            SubmissionDetailFileSerilaizer(file_data).data
            for file_data in self.get_download_urls_from_submission(submission)
        ]
        return {
            'files': file_entries,
            'text': answer.get_text_responses(),
        }
Пример #7
0
def create_assessment(
        team_submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at=None
):
    """
    Creates an assessment for each member of the submitting team.

    Closely mirrors openassessment.assessment.api.staff.py::create_assessment

    Can use _complete_assessment from Staff API as is, but has the side-effect
    of only associating the last graded assessment with the workflow

    Returns:
        dict: the Assessment model, serialized as a dict.
    """
    try:
        # A missing staff workflow is tolerated: _complete_assessment
        # accepts None for the workflow argument.
        try:
            scorer_workflow = TeamStaffWorkflow.objects.get(
                team_submission_uuid=team_submission_uuid)
        except TeamStaffWorkflow.DoesNotExist:
            scorer_workflow = None

        # Grade every individual submission that belongs to the team.
        team_submission = team_submissions_api.get_team_submission(team_submission_uuid)
        return [
            full_assessment_dict(
                _complete_assessment(
                    member_submission_uuid,
                    scorer_id,
                    options_selected,
                    criterion_feedback,
                    overall_feedback,
                    rubric_dict,
                    scored_at,
                    scorer_workflow,
                )
            )
            for member_submission_uuid in team_submission['submission_uuids']
        ]

    except InvalidRubric as ex:
        error_message = "The rubric definition is not valid."
        logger.exception(error_message)
        raise StaffAssessmentRequestError(error_message) from ex
    except InvalidRubricSelection as ex:
        error_message = "Invalid options were selected in the rubric."
        logger.warning(error_message, exc_info=True)
        raise StaffAssessmentRequestError(error_message) from ex
    except DatabaseError as ex:
        error_message = (
            "An error occurred while creating an assessment by the scorer with this ID: {}"
        ).format(scorer_id)
        logger.exception(error_message)
        raise StaffAssessmentInternalError(error_message) from ex
Пример #8
0
    def test_get_team_submission(self):
        """
        Verify that team_api.get_team_submission returns the serialized form
        of an existing team submission.
        """
        submission_model = self._make_team_submission(create_submissions=True)

        actual = team_api.get_team_submission(submission_model.uuid)
        expected = TeamSubmissionSerializer(submission_model).data
        self.assertDictEqual(actual, expected)
Пример #9
0
 def wrapped_handler(self, data, suffix=""):  # pylint: disable=unused-argument
     """
     Extract and (optionally) validate the submission_uuid from the request
     body, mapping submission-API errors to JSON handler errors, then delegate
     to the wrapped handler.
     """
     submission_uuid = data.get('submission_uuid', None)
     if not submission_uuid:
         raise JsonHandlerError(400,
                                "Body must contain a submission_uuid")
     if validate:
         try:
             # Validation only checks that the submission exists; the
             # returned value is intentionally discarded.
             if self.is_team_assignment():
                 get_team_submission(submission_uuid)
             else:
                 get_submission(submission_uuid)
         except SubmissionNotFoundError as exc:
             raise JsonHandlerError(404,
                                    "Submission not found") from exc
         except SubmissionRequestError as exc:
             raise JsonHandlerError(
                 400, "Bad submission_uuid provided") from exc
         # Catch-all boundary: any other failure (including internal
         # submission errors) surfaces as a 500 to the client.
         except Exception as exc:  # pylint: disable=broad-except
             raise JsonHandlerError(
                 500, "Internal error getting submission info") from exc
     return handler(self, submission_uuid, data, suffix=suffix)
Пример #10
0
def get_submission_to_assess(course_id, item_id, scorer_id):
    """
    Get a team submission for staff evaluation.

    Retrieves a team submission for assessment for the given staff member.

    Args:
        course_id (str): The course that we would like to fetch submissions from.
        item_id (str): The student_item (problem) that we would like to retrieve submissions for.
        scorer_id (str): The user id of the staff member scoring this submission

    Returns:
        dict: A student submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        StaffAssessmentInternalError: Raised when there is an internal error
            retrieving staff workflow information.

    Examples:
        >>> get_submission_to_assess("a_course_id", "an_item_id", "a_scorer_id")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': { ... }
        }

    """
    team_submission_uuid = TeamStaffWorkflow.get_submission_for_review(
        course_id, item_id, scorer_id)
    # Guard clause: nothing in the grading pool for this staff member.
    if not team_submission_uuid:
        logger.info(
            "No team submission found for staff to assess (%s, %s)",
            course_id,
            item_id,
        )
        return None
    try:
        return team_submissions_api.get_team_submission(team_submission_uuid)
    except DatabaseError as exc:
        error_message = (
            "Could not find a team submission with the uuid {}"
        ).format(team_submission_uuid)
        logger.exception(error_message)
        # Chain explicitly so the original DatabaseError is preserved as __cause__.
        raise StaffAssessmentInternalError(error_message) from exc
Пример #11
0
def get_latest_staff_assessment(team_submission_uuid):
    """
    Retrieve the latest staff assessment for a team submission.

    Args:
        team_submission_uuid (str): The UUID of the team submission being assessed.

    Returns:
        dict: The serialized assessment model
        or None if no assessments are available

    Raises:
        StaffAssessmentInternalError if there are problems connecting to the database.

    Example usage:

    >>> get_latest_staff_assessment('10df7db776686822e501b05f452dc1e4b9141fe5')
    {
        'points_earned': 6,
        'points_possible': 12,
        'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284 tzinfo=<UTC>),
        'scorer': "staff",
        'feedback': ''
    }

    """
    try:
        # Get the reference submissions
        team_submission = team_submissions_api.get_team_submission(
            team_submission_uuid)

        # Return the most-recently graded assessment for any team member's submission.
        # NOTE(review): "most recent" relies on the Assessment model's default
        # ordering — confirm the model Meta orders by scored_at descending.
        assessment = Assessment.objects.filter(
            submission_uuid__in=team_submission['submission_uuids'],
            score_type=STAFF_TYPE,
        ).first()
    except DatabaseError as ex:
        msg = ("An error occurred while retrieving staff assessments "
               "for the submission with UUID {uuid}: {ex}").format(
                   uuid=team_submission_uuid, ex=ex)
        logger.exception(msg)
        # Chain explicitly so the original DatabaseError is preserved as __cause__.
        raise StaffAssessmentInternalError(msg) from ex

    if assessment:
        return full_assessment_dict(assessment)

    return None
Пример #12
0
    def clear_team_state(self, user_id, course_id, item_id, requesting_user_id,
                         submissions):
        """
        This is called from clear_student_state (which is called from the LMS runtime) when the xblock is a team
        assignment, to clear student state for an entire team for a given problem. It will cancel the workflow
        to remove it from the grading pools, and pass through to the submissions team API to orphan the team
        submission and individual submissions so that the team can create a new submission.
        """
        student_item_string = "course {} item {} user {}".format(
            course_id, item_id, user_id)

        if not submissions:
            logger.warning(
                'Attempted to reset team state for %s but no submission was found',
                student_item_string)
            return
        if len(submissions) != 1:
            logger.warning(
                'Unexpected multiple individual submissions for team assignment. %s',
                student_item_string)

        submission = submissions[0]
        # BUG FIX: previously this was str(submission.get(..., None)), which
        # turned a missing key into the truthy string 'None' and made the
        # guard below unreachable. Check the raw value before stringifying.
        team_submission_uuid = submission.get('team_submission_uuid', None)
        if not team_submission_uuid:
            logger.warning(
                'Attempted to reset team state for %s but submission %s has no team_submission_uuid',
                student_item_string, submission['uuid'])
            return
        team_submission_uuid = str(team_submission_uuid)
        # Remove the submission from grading pool
        self._cancel_team_workflow(team_submission_uuid,
                                   "Student and team state cleared",
                                   requesting_user_id)

        from submissions import team_api as team_submissions_api

        # Clean up shared files for the team
        team_id = team_submissions_api.get_team_submission(
            team_submission_uuid).get('team_id', None)
        delete_shared_files_for_team(course_id, item_id, team_id)

        # Tell the submissions API to orphan the submissions to prevent them from being accessed
        team_submissions_api.reset_scores(team_submission_uuid,
                                          clear_state=True)
Пример #13
0
    def add_team_submission_context(self,
                                    context,
                                    team_submission_uuid=None,
                                    individual_submission_uuid=None,
                                    transform_usernames=False):
        """
        Adds team submission information to context dictionary, based on existing team submissions
        Specifically team name and team_usernames

        Params:
            - context (dict): a context dict for rendering a page that we will add values to
            - team_submission_uuid (string): [optional] the uuid of the team submission we want to add context info for
            - individual_submission_uuid (string): [optional] the uuid of an individual submission that's a part of
                                                   the team submission for which we want to add context info
            - transform_usernames (bool): [optional default: False] If False, context['team_usernames'] will be a list
                                          of username strings. If True, it will be a string, in the form
                                          "Username1, Username2, ... UsernameN, and UsernameN+1"

        One of team_submission_uuid and individual_submission_uuid are required, and if they are both provided,
        individual_submission_uuid will be ignored.
        """
        # At least one lookup key is required.
        if not (team_submission_uuid or individual_submission_uuid):
            raise TypeError(
                "One of team_submission_uuid or individual_submission_uuid must be provided"
            )
        # team_submission_uuid wins when both are given.
        if team_submission_uuid:
            team_submission = get_team_submission(team_submission_uuid)
        else:
            team_submission = get_team_submission_from_individual_submission(
                individual_submission_uuid)

        team = self.teams_service.get_team_by_team_id(
            team_submission['team_id'])
        context['team_name'] = team.name

        member_ids = get_team_submission_student_ids(
            team_submission['team_submission_uuid'])
        usernames = [self.get_username(member_id) for member_id in member_ids]
        if transform_usernames:
            usernames = list_to_conversational_format(usernames)
        context['team_usernames'] = usernames
Пример #14
0
def on_init(team_submission_uuid):
    """
    Create a new team staff workflow for a student item and submission.

    Creates a unique team staff workflow for a student item, associated with a
    team submission.

    Note that the staff workflow begins things in on_init() instead of
    on_start(), because staff should be able to access the submission
    regardless of which state the workflow is currently in.

    Args:
        team_submission_uuid (str): The team submission associated with this workflow.

    Returns:
        None

    Raises:
        StaffAssessmentInternalError: Raised when there is an internal error
            creating the Workflow.
    """
    try:
        team_submission = team_submissions_api.get_team_submission(
            team_submission_uuid)
        TeamStaffWorkflow.objects.get_or_create(
            course_id=team_submission['course_id'],
            item_id=team_submission['item_id'],
            team_submission_uuid=team_submission_uuid,
            # submission_uuid is currently not used in any logic in TeamStaffWorkflow, so we don't
            # really care which submission is chosen and it doesn't need to match the TeamAssessment Workflow.
            # It must be filled because of the unique constraint on the field (can't have multiple '' values)
            submission_uuid=team_submission['submission_uuids'][0],
        )
    except DatabaseError as exc:
        error_message = (
            "An internal error occurred while creating a new team staff workflow for team submission {}"
        ).format(team_submission_uuid)
        logger.exception(error_message)
        # Chain explicitly so the original DatabaseError is preserved as __cause__.
        raise StaffAssessmentInternalError(error_message) from exc
Пример #15
0
    def submission_path_and_context(self):
        """
        Determine the template path and context to use when
        rendering the response (submission) step.

        Selects among the "open", "closed", "unavailable", "submitted",
        "graded", "cancelled", and "team already submitted" response templates
        based on the user's workflow state, the problem's open/close dates,
        and (for team assignments) the user's team membership.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        """
        # Team assignments track workflow state per team submission;
        # everything else is tracked per individual learner.
        workflow = self.get_team_workflow_info(
        ) if self.teams_enabled else self.get_workflow_info()
        problem_closed, reason, start_date, due_date = self.is_closed(
            'submission')
        user_preferences = get_user_preferences(
            self.runtime.service(self, 'user'))

        path = 'openassessmentblock/response/oa_response.html'
        context = {
            'user_timezone': user_preferences['user_timezone'],
            'user_language': user_preferences['user_language'],
            "xblock_id": self.get_xblock_id(),
            "text_response": self.text_response,
            "text_response_editor": self.text_response_editor,
            "file_upload_response": self.file_upload_response,
            "prompts_type": self.prompts_type,
            "enable_delete_files": False,
            "show_rubric_during_response": self.show_rubric_during_response,
        }

        if self.show_rubric_during_response:
            # Deep-copy so template-side mutation cannot affect the
            # block's rubric definition.
            context['rubric_criteria'] = copy.deepcopy(
                self.rubric_criteria_with_labels)

        # Due dates can default to the distant future, in which case
        # there's effectively no due date.
        # If we don't add the date to the context, the template won't display it.
        if due_date < DISTANT_FUTURE:
            context["submission_due"] = due_date

        # For team assignments, if a user submitted with a past team, that gets precedence.
        # So we first see if they have a submission and load context from that.
        # Otherwise, we fall back to the current team.
        team_id_for_current_submission = None
        if self.is_team_assignment():
            if not workflow:
                team_id_for_current_submission = self.get_team_info().get(
                    'team_id', None)
            else:
                team_submission = get_team_submission(
                    workflow['team_submission_uuid'])
                team_id_for_current_submission = team_submission['team_id']

            # If it's a team assignment, the user hasn't submitted and is not on a team, the assignment is unavailable.
            if team_id_for_current_submission is None:
                path = 'openassessmentblock/response/oa_response_unavailable.html'
                return path, context

        context['file_upload_type'] = self.file_upload_type
        context['allow_multiple_files'] = self.allow_multiple_files
        context['allow_latex'] = self.allow_latex

        # NOTE(review): file_urls is never reassigned below, so the
        # `not file_urls` checks in the submit_enabled logic are always
        # True — confirm whether uploaded-file URLs were meant to be
        # loaded here.
        file_urls = None

        if self.file_upload_type:
            context['file_urls'] = self.file_manager.file_descriptors(
                team_id=team_id_for_current_submission, include_deleted=True)
            context[
                'team_file_urls'] = self.file_manager.team_file_descriptors(
                    team_id=team_id_for_current_submission)
            context['white_listed_file_types'] = [
                '.' + ext for ext in self.get_allowed_file_types_or_preset()
            ]

        if not workflow and problem_closed:
            # No submission yet and the problem is closed: either past due
            # or not yet open for submissions.
            if reason == 'due':
                path = 'openassessmentblock/response/oa_response_closed.html'
            elif reason == 'start':
                context['submission_start'] = start_date
                path = 'openassessmentblock/response/oa_response_unavailable.html'
        elif not workflow:
            # For backwards compatibility. Initially, problems had only one prompt
            # and a string answer. We convert it to the appropriate dict.
            no_workflow_path = "openassessmentblock/response/oa_response.html"

            try:
                json.loads(self.saved_response)
                saved_response = {
                    'answer': json.loads(self.saved_response),
                }
            except ValueError:
                saved_response = {
                    'answer': {
                        'text': self.saved_response,
                    },
                }

            context['saved_response'] = create_submission_dict(
                saved_response, self.prompts)
            context['save_status'] = self.save_status
            context['enable_delete_files'] = True

            # Submit is enabled only when every required response part
            # (text and/or files) is present.
            submit_enabled = True
            if self.text_response == 'required' and not self.saved_response:
                submit_enabled = False
            if self.file_upload_response == 'required' and not file_urls:
                submit_enabled = False
            if self.text_response == 'optional' and self.file_upload_response == 'optional' \
                    and not self.saved_response and not file_urls:
                submit_enabled = False
            context['submit_enabled'] = submit_enabled

            if self.teams_enabled:
                # Another team member may already have submitted for the team.
                self.get_team_submission_context(context)
                if self.does_team_have_submission(context['team_id']):
                    no_workflow_path = 'openassessmentblock/response/oa_response_team_already_submitted.html'

            path = no_workflow_path
        elif workflow["status"] == "cancelled":
            if self.teams_enabled:
                context[
                    "workflow_cancellation"] = self.get_team_workflow_cancellation_info(
                        workflow["team_submission_uuid"])
            else:
                context[
                    "workflow_cancellation"] = self.get_workflow_cancellation_info(
                        self.submission_uuid)
            context["student_submission"] = self.get_user_submission(
                workflow["submission_uuid"])
            path = 'openassessmentblock/response/oa_response_cancelled.html'
        elif workflow["status"] == "done":
            # Fully graded: show the read-only graded view.
            student_submission = self.get_user_submission(
                workflow["submission_uuid"])
            context["student_submission"] = create_submission_dict(
                student_submission, self.prompts)
            path = 'openassessmentblock/response/oa_response_graded.html'
        else:
            # Submitted but not yet done: surface progress through any
            # peer/self assessment steps.
            student_submission = self.get_user_submission(
                workflow["submission_uuid"])
            peer_in_workflow = "peer" in workflow["status_details"]
            self_in_workflow = "self" in workflow["status_details"]
            context["peer_incomplete"] = peer_in_workflow and not workflow[
                "status_details"]["peer"]["complete"]
            context["self_incomplete"] = self_in_workflow and not workflow[
                "status_details"]["self"]["complete"]
            context["student_submission"] = create_submission_dict(
                student_submission, self.prompts)
            path = 'openassessmentblock/response/oa_response_submitted.html'

        return path, context