Example #1
    def map(cls, event):
        """Extract question responses from all event types providing them."""

        if event.source not in (
            'submit-assessment',
            'attempt-lesson',
            'tag-assessment'):
            return

        # Fetch global params set up in build_additional_mapper_params(), above.
        params = context.get().mapreduce_spec.mapper.params
        questions_info = params['questions_by_usage_id']
        valid_question_ids = params['valid_question_ids']
        group_to_questions = params['group_to_questions']
        assessment_weights = params['assessment_weights']

        timestamp = int(
            (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)
        answers = None  # Stays None if the payload version is not recognized below.

        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions.  Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, answer_data, timestamp)

        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, content, timestamp)

        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)

        yield (RawAnswersGenerator.TOTAL_STUDENTS, event.user_id)

        # Nothing more to emit if the payload version was not recognized above.
        if not answers:
            return

        # Each answer is a namedtuple; convert to a list for pack/unpack
        # journey through the map/reduce shuffle stage.
        result = [list(answer) for answer in answers]
        for key in cls._generate_keys(event, event.user_id):
            yield (key, result)
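
The comment near the end of map() notes that each answer namedtuple is converted to a plain list for the "pack/unpack journey" through the map/reduce shuffle stage, i.e. so the values serialize cleanly and can be rebuilt afterwards. A minimal, self-contained sketch of that round trip (the Answer fields below are hypothetical stand-ins, not the real event_transforms tuple):

    import collections

    # Hypothetical stand-in for the answer namedtuple built by event_transforms.
    Answer = collections.namedtuple('Answer', ['question_id', 'responses', 'score'])

    answer = Answer(question_id='q-1', responses=['choice_0'], score=1.0)

    # Map side: emit a plain list so the value serializes cleanly through the shuffle.
    packed = list(answer)              # ['q-1', ['choice_0'], 1.0]

    # Reduce side: rebuild the namedtuple from the list that came out of the shuffle.
    restored = Answer(*packed)
    assert restored == answer
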
Example #2
    def map(cls, event):
        """Extract question responses from all event types providing them."""

        if event.source not in ('submit-assessment', 'attempt-lesson',
                                'tag-assessment'):
            return

        # Fetch global params set up in build_additional_mapper_params(), above.
        params = context.get().mapreduce_spec.mapper.params
        questions_info = params['questions_by_usage_id']
        valid_question_ids = params['valid_question_ids']
        group_to_questions = params['group_to_questions']
        assessment_weights = params['assessment_weights']

        timestamp = int((event.recorded_on -
                         datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)
        answers = None  # Stays None if the payload version is not recognized below.

        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions.  Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, answer_data, timestamp)

        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, content, timestamp)

        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, valid_question_ids,
                assessment_weights, group_to_questions, timestamp)

        yield (RawAnswersGenerator.TOTAL_STUDENTS, event.user_id)

        # Nothing more to emit if the payload version was not recognized above.
        if not answers:
            return

        # Each answer is a namedtuple; convert to a list for pack/unpack
        # journey through the map/reduce shuffle stage.
        result = [list(answer) for answer in answers]
        for key in cls._generate_keys(event, event.user_id):
            yield (key, result)
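
Both of the examples above derive the event timestamp by subtracting the epoch, datetime.datetime(1970, 1, 1), from event.recorded_on; this is the usual idiom in Python 2-era code, where datetime objects have no timestamp() method. A standalone sketch of the same computation:

    import datetime

    recorded_on = datetime.datetime(2015, 6, 1, 12, 30, 0)  # example value

    # Whole seconds since the Unix epoch, matching the expression used in map().
    timestamp = int((recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
    print(timestamp)  # 1433161800
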
Example #3
    def map(event):
        """Extract question responses from all event types providing them."""

        if event.source not in (
            'submit-assessment',
            'attempt-lesson',
            'tag-assessment'):
            return

        # Fetch global params set up in build_additional_mapper_params(), above.
        params = context.get().mapreduce_spec.mapper.params
        questions_info = params['questions_by_usage_id']
        group_to_questions = params['group_to_questions']

        timestamp = int(
            (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)
        answers = None  # Stays None if the payload version is not recognized below.

        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions.  Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, answer_data, timestamp)

        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, content, timestamp)

        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, group_to_questions, timestamp)

        # Nothing to emit if the payload version was not recognized above.
        if not answers:
            return

        yield (event.user_id, [list(answer) for answer in answers])
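
In each of these mappers, transforms.loads turns the JSON blob stored in event.data back into a Python dict before the source-specific handling. Conceptually it behaves like json.loads (an assumption about the Course Builder helper; the payload fields below are invented for illustration):

    import json

    # Hypothetical payload of the kind a 'tag-assessment' event might carry.
    event_data = '{"instanceid": "a1b2c3", "answer": [0], "score": 1.0}'

    content = json.loads(event_data)
    print(content['score'])  # 1.0
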
Example #4
    def map(student_answers):
        params = context.get().mapreduce_spec.mapper.params
        questions_by_usage_id = params['questions_by_usage_id']
        all_answers = transforms.loads(student_answers.data)
        for unit_id, unit_responses in all_answers.items():

            # Is this a CourseBuilder Question/QuestionGroup set of answers?
            if ('containedTypes' in unit_responses and
                unit_responses['version'] == '1.5'):
                for answer in event_transforms.unpack_student_answer_1_5(
                    questions_by_usage_id, unit_responses, timestamp=0):
                    yield (StudentAnswersStatsGenerator.build_key(
                        unit_id, answer.sequence, answer.question_id,
                        answer.question_type), (answer.answers, answer.score))

    def process_event(cls, event, static_params):
        questions_info = static_params['questions_by_usage_id']
        valid_question_ids = static_params['valid_question_ids']
        group_to_questions = static_params['group_to_questions']
        assessment_weights = static_params['assessment_weights']

        timestamp = int(
            (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)

        answers = None
        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions.  Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, answer_data, timestamp)
            else:
                logging.warning('Unexpected version %s in submit-assessment '
                                'event handling', version)
        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, content, timestamp)
            else:
                logging.warning('Unexpected version %s in attempt-lesson '
                                'event handling', version)
        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)
        if not answers:
            return None

        answer_dicts = []
        total_weighted_score = 0.0

        for answer in answers:
            if not isinstance(answer.answers, (tuple, list)):
                stringified_answers = [unicode(answer.answers)]
            else:
                stringified_answers = [unicode(a) for a in answer.answers]
            answer_dict = {
                'question_id': answer.question_id,
                'responses': stringified_answers,
                }
            answer_dict['score'] = float(answer.score)
            answer_dict['weighted_score'] = float(answer.weighted_score)
            total_weighted_score += answer.weighted_score
            answer_dicts.append(answer_dict)

        submission = {
            'timestamp': answers[0].timestamp,
            'answers': answer_dicts,
            }
        submission['weighted_score'] = total_weighted_score

        assessment = {
            'unit_id': str(answers[0].unit_id),
            'lesson_id': str(answers[0].lesson_id),
            'submissions': [submission],
            }
        return assessment
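
For reference, the nested structure that process_event() assembles and returns looks roughly like this; the key names come straight from the code above, while the values are invented for illustration:

    assessment = {
        'unit_id': '12',
        'lesson_id': '34',
        'submissions': [{
            'timestamp': 1433161800,
            'weighted_score': 2.0,
            'answers': [{
                'question_id': 4785074604081152,
                'responses': [u'choice_0'],
                'score': 1.0,
                'weighted_score': 2.0,
            }],
        }],
    }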