Example #1
    def parse_activity_scores(self, activity_attempt):
        '''
           Processes activity scores received from the mapper.

           This is called in the mapper callback function.  Each time a student attempts
           a GCB question or a Quizly exercise, a tag-assessment Event is created.
           This processes such events to extract the number of attempts the student
           made and the answers.

           Events are time-stamped and recorded by user_id. They include the instance_id
           of the Component that triggered the Event.  Both GCB questions and Quizly
           exercises have an instance_id.

           However, Quizly exercises don't have a question_id and need special processing.

           Use the Dashboard to see what the data looks like for Events:
           https://console.cloud.google.com/datastore/entities/query?
               project=ram8647&ns=ns_mobileCSP&kind=EventEntity
        '''

        if activity_attempt.source == 'tag-assessment':
            data = transforms.loads(activity_attempt.data)
            instance_id = data['instanceid']
            if GLOBAL_DEBUG:
                logging.debug('***********RAM************** data[instanceid] = ' + instance_id)
            timestamp = int(
                (activity_attempt.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())

            # Get information about the course's questions (doesn't include Quizly exercises yet)
            questions = self.params['questions_by_usage_id']
            valid_question_ids = self.params['valid_question_ids']
            assessment_weights = self.params['assessment_weights']
            group_to_questions = self.params['group_to_questions']

            student = Student.get_by_user_id(activity_attempt.user_id)

            #  Get this student's answers so far
            student_answers = self.activity_scores.get(student.email, {})
            if GLOBAL_DEBUG:
                logging.debug('***RAM*** student answers = ' + str(student_answers))

            answers = event_transforms.unpack_check_answers(            # No Quizly answers in here
                data, questions, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)

            # Add the score to the right lesson
            # NOTE: This was throwing an exception on Quizly exercises.  Shouldn't happen now
            try: 
                #  If the event is tag-assessment and has no quid, it's a Quizly exercise
                if 'quid' not in data:
                    self.parse_quizly_scores(data, instance_id, timestamp, student, student_answers)
                else:
                    self.parse_question_scores(instance_id, questions, student_answers, answers, student, timestamp)
            except Exception as e:
                logging.error('***********RAM************** bad instance_id: %s %s\n%s', str(instance_id), e, traceback.format_exc())
        if GLOBAL_DEBUG:       
            logging.debug('***RAM*** activity_scores ' + str(self.activity_scores))
        return self.activity_scores
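
For orientation, a rough sketch of the data this method works with; the field values are illustrative guesses, not taken from a real course. A tag-assessment payload always carries an 'instanceid'; GCB questions also carry a 'quid', while Quizly exercises do not, which is how the branch above tells them apart.

# Hypothetical tag-assessment payloads (illustrative only).
gcb_event_data = {
    'instanceid': 'AbCdEfGh1234',    # component instance that fired the event
    'quid': '5629499534213120',      # present only for GCB questions
}
quizly_event_data = {
    'instanceid': 'QuizlyXyz987',    # no 'quid' key -> treated as a Quizly exercise
}

# Assumed shape of the accumulated self.activity_scores, keyed by student
# email; the nested unit/lesson/sequence layout is built in the
# parse_activity_scores variant shown later:
# {email: {unit_id: {lesson_id: {sequence: question_answer_dict}}}}
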
Example #2
    def map(cls, event):
        """Extract question responses from all event types providing them."""

        if event.source not in (
            'submit-assessment',
            'attempt-lesson',
            'tag-assessment'):
            return

        # Fetch global params set up in build_additional_mapper_params(), above.
        params = context.get().mapreduce_spec.mapper.params
        questions_info = params['questions_by_usage_id']
        valid_question_ids = params['valid_question_ids']
        group_to_questions = params['group_to_questions']
        assessment_weights = params['assessment_weights']

        timestamp = int(
            (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)

        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions.  Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, answer_data, timestamp)

        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, content, timestamp)

        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)

        yield (RawAnswersGenerator.TOTAL_STUDENTS, event.user_id)

        # Each answer is a namedtuple; convert to a list for pack/unpack
        # journey through the map/reduce shuffle stage.
        result = [list(answer) for answer in answers]
        for key in cls._generate_keys(event, event.user_id):
            yield (key, result)
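
The answers produced by event_transforms are namedtuples; the final list comprehension flattens them to plain lists so they survive the map/reduce shuffle stage. A minimal round-trip sketch, assuming the field set that the parse_activity_scores variant shown later reads from each answer (the concrete values are made up):

import collections

# Assumed field layout, matching the attributes the parsing code accesses.
Answer = collections.namedtuple('Answer', [
    'unit_id', 'lesson_id', 'sequence', 'question_id', 'question_type',
    'timestamp', 'answers', 'score', 'weighted_score', 'tallied'])

answer = Answer(4, 7, 0, '5629499534213120', 'McQuestion',
                1500000000, [1], 1.0, 1.0, True)

packed = list(answer)        # plain list crosses the shuffle stage
restored = Answer(*packed)   # rebuilt on the reduce side
assert restored == answer
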
Example #3
    def map(cls, event):
        """Extract question responses from all event types providing them."""

        if event.source not in ('submit-assessment', 'attempt-lesson',
                                'tag-assessment'):
            return

        # Fetch global params set up in build_additional_mapper_params(), above.
        params = context.get().mapreduce_spec.mapper.params
        questions_info = params['questions_by_usage_id']
        valid_question_ids = params['valid_question_ids']
        group_to_questions = params['group_to_questions']
        assessment_weights = params['assessment_weights']

        timestamp = int((event.recorded_on -
                         datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)

        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions.  Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, answer_data, timestamp)

        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, content, timestamp)

        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, valid_question_ids,
                assessment_weights, group_to_questions, timestamp)

        yield (RawAnswersGenerator.TOTAL_STUDENTS, event.user_id)

        # Each answer is a namedtuple; convert to a list for pack/unpack
        # journey through the map/reduce shuffle stage.
        result = [list(answer) for answer in answers]
        for key in cls._generate_keys(event, event.user_id):
            yield (key, result)
    def parse_activity_scores(self, activity_attempt):
        if activity_attempt.source == 'tag-assessment':
            data = transforms.loads(activity_attempt.data)

            timestamp = int(
                (activity_attempt.recorded_on -
                 datetime.datetime(1970, 1, 1)).total_seconds())

            questions = self.params['questions_by_usage_id']
            valid_question_ids = self.params['valid_question_ids']
            assessment_weights = self.params['assessment_weights']
            group_to_questions = self.params['group_to_questions']

            student = Student.get_student_by_user_id(activity_attempt.user_id)
            student_answers = self.activity_scores.get(student.email, {})

            answers = event_transforms.unpack_check_answers(
                data, questions, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)

            # Add the score to the right lesson
            question_info = questions[data['instanceid']]
            unit_answers = student_answers.get(question_info['unit'], {})
            lesson_answers = unit_answers.get(question_info['lesson'], {})

            for answer in answers:
                question_answer_dict = {}
                question_answer_dict['unit_id'] = answer.unit_id
                question_answer_dict['lesson_id'] = answer.lesson_id
                question_answer_dict['sequence'] = answer.sequence
                question_answer_dict['question_id'] = answer.question_id
                question_answer_dict['question_type'] = answer.question_type
                question_answer_dict['timestamp'] = answer.timestamp
                question_answer_dict['answers'] = answer.answers
                question_answer_dict['score'] = answer.score
                question_answer_dict['weighted_score'] = answer.weighted_score
                question_answer_dict['tallied'] = answer.tallied

                # Keep only the most recent answer for each question sequence.
                if (answer.sequence not in lesson_answers or
                        lesson_answers[answer.sequence]['timestamp'] < timestamp):
                    lesson_answers[answer.sequence] = question_answer_dict

            unit_answers[question_info['lesson']] = lesson_answers
            student_answers[question_info['unit']] = unit_answers

            self.activity_scores[student.email] = student_answers

        return self.activity_scores
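
The loop above accumulates answers into a nested dict keyed by student email, then unit, lesson, and question sequence. A hypothetical snapshot of self.activity_scores after one event, with made-up ids and values:

activity_scores = {
    'student@example.com': {
        '4': {                    # unit id, from questions_by_usage_id
            '7': {                # lesson id
                0: {              # question sequence within the lesson
                    'unit_id': '4',
                    'lesson_id': '7',
                    'sequence': 0,
                    'question_id': '5629499534213120',
                    'question_type': 'McQuestion',
                    'timestamp': 1500000000,
                    'answers': [1],
                    'score': 1.0,
                    'weighted_score': 1.0,
                    'tallied': True,
                },
            },
        },
    },
}
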
Example #5
    def map(event):
        """Extract question responses from all event types providing them."""

        if event.source not in (
            'submit-assessment',
            'attempt-lesson',
            'tag-assessment'):
            return

        # Fetch global params set up in build_additional_mapper_params(), above.
        params = context.get().mapreduce_spec.mapper.params
        questions_info = params['questions_by_usage_id']
        group_to_questions = params['group_to_questions']

        timestamp = int(
            (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)

        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions.  Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, answer_data, timestamp)

        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, content, timestamp)

        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, group_to_questions, timestamp)

        yield (event.user_id, [list(answer) for answer in answers])
Example #6
    def parse_activity_scores(self, activity_attempt):
        '''
           Processes activity scores received from the mapper.

           This is called in the mapper callback function.  Each time a student attempts
           a GCB question or a Quizly exercise, a tag-assessment Event is created.
           This processes such events to extract the number of attempts the student
           made and the answers.

           Events are time-stamped and recorded by user_id. They include the instance_id
           of the Component that triggered the Event.  Both GCB questions and Quizly
           exercises have an instance_id.

           However, Quizly exercises don't have a question_id and need special processing.

           Use the Dashboard to see what the data looks like for Events:
           https://console.cloud.google.com/datastore/entities/query?
               project=ram8647&ns=ns_mobileCSP&kind=EventEntity
        '''

        if activity_attempt.source == 'tag-assessment':
            data = transforms.loads(activity_attempt.data)
            instance_id = data['instanceid']
            if GLOBAL_DEBUG:
                logging.debug(
                    '***********RAM************** data[instanceid] = ' +
                    instance_id)
            timestamp = int((activity_attempt.recorded_on -
                             datetime.datetime(1970, 1, 1)).total_seconds())

            # Get information about the course's questions (doesn't include Quizly exercises yet)
            questions = self.params['questions_by_usage_id']
            valid_question_ids = self.params['valid_question_ids']
            assessment_weights = self.params['assessment_weights']
            group_to_questions = self.params['group_to_questions']

            student = Student.get_by_user_id(activity_attempt.user_id)

            #  Get this student's answers so far
            student_answers = self.activity_scores.get(student.email, {})
            if GLOBAL_DEBUG:
                logging.debug('***RAM*** student answers = ' +
                              str(student_answers))

            answers = event_transforms.unpack_check_answers(  # No Quizly answers in here
                data, questions, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)

            # Add the score to the right lesson
            # NOTE: This was throwing an exception on Quizly exercises.  Shouldn't happen now
            try:
                #  If the event is tag-assessment and has no quid, it's a Quizly exercise
                if 'quid' not in data:
                    self.parse_quizly_scores(data, instance_id, timestamp,
                                             student, student_answers)
                else:
                    self.parse_question_scores(instance_id, questions,
                                               student_answers, answers,
                                               student, timestamp)
            except Exception as e:
                logging.error(
                    '***********RAM************** bad instance_id: %s %s\n%s',
                    str(instance_id), e, traceback.format_exc())
        if GLOBAL_DEBUG:
            logging.debug('***RAM*** activity_scores ' +
                          str(self.activity_scores))
        return self.activity_scores
    def process_event(cls, event, static_params):
        questions_info = static_params['questions_by_usage_id']
        valid_question_ids = static_params['valid_question_ids']
        group_to_questions = static_params['group_to_questions']
        assessment_weights = static_params['assessment_weights']

        timestamp = int(
            (event.recorded_on - datetime.datetime(1970, 1, 1)).total_seconds())
        content = transforms.loads(event.data)

        answers = None
        if event.source == 'submit-assessment':
            answer_data = content.get('values', {})
            # TODO(mgainer): handle assessment-as-form submissions.  Current
            # implementation only understands Question and QuestionGroup;
            # forms are simply submitted as lists of fields.
            # TODO(mgainer): Handle peer-review scoring
            if not isinstance(answer_data, dict):
                return
            version = answer_data.get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, answer_data, timestamp)
            else:
                logging.warning('Unexpected version %s in submit-assessment '
                                'event handling', version)
        elif event.source == 'attempt-lesson':
            # Very odd that the version should be in the answers map....
            version = content.get('answers', {}).get('version')
            if version == '1.5':
                answers = event_transforms.unpack_student_answer_1_5(
                    questions_info, valid_question_ids, assessment_weights,
                    group_to_questions, content, timestamp)
            else:
                logging.warning('Unexpected version %s in attempt-lesson '
                                'event handling', version)
        elif event.source == 'tag-assessment':
            answers = event_transforms.unpack_check_answers(
                content, questions_info, valid_question_ids, assessment_weights,
                group_to_questions, timestamp)
        if not answers:
            return None

        answer_dicts = []
        total_weighted_score = 0.0

        for answer in answers:
            if not isinstance(answer.answers, (tuple, list)):
                stringified_answers = [unicode(answer.answers)]
            else:
                stringified_answers = [unicode(a) for a in answer.answers]
            answer_dict = {
                'question_id': answer.question_id,
                'responses': stringified_answers,
                }
            answer_dict['score'] = float(answer.score)
            answer_dict['weighted_score'] = float(answer.weighted_score)
            total_weighted_score += answer.weighted_score
            answer_dicts.append(answer_dict)

        submission = {
            'timestamp': answers[0].timestamp,
            'answers': answer_dicts,
            }
        submission['weighted_score'] = total_weighted_score

        assessment = {
            'unit_id': str(answers[0].unit_id),
            'lesson_id': str(answers[0].lesson_id),
            'submissions': [submission],
            }
        return assessment
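
For reference, the assessment dict returned above has roughly this shape; the ids, responses, and scores here are invented for illustration:

assessment = {
    'unit_id': '4',
    'lesson_id': '7',
    'submissions': [{
        'timestamp': 1500000000,
        'weighted_score': 2.0,
        'answers': [
            {'question_id': '5629499534213120', 'responses': [u'1'],
             'score': 1.0, 'weighted_score': 1.0},
            {'question_id': '5629499534213121', 'responses': [u'0'],
             'score': 1.0, 'weighted_score': 1.0},
        ],
    }],
}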