def put(self, *args, **kwargs):
     parser = reqparse.RequestParser()
     parser.add_argument('action', type=str, required=True, choices=['approve', 'reject'])
     args = parser.parse_args()
     if args['action'] == 'reject':
         sub = SolutionSubmissionModel.query.get(kwargs['id'])
         if sub is not None:
             sub.status = 'rejected'
             Question.reset_solution(sub.question_id, sub.solution_type)
             db.session.commit()
     if args['action'] == 'approve':
         sub = SolutionSubmissionModel.query.get(kwargs['id'])
         if sub is not None:
             sub.status = 'accepted'
             # create approval entry
             SolutionApproval.create(sub.id, kwargs['user_type'].id, kwargs['user'].id)
             Question.approve_solution(sub.question_id, sub.solution_type)
             db.session.commit()
     return {'error': False}
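# A hypothetical client call for the approve/reject handler above. The URL path and the
# auth header are assumptions for illustration; only the required 'action' field (one of
# 'approve' or 'reject') comes from the parser in the snippet.
import requests

resp = requests.put(
    'https://api.example.com/admin/solution_submissions/42',  # assumed mount point
    data={'action': 'approve'},
    headers={'Authorization': 'Token <token>'},                # assumed auth scheme
)
print resp.json()  # expected: {'error': False}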
    def get_analysis(attempted_mock_test_id):
        attempted_mock_test = AttemptedMockTestModel.query.get(
            attempted_mock_test_id)
        if attempted_mock_test is None:
            raise InvalidAttemptedMockTestId
        mock_test = MockTest.query.get(attempted_mock_test.mock_test_id)
        if mock_test is None:
            raise InvalidMockTestId

        if attempted_mock_test.answers is None:
            return {'error': True, 'message': 'No answers yet'}

        attempted_mock_test.analysis = json.loads(attempted_mock_test.analysis)

        attempted_mock_test.analysis['percentile'] = AttemptedMockTest.get_percentile(attempted_mock_test)

        attempted_mock_test.analysis['cutoff'] = mock_test.cutoff
        rank_colleges = AttemptedMockTest.get_rank_and_college(
            attempted_mock_test, mock_test)
        if rank_colleges is not None:
            attempted_mock_test.analysis['expected_rank'], attempted_mock_test.analysis['expected_colleges'] = rank_colleges

        attempted_mock_test.analysis = json.dumps(attempted_mock_test.analysis)

        question_data = json.loads(mock_test.question_ids)
        question_ids = []
        for subject_id, data in question_data.items():
            data['subject_id'] = subject_id
            question_ids.extend(data['q_ids'])
        questions = Question.get_filtertered_list(
            include_question_ids=question_ids)['questions']

        return {
            'attempted_mock_test': attempted_mock_test,
            'mock_test': mock_test,
            'questions': questions
        }
Example #3
 def get(self, *args, **kwargs):
     mock_test = MockTest.query.get(kwargs['id'])
     if mock_test is None:
         raise InvalidMockTestId
     pushed_batch_ids = [
         p.batch_id for p in PushedMockTest.query.filter(
             PushedMockTest.mock_test_id == mock_test.id).all()
     ]
     batches = Batch.get_filtered(include_ids=pushed_batch_ids,
                                  institute_id=kwargs['user'].id)
     mock_test.batches_pushed_to = [{
         'id': b.id,
         'name': b.name,
         'class': b.clazz
     } for b in batches]
     question_ids = []
     for sid, data in json.loads(mock_test.question_ids).items():
         question_ids.extend(data['q_ids'])
     questions = Question.get_filtertered_list(
         include_question_ids=question_ids)['questions']
     return {'mock_test': mock_test, 'questions': questions}
    def get(self, *args, **kwargs):
        parser = reqparse.RequestParser()
        parser.add_argument('mock_test_id', type=int, required=True)
        args = parser.parse_args()
        mock_test_id = args['mock_test_id']
        mock_test = MockTest.query.get(mock_test_id)
        if mock_test is None:
            raise InvalidMockTestId
        question_data = json.loads(mock_test.question_ids)
        question_ids = []
        for subject_id, data in question_data.items():
            data['subject_id'] = subject_id
            question_ids.extend(data['q_ids'])

        sorted_question_data = sorted(question_data.values(),
                                      key=lambda d: d['order'])
        questions = Question.get_filtertered_list(
            include_question_ids=question_ids)['questions']
        """
        for question in questions:
            print '--options--'
            print question.correct_options
            print '---$$$---'
            question.correct_options = None
            question.option_reasons = None
            question.text_solution = None
            question.video_solution_url = None
            question.similar_question_ids = None
            question.average_time = None
        """

        questions = {q.id: q for q in questions}
        for subject in sorted_question_data:
            subject['questions'] = map(lambda q_id: questions[q_id],
                                       subject['q_ids'])
        return {'mock_test': mock_test, 'subjects': sorted_question_data}
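# A minimal sketch of the JSON assumed to live in MockTest.question_ids by the handlers
# above: one entry per subject ontology id, each with a display 'order' and the question
# ids ('q_ids') for that subject. The concrete ids below are illustrative only.
import json

example_question_ids = json.dumps({
    '12': {'order': 0, 'q_ids': [101, 102, 103]},  # e.g. Physics
    '17': {'order': 1, 'q_ids': [104, 105]},       # e.g. Chemistry
})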
    def get(self, *args, **kwargs):
        """
        Get questions not similar to the given question id

        :param args:
        :param kwargs:
        :return:
        """
        parser = reqparse.RequestParser()
        parser.add_argument('nature',
                            type=str,
                            choices=app.config['QUESTION_NATURE'].keys())
        parser.add_argument('type',
                            type=str,
                            choices=app.config['QUESTION_TYPE'].keys())
        parser.add_argument('difficulty',
                            type=str,
                            choices=app.config['QUESTION_DIFFICULTY_LEVEL'])
        parser.add_argument('average_time',
                            type=int,
                            choices=map(int,
                                        app.config['QUESTION_AVERAGE_TIME']))

        # this contains comma separated ontology node ids
        parser.add_argument('ontology', type=comma_separated_ints_type)

        parser.add_argument('question_id', type=int)

        parser.add_argument('offset', type=int, default=0)

        args = parser.parse_args()

        if args['question_id'] is None:
            exprs = [
                Question.is_similarity_marked == False,
                Question.status['categorized'] == '1'
            ]
            if args['nature'] is not None:
                exprs.append(Question.nature == args['nature'])
            if args['type'] is not None:
                exprs.append(Question.type == args['type'])
            if args['difficulty'] is not None:
                exprs.append(Question.difficulty == args['difficulty'])
            if args['average_time'] is not None:
                exprs.append(Question.average_time == args['average_time'])
            if args['ontology'] is not None:
                exprs.append(Question.ontology == args['ontology'])

            question = Question.query.filter(*exprs).offset(
                args['offset']).first()
            if question is None:
                return {'questions': [], 'total': 0}
            else:
                other_questions = Question.get_filtertered_list(
                    nature=args['nature'],
                    type=args['type'],
                    difficulty=args['difficulty'],
                    average_time=args['average_time'],
                    ontology=args['ontology'],
                    categorized='1',
                    exclude_question_ids=[
                        question.id,
                    ])

                if other_questions['total'] == 0:
                    skip = args['offset'] + 1
                    while question is not None:
                        question = Question.query.filter(
                            *exprs).offset(skip).first()
                        if question is None:
                            return {'questions': [], 'total': 0}
                        other_questions = Question.get_filtertered_list(
                            nature=args['nature'],
                            type=args['type'],
                            difficulty=args['difficulty'],
                            average_time=args['average_time'],
                            ontology=args['ontology'],
                            categorized='1',
                            exclude_question_ids=[
                                question.id,
                            ])
                        if other_questions['total'] > 0:
                            break
                        skip += 1

                return {
                    'questions': [question] + other_questions['questions'],
                    'total': other_questions['total'] + 1
                }
        else:
            question_id = args['question_id']
            question = Question.get(question_id)

            other_questions = Question.get_filtertered_list(
                ontology=question.ontology,
                categorized='1',
                exclude_question_ids=[
                    question.id,
                ])

            return {
                'questions': [question] + other_questions['questions'],
                'total': other_questions['total'] + 1
            }
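# comma_separated_ints_type (used for the 'ontology' argument above) is not part of this
# snippet. A minimal sketch of such a reqparse type callable, assuming it only needs to
# turn "1,4,9" into [1, 4, 9] and let bad input raise so reqparse can report a 400:
def comma_separated_ints_type(value):
    return [int(part) for part in str(value).split(',') if part.strip() != '']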
Example #6
def add_questions_to_db_and_mock_test(paper_questions, comprehensions,
                                      mock_test_id):
    """This method adds the given questions (result of `parse_question`) to the DB
    and also adds them to the questions_ids attribute of the mock test row.
    """

    ## Get the mock test
    mock_test = MockTest.query.get(mock_test_id)

    ## Get the S3 buckets here (otherwise will have to make calls for every content block)
    conn = S3Connection(config.S3_ACCESS_KEY, config.S3_SECRET)
    question_files_final_bucket = conn.get_bucket(
        app.config['S3_QUESTION_FILES_FINAL_BUCKET'])

    ## Upload the questions to the DB if there are no errors
    added_questions = []
    comprehension_ids = {}

    print 'Number of Questions: {0}'.format(len(paper_questions))

    for question in paper_questions:
        status = dict(config.QUESTION_STATUS.items())
        status['categorized'] = '1'
        status['text_solution_added'] = '1'
        # status['proof_read_categorization'] = '1'

        # make a list of the correct options
        correct_options = []
        for i in range(len(question['options']['values'])):
            if question['options']['values'][i]['correct']:
                correct_options.append(i)

        # move the images to s3 and change the markup accordingly
        """
        question['body']['value'] = move_images_to_final_bucket(question['body']['value'], question_files_final_bucket)
        question['text_solution']['value'] = move_images_to_final_bucket(question['text_solution']['value'], question_files_final_bucket)
        for i in range(len(question['options']['values'])):
            question['options']['values'][i]['value'] = move_images_to_final_bucket(question['options']['values'][i]['value'],
                                                                                        question_files_final_bucket)
        for i in range(len(comprehensions)):
            comprehensions[i]['value'] = move_images_to_final_bucket(comprehensions[i]['value'], question_files_final_bucket)
        """

        # create a comprehension if needed or just pick up a comprehension ID
        comprehension_id = None
        if question['comprehension']:
            if comprehension_ids.get(question['comprehension_index']):
                comprehension_id = comprehension_ids[
                    question['comprehension_index']]
            else:
                comp_ = comprehensions[question['comprehension_index']]
                comprehension = Comprehension.create(comp_['value'])
                db.session.add(comprehension)
                db.session.commit()
                comprehension_id = comprehension.id
                comprehension_ids[
                    question['comprehension_index']] = comprehension.id

        # create the question in the DB
        question_data = {
            'content': question['body']['value'],
            'status': status,
            'all_options': [option['value'] for option in question['options']['values']],
            'correct_options': correct_options,
            'ontology_id': question['ontology']['value'][-1],
            'average_time': int(question['attributes']['average_time']['value']),
            'nature': question['attributes']['nature']['value'],
            'difficulty': question['attributes']['difficulty']['value'],
            'type': question['attributes']['type']['value'],
            'text_solution': question['text_solution']['value'],
            'text_solution_by_type': 1,
            'text_solution_by_id': app.config['AUTO_UPLOAD_DTP_ID'],
            'comprehension_id': comprehension_id
        }
        question_ = Question.create(**question_data)
        added_questions.append([question_, question['ontology']['value']])

        # create the attached text solution submission row in the db too
        solution_submission_params = {
            'submitted_by_type': 3,
            'submitted_by_id': app.config['AUTO_UPLOAD_TEACHER_ID'],
            'question_id': question_.id,
            'solution_type': 'text',
            'solution': question['text_solution']['value'],
        }
        SolutionSubmission.create(**solution_submission_params)

        # create the attached category submission row in the db too
        last_ontology_obj = Ontology.query.get(question_data['ontology_id'])
        category_submission_params = {
            'submitted_by_type': 3,
            'submitted_by_id': app.config['AUTO_UPLOAD_TEACHER_ID'],
            'question_id': question_.id,
            'ontology': last_ontology_obj.absolute_path,
            'nature': question_data['nature'],
            'type': question_data['type'],
            'difficulty': question_data['difficulty'],
            'average_time': question_data['average_time']
        }
        CategorySubmission.create(**category_submission_params)

    ## Add the questions to the mock Test
    mock_test_questions = {}
    order = -1
    for question, ontology in added_questions:
        subject_id = str(ontology[0])
        if subject_id not in mock_test_questions:
            print subject_id
            order += 1
            mock_test_questions[subject_id] = {'order': order, 'q_ids': [question.id]}
        else:
            mock_test_questions[subject_id]['q_ids'].append(question.id)

    print mock_test_questions.keys()

    ## Add the `mock_test_questions` to the mock test
    mock_test.question_ids = json.dumps(mock_test_questions)
    db.session.add(mock_test)
    db.session.commit()

    return True
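# A rough sketch of a single `paper_questions` entry as read by
# add_questions_to_db_and_mock_test above, reconstructed from the fields it accesses; all
# concrete values are illustrative, not the real parser output.
example_paper_question = {
    'body': {'value': '<p>What is 2 + 2?</p>'},
    'options': {'values': [
        {'value': '3', 'correct': False},
        {'value': '4', 'correct': True},
    ]},
    'text_solution': {'value': '<p>2 + 2 = 4</p>'},
    'ontology': {'value': [12, 345, 678]},  # subject id first, leaf ontology node last
    'attributes': {
        'average_time': {'value': '60'},
        'nature': {'value': 'numerical'},     # assumed nature key
        'difficulty': {'value': '3'},
        'type': {'value': 'single_correct'},  # assumed type key
    },
    'comprehension': False,
    'comprehension_index': None,
}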
    def post(self, *args, **kwargs):
        parser = reqparse.RequestParser()
        parser.add_argument('mock_test_id', type=int, required=True)
        parser.add_argument('pushed_mock_test_id', type=int)
        parser.add_argument('answers',
                            type=self.__class__.answers_json_type,
                            required=True)
        args = parser.parse_args()
        mock_test_id = args['mock_test_id']
        pushed_mock_test_id = args['pushed_mock_test_id']
        mock_test = MockTest.query.get(mock_test_id)
        if mock_test is None:
            raise InvalidMockTestId

        # get attempted mock tests by the student which have the same pushed id as this request, or a null pushed id
        # and the same mock test id as this request. If such an attempt is found, an error is returned. This prevents
        # reattempting the mock test, e.g. from a different browser or browser tab
        amt = AttemptedMockTest.query.filter(
            AttemptedMockTest.student_id == kwargs['user'].id,
            or_(
                and_(
                    AttemptedMockTest.pushed_mock_test_id != None,
                    AttemptedMockTest.pushed_mock_test_id ==
                    pushed_mock_test_id),
                and_(AttemptedMockTest.pushed_mock_test_id == None,
                     AttemptedMockTest.mock_test_id == mock_test_id))).all()
        if len(amt) > 0:
            raise MockTestTestAlreadyAttempted

        # get attempted mock tests of the same type and check if number of permitted mock tests as per payment plan is
        # exceeded or not
        attempted_mock_test_ids = [
            amt.mock_test_id for amt in AttemptedMockTest.query.filter(
                AttemptedMockTest.student_id == kwargs['user'].id).all()
        ]
        attempted_mock_tests = MockTest.query.filter(
            MockTest.id.in_(attempted_mock_test_ids)).all()
        attempted_mock_tests_of_type = filter(
            lambda m: m.type == mock_test.type, attempted_mock_tests)
        if len(attempted_mock_tests_of_type) >= app.config['PAYMENT_PLAN'][
                mock_test.type]:
            raise PaymentPlanLimitReached

        # create attempted test entry
        attempted_mock_test = AttemptedMockTest(
            pushed_mock_test_id=pushed_mock_test_id,
            mock_test_id=mock_test_id,
            student_id=kwargs['user'].id,
            attempted_at=datetime.datetime.utcnow())
        answers = args['answers']
        question_ids = answers.keys()
        questions = {
            q.id: q
            for q in Question.get_filtertered_list(
                include_question_ids=question_ids)['questions']
        }
        if len(question_ids) != len(questions):
            raise InvalidQuestionId
        marking_scheme = app.config['MARKING_SCHEME']
        target_exam = mock_test.target_exam

        maximum_marks = 0
        total_marks = 0
        subject_wise = {}
        topic_wise = {}

        question_overtime = app.config['QUESTION_OVER_TIME']

        perfect_attempts = []
        wasted_attempts = []
        overtime_attempts = []
        completely_wasted_attempts = []

        ontology = {node.id: node for node in Ontology.get_all_nodes_of_tree()}

        # dictionary with string value of duration as key and value as question id
        durations_dict = {}

        # list with durations of questions
        durations_list = []

        for question_id, value in answers.items():
            question_id = int(question_id)
            question = questions[question_id]
            subject_id = question.ontology[0]
            topic_id = None
            for node_id in question.ontology:
                if node_id in ontology:
                    if ontology[node_id].type == '3':
                        topic_id = node_id
                        break

            # subject seen first time
            if subject_id not in subject_wise:
                subject_wise[subject_id] = {
                    'name': ontology[subject_id].name,
                    'topic_ids': [],
                    'correct': [],
                    'incorrect': [],
                    'not_attempted': [],
                    'marks': 0,
                    'time': 0,
                    'maximum_marks': 0,
                    'perfect_attempts': [],
                    'wasted_attempts': [],
                    'overtime_attempts': [],
                    'completely_wasted_attempts': [],
                }

            # topic seen first time
            if topic_id is not None and topic_id not in topic_wise:
                topic_wise[topic_id] = {
                    'name': ontology[topic_id].name,
                    'correct': [],
                    'incorrect': [],
                    'not_attempted': [],
                    'marks': 0,
                    'time': 0,
                    'maximum_marks': 0,
                    'perfect_attempts': [],
                    'wasted_attempts': [],
                    'overtime_attempts': [],
                    'completely_wasted_attempts': [],
                }
                subject_wise[subject_id]['topic_ids'].append(topic_id)

            if subject_id not in marking_scheme[target_exam]:
                # subject id not added in marking scheme config, indicates config errors
                print 'subject id %s not added in marking scheme config, indicates config errors' % str(
                    subject_id)
                continue

            if question.type not in marking_scheme[target_exam][subject_id]:
                # question type not added for subject in marking scheme config, indicates config errors
                print 'question type %s not added for subject in marking scheme config, indicates config errors' % str(
                    question.type)
                continue

            print '-----------------------------------'
            print question.correct_options
            print '-----------------------------------'

            # if not attempted
            if len(value['options']) == 0:
                marks = marking_scheme[target_exam][subject_id][
                    question.type]['not_attempted']
                value['marks'] = marks
                value['is_correct'] = False
                subject_wise[subject_id]['not_attempted'].append(question.id)
                if topic_id is not None:
                    topic_wise[topic_id]['not_attempted'].append(question.id)

            # if correct
            elif set(question.correct_options) == (set(value['options'])):
                marks = marking_scheme[target_exam][subject_id][
                    question.type]['correct']
                value['marks'] = marks
                value['is_correct'] = True
                subject_wise[subject_id]['correct'].append(question.id)
                if topic_id is not None:
                    topic_wise[topic_id]['correct'].append(question.id)
                if value['time'] < question.average_time + question_overtime:
                    subject_wise[subject_id]['perfect_attempts'].append(
                        question_id)
                    if topic_id is not None:
                        topic_wise[topic_id]['perfect_attempts'].append(
                            question_id)
                    perfect_attempts.append(question.id)
                else:
                    subject_wise[subject_id]['overtime_attempts'].append(
                        question_id)
                    if topic_id is not None:
                        topic_wise[topic_id]['overtime_attempts'].append(
                            question_id)
                    overtime_attempts.append(question.id)

            # if incorrect
            else:
                marks = marking_scheme[target_exam][subject_id][
                    question.type]['incorrect']
                value['marks'] = marks
                value['is_correct'] = False
                subject_wise[subject_id]['incorrect'].append(question.id)
                if topic_id is not None:
                    topic_wise[topic_id]['incorrect'].append(question.id)
                if value['time'] <= question.average_time:
                    subject_wise[subject_id]['wasted_attempts'].append(
                        question_id)
                    if topic_id is not None:
                        topic_wise[topic_id]['wasted_attempts'].append(
                            question_id)
                    wasted_attempts.append(question.id)
                else:
                    subject_wise[subject_id][
                        'completely_wasted_attempts'].append(question_id)
                    if topic_id is not None:
                        topic_wise[topic_id][
                            'completely_wasted_attempts'].append(question_id)
                    completely_wasted_attempts.append(question.id)

            for duration in value['durations']:
                duration_key = self.get_duration_key(duration)
                if duration_key is not None:
                    durations_dict[duration_key] = question.id
                    durations_list.append(duration)

            correct_answer_marks = marking_scheme[target_exam][subject_id][
                question.type]['correct']
            subject_wise[subject_id]['time'] += value['time']
            subject_wise[subject_id]['marks'] += marks
            subject_wise[subject_id]['maximum_marks'] += correct_answer_marks
            if topic_id is not None:
                topic_wise[topic_id]['time'] += value['time']
                topic_wise[topic_id]['marks'] += marks
                topic_wise[topic_id]['maximum_marks'] += correct_answer_marks

            total_marks += marks
            maximum_marks += correct_answer_marks

        total_time = 0
        total_correct = 0
        total_incorrect = 0
        total_not_attempted = 0
        overall_correct_q_ids = []
        overall_incorrect_q_ids = []
        total_ideal_time = 0
        total_taken_time = 0

        for sub in subject_wise.values():
            overall_correct_q_ids.extend(sub['correct'])
            overall_incorrect_q_ids.extend(sub['incorrect'])
            sub['accuracy'] = round(
                len(sub['correct']) * 100.0 /
                (len(sub['correct']) + len(sub['incorrect'])),
                2) if (len(sub['correct']) +
                       len(sub['incorrect'])) > 0 else 0.0
            total_time += sub['time']
            total_correct += len(sub['correct'])
            total_incorrect += len(sub['incorrect'])
            total_not_attempted += len(sub['not_attempted'])

        overall_attempted_count = len(overall_correct_q_ids) + len(
            overall_incorrect_q_ids)
        overall_accuracy = round(
            len(overall_correct_q_ids) * 100.0 /
            overall_attempted_count, 2) if overall_attempted_count > 0 else 0.0

        for q_id in overall_correct_q_ids + overall_incorrect_q_ids:
            q = questions[int(q_id)]
            total_ideal_time += q.average_time
            total_taken_time += answers[str(q_id)]['time']
        overall_speed = total_ideal_time - total_taken_time

        num_subjects = len(subject_wise.keys())
        attempt_order_time_window_length = total_time / (num_subjects * 10)
        sorted_durations_list = sorted(durations_list, key=lambda d: d[0])
        subjects_attempt_order = []
        if int(attempt_order_time_window_length) > 0:
            for current_time_window_start in xrange(
                    0, int(math.ceil(total_time)),
                    int(attempt_order_time_window_length)):
                current_time_window_end = current_time_window_start + attempt_order_time_window_length
                i = -1
                j = -1
                for index, duration in enumerate(sorted_durations_list):
                    if len(duration) != 2:
                        continue
                    # if current_time_window_start lies in the current duration
                    if duration[0] <= current_time_window_start < duration[1]:
                        i = index
                    # if current_time_window_end lies in the current duration
                    if duration[0] < current_time_window_end <= duration[1]:
                        j = index
                        break

                # if time window start and end lie inside test duration
                if i != -1 and j != -1:
                    sub = []
                    for d in sorted_durations_list[i:j + 1]:
                        question_id = durations_dict[self.get_duration_key(d)]
                        question = questions[question_id]
                        sub.append(question.ontology[0])
                    c = Counter(sub)
                    subjects_attempt_order.append(c.most_common(1)[0][0])

                # if time window start lies inside test duration but time window end does not
                elif i != -1 and j == -1:
                    sub = []
                    for d in sorted_durations_list[i:]:
                        question_id = durations_dict[self.get_duration_key(d)]
                        question = questions[question_id]
                        sub.append(question.ontology[0])
                    c = Counter(sub)
                    subjects_attempt_order.append(c.most_common(1)[0][0])

        attempted_mock_test.answers = json.dumps(answers)
        attempted_mock_test.score = total_marks
        attempted_mock_test.analysis = json.dumps({
            'subjects': subject_wise,
            'topics': topic_wise,
            'perfect': perfect_attempts,
            'overtime': overtime_attempts,
            'wasted': wasted_attempts,
            'completely_wasted': completely_wasted_attempts,
            'total_marks': total_marks,
            'maximum_marks': maximum_marks,
            'percentage_marks': round((total_marks * 100.0 / maximum_marks), 2) if maximum_marks > 0 else 0.0,
            'total_time': total_time,
            'total_correct': total_correct,
            'total_incorrect': total_incorrect,
            'total_not_attempted': total_not_attempted,
            'attempt_order_time_window_length': attempt_order_time_window_length,
            'subjects_attempt_order': subjects_attempt_order,
            'accuracy': overall_accuracy,
            'speed': overall_speed
        })

        db.session.add(attempted_mock_test)
        db.session.commit()
        upload_report_and_send_email.delay(attempted_mock_test.id)
        return {'attempted_mock_test': attempted_mock_test}
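# The 'answers' argument above is validated by answers_json_type, which is not included in
# this snippet. A minimal sketch of the payload shape the scoring loop assumes: question
# ids as string keys, chosen option indices, time spent in seconds, and [start, end]
# attempt durations. The numbers are illustrative only.
example_answers = {
    '101': {'options': [1], 'time': 45, 'durations': [[0, 45]]},
    '102': {'options': [], 'time': 0, 'durations': []},  # not attempted
    '103': {'options': [0, 2], 'time': 130, 'durations': [[45, 120], [700, 755]]},
}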
    def get_cumulative_analysis(student_id, institute_id=None):
        # get mock tests by student which have completed
        if institute_id is not None:
            batches = Batch.get_filtered(institute_id=institute_id)
            pushed_mock_tests = PushedMockTest.query.filter(
                PushedMockTest.batch_id.in_([b.id for b in batches]))
            attempted_mock_tests = AttemptedMockTest.query.filter(
                AttemptedMockTest.student_id == student_id,
                AttemptedMockTest.score != None,
                AttemptedMockTest.pushed_mock_test_id.in_(
                    [p.id for p in pushed_mock_tests])).all()
        else:
            attempted_mock_tests = AttemptedMockTest.query.filter(
                AttemptedMockTest.student_id == student_id,
                AttemptedMockTest.score != None).all()
        mock_test_ids = [amt.mock_test_id for amt in attempted_mock_tests]
        mock_tests = MockTest.query.filter(
            MockTest.id.in_(mock_test_ids)).all()
        question_ids = set()
        overall_correct_q_ids = set()
        overall_incorrect_q_ids = set()
        overall_not_attempted_q_ids = set()
        total_ideal_time = 0
        total_taken_time = 0

        for amt in attempted_mock_tests:
            analysis = json.loads(amt.analysis)
            subjects = analysis['subjects']
            for sid in subjects:
                overall_correct_q_ids.update(set(subjects[sid]['correct']))
                overall_incorrect_q_ids.update(set(subjects[sid]['incorrect']))
                overall_not_attempted_q_ids.update(
                    set(subjects[sid]['not_attempted']))

        question_ids.update(overall_correct_q_ids)
        question_ids.update(overall_incorrect_q_ids)
        question_ids.update(overall_not_attempted_q_ids)
        questions = {
            q.id: q
            for q in Question.get_filtertered_list(
                include_question_ids=list(question_ids))['questions']
        }
        overall_attempted_count = len(overall_incorrect_q_ids) + len(
            overall_correct_q_ids)
        accuracy = round(
            len(overall_correct_q_ids) * 100.0 /
            overall_attempted_count, 2) if overall_attempted_count > 0 else 0.0

        for amt in attempted_mock_tests:
            answers = json.loads(amt.answers)
            for q_id, answer in answers.items():
                q_id = int(q_id)
                # if attempted question
                if len(answer['options']) != 0 and q_id in questions:
                    total_ideal_time += questions[q_id].average_time
                    total_taken_time += answer['time']

        overall_speed = total_ideal_time - total_taken_time

        return {
            'attempted_mock_tests': attempted_mock_tests,
            'mock_tests': mock_tests,
            'questions': questions.values(),
            'accuracy': accuracy,
            'speed': overall_speed
        }
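# get_duration_key is referenced throughout these snippets but not defined in them. Given
# the comment "dictionary with string value of duration as key" and the len(duration) != 2
# guard above, a plausible sketch (an assumption, not the project's actual helper) is:
def get_duration_key(duration):
    # a duration is a [start, end] pair of timestamps in seconds; anything else has no key
    if isinstance(duration, (list, tuple)) and len(duration) == 2:
        return str(duration)
    return None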
Example #9
    def get(self, *args, **kwargs):
        attempted_mock_test_id = kwargs['id']
        attempted_mock_test = AttemptedMockTest.query.filter(
            AttemptedMockTest.id == attempted_mock_test_id,
            AttemptedMockTest.pdf_report_url == None).first()
        if attempted_mock_test is None:
            return '404 Not Found', 404

        mock_test = MockTest.query.get(attempted_mock_test.mock_test_id)
        if mock_test is None:
            return '404 Not Found', 404

        if attempted_mock_test.answers is None:
            return '404 Not Found', 404

        page = request.args.get('page')

        ontology = {node.id: node for node in Ontology.get_all_nodes_of_tree()}

        analysis = json.loads(attempted_mock_test.analysis)
        for sid, data in analysis['subjects'].items():
            data['name'] = ontology[int(sid)].name

        analysis['cutoff'] = mock_test.cutoff
        analysis['percentile'] = AttemptedMockTestResource.get_percentile(
            attempted_mock_test)
        rank_colleges = AttemptedMockTestResource.get_rank_and_college(
            attempted_mock_test, mock_test)
        if rank_colleges is not None:
            analysis['expected_rank'], analysis[
                'expected_colleges'] = rank_colleges

        common_page_vars = {
            'page': page,
            'analysis': analysis,
            'mock_test_name': mock_test.name,
            'target_exam_name': app.config['TARGET_EXAMS'][mock_test.target_exam]
        }

        if page == 'page1':
            return render_template('pdf_report.html', **common_page_vars)

        if page in (None, 'page2'):
            MAXIMUM_TIME_WIDTH = 1000.0  # px
            MAXIMUM_TIME = mock_test.duration  # seconds
            ATTEMPT_TIME_DISPLAY_UNIT_SECONDS = 150  # seconds
            #ATTEMPT_TIME_DISPLAY_UNIT_WIDTH = (MAXIMUM_TIME_WIDTH/MAXIMUM_TIME)*ATTEMPT_TIME_DISPLAY_UNIT_SECONDS     # px
            subject_attempt_order = []
            time_window_chunk_length = int(
                math.floor(ATTEMPT_TIME_DISPLAY_UNIT_SECONDS /
                           analysis['attempt_order_time_window_length']))
            temp = []
            for i, sid in enumerate(analysis['subjects_attempt_order']):
                temp.append(sid)
                if (i + 1) % time_window_chunk_length == 0:
                    temp.append(sid)
                    c = Counter(temp)
                    subject_attempt_order.append(c.most_common(1)[0][0])
                    temp = []

            color_classes = ['info', 'primary', 'success', 'warning', 'danger']
            unique_subject_ids = list(set(subject_attempt_order))

            subject_colors = {
                sid: color_classes[unique_subject_ids.index(sid)]
                for sid in unique_subject_ids
            }
            total_time_bar_width = round(analysis['total_time'] *
                                         (MAXIMUM_TIME_WIDTH / MAXIMUM_TIME))
            subject_attempt_order = [{
                'name': ontology[sid].name,
                'id': sid,
                'color_class': subject_colors[sid],
                'width': 100.0 / len(subject_attempt_order)
            } for sid in subject_attempt_order]
            combined_subjects = []
            last_subject = None
            for i, s in enumerate(subject_attempt_order):
                if last_subject is not None:
                    if last_subject == s['id']:
                        combined_subjects[-1]['width'] += s['width']
                    else:
                        combined_subjects.append(s)
                        last_subject = s['id']
                else:
                    combined_subjects.append(s)
                    last_subject = s['id']

            subject_attempt_legend = [{
                'name': ontology[sid].name,
                'color': subject_colors[sid]
            } for sid in unique_subject_ids]

            mtqi = json.loads(mock_test.question_ids)
            sorted_subjects = OrderedDict(
                sorted(analysis['subjects'].items(),
                       key=lambda t: mtqi[t[0]]['order']))

            if page == 'page2':
                return render_template(
                    'pdf_report.html',
                    subject_attempt_order=combined_subjects,
                    total_time_bar_width=total_time_bar_width,
                    max_time_width=MAXIMUM_TIME_WIDTH,
                    duration=MAXIMUM_TIME,
                    subject_attempt_legend=subject_attempt_legend,
                    sorted_subjects=sorted_subjects,
                    **common_page_vars)

        if page in (None, 'page3', 'page4'):
            answers = json.loads(attempted_mock_test.answers)
            question_ids = answers.keys()
            questions = {
                q.id: q
                for q in Question.get_filtertered_list(
                    include_question_ids=question_ids)['questions']
            }

            # dictionary with string value of duration as key and value as question id
            durations_dict = {}
            # list with durations of questions
            durations_list = []

            for question_id, value in answers.items():
                for duration in value['durations']:
                    duration_key = self.get_duration_key(duration)
                    if duration_key is not None:
                        durations_dict[duration_key] = int(question_id)
                        durations_list.append(duration)

            sorted_durations_list = sorted(durations_list, key=lambda d: d[0])
            if mock_test.duration <= 7200:
                UNIT_TIME_DURATION = 600  # seconds
            else:
                UNIT_TIME_DURATION = 900  # seconds
            UNIT_TIME_DURATION = 600
            # ordered list of list of questions attempted every `UNIT_TIME_DURATION`
            question_attempt_order = []

            for current_time_window_start in xrange(
                    0, int(math.ceil(analysis['total_time'])),
                    UNIT_TIME_DURATION):
                current_time_window_end = current_time_window_start + UNIT_TIME_DURATION
                i = -1
                j = -1
                for index, duration in enumerate(sorted_durations_list):
                    # if current_time_window_start lies in the current duration
                    if duration[0] <= current_time_window_start < duration[1]:
                        i = index
                    # if current_time_window_end lies in the current duration
                    if duration[0] < current_time_window_end <= duration[1]:
                        j = index
                        break

                # if time window start and end lie inside test duration
                if i != -1 and j != -1:
                    qs = []
                    for d in sorted_durations_list[i:j + 1]:
                        question_id = durations_dict[self.get_duration_key(d)]
                        qs.append(question_id)
                    question_attempt_order.append(qs)

                # if time window start lies inside test duration but time window end does not
                elif i != -1 and j == -1:
                    qs = []
                    for d in sorted_durations_list[i:]:
                        question_id = durations_dict[self.get_duration_key(d)]
                        qs.append(question_id)
                    question_attempt_order.append(qs)

            test_duration_minutes = mock_test.duration / 60.0
            if page in (None, 'page3'):
                difficulty_map = {
                    '1': 'Easy',
                    '2': 'Easy',
                    '3': 'Medium',
                    '4': 'Medium',
                    '5': 'Hard'
                }
                # ordered list of list of difficulty counts every `UNIT_TIME_DURATION`
                difficulty_attempt_order = []
                for question_list in question_attempt_order:
                    easy = {'count': 0, 'marks': 0}
                    medium = {'count': 0, 'marks': 0}
                    hard = {'count': 0, 'marks': 0}
                    for qid in question_list:
                        dif = difficulty_map[questions[qid].difficulty]
                        if dif == 'Easy':
                            easy['count'] += 1
                            easy['marks'] += answers[str(qid)]['marks']
                        if dif == 'Medium':
                            medium['count'] += 1
                            medium['marks'] += answers[str(qid)]['marks']
                        if dif == 'Hard':
                            hard['count'] += 1
                            hard['marks'] += answers[str(qid)]['marks']
                    difficulty_attempt_order.append({
                        'minutes': (len(difficulty_attempt_order) + 1) * (UNIT_TIME_DURATION / 60),
                        'easy': easy,
                        'medium': medium,
                        'hard': hard
                    })

                while len(difficulty_attempt_order) < 210 * 60 / UNIT_TIME_DURATION:
                    difficulty_attempt_order.append({
                        'minutes': (len(difficulty_attempt_order) + 1) *
                        (UNIT_TIME_DURATION / 60),
                        'easy': {
                            'count': 0,
                            'marks': 0
                        },
                        'medium': {
                            'count': 0,
                            'marks': 0
                        },
                        'hard': {
                            'count': 0,
                            'marks': 0
                        }
                    })

                if page == 'page3':
                    return render_template(
                        'pdf_report.html',
                        difficulty_attempt_order=difficulty_attempt_order,
                        test_duration_minutes=test_duration_minutes,
                        **common_page_vars)

            if page in (None, 'page4'):
                # ordered list of list of attempt quality counts every `UNIT_TIME_DURATION`
                aq_attempt_order = []
                for question_list in question_attempt_order:
                    perfect = {'count': 0, 'marks': 0}
                    overtime = {'count': 0, 'marks': 0}
                    wasted = {'count': 0, 'marks': 0}
                    completely_wasted = {'count': 0, 'marks': 0}
                    for qid in question_list:
                        if qid in analysis['perfect']:
                            perfect['count'] += 1
                            perfect['marks'] += answers[str(qid)]['marks']
                        if qid in analysis['overtime']:
                            overtime['count'] += 1
                            overtime['marks'] += answers[str(qid)]['marks']
                        if qid in analysis['wasted']:
                            wasted['count'] += 1
                            wasted['marks'] += answers[str(qid)]['marks']
                        if qid in analysis['completely_wasted']:
                            completely_wasted['count'] += 1
                            completely_wasted['marks'] += answers[str(
                                qid)]['marks']
                    aq_attempt_order.append({
                        'minutes': (len(aq_attempt_order) + 1) * (UNIT_TIME_DURATION / 60),
                        'perfect': perfect,
                        'overtime': overtime,
                        'wasted': wasted,
                        'completely_wasted': completely_wasted
                    })

                while len(aq_attempt_order) < 210 * 60 / UNIT_TIME_DURATION:
                    aq_attempt_order.append({
                        'minutes': (len(aq_attempt_order) + 1) *
                        (UNIT_TIME_DURATION / 60),
                        'perfect': {
                            'count': 0,
                            'marks': 0
                        },
                        'overtime': {
                            'count': 0,
                            'marks': 0
                        },
                        'wasted': {
                            'count': 0,
                            'marks': 0
                        },
                        'completely_wasted': {
                            'count': 0,
                            'marks': 0
                        }
                    })

                if page == 'page4':
                    return render_template(
                        'pdf_report.html',
                        aq_attempt_order=aq_attempt_order,
                        test_duration_minutes=test_duration_minutes,
                        **common_page_vars)

        if page in (None, 'page5'):
            optimum_accuracy = 40  # percent
            spent_time = {}
            answers = json.loads(attempted_mock_test.answers)
            for subject_id in analysis['subjects']:
                if subject_id not in spent_time:
                    spent_time[subject_id] = {
                        'name': ontology[int(subject_id)].name,
                        'correct': 0,
                        'incorrect': 0,
                        'not_attempted': 0,
                        'total_time': analysis['subjects'][subject_id]['time']
                    }
                for q_id in analysis['subjects'][subject_id]['correct']:
                    spent_time[subject_id]['correct'] += answers[str(
                        q_id)]['time']
                for q_id in analysis['subjects'][subject_id]['incorrect']:
                    spent_time[subject_id]['incorrect'] += answers[str(
                        q_id)]['time']
                for q_id in analysis['subjects'][subject_id]['not_attempted']:
                    spent_time[subject_id]['not_attempted'] += answers[str(
                        q_id)]['time']
            if page == 'page5':
                return render_template('pdf_report.html',
                                       optimum_accuracy=optimum_accuracy,
                                       spent_time=spent_time,
                                       **common_page_vars)

        return render_template(
            'pdf_report.html',
            subject_attempt_order=combined_subjects,
            total_time_bar_width=total_time_bar_width,
            max_time_width=MAXIMUM_TIME_WIDTH,
            duration=MAXIMUM_TIME,
            subject_attempt_legend=subject_attempt_legend,
            sorted_subjects=sorted_subjects,
            test_duration_minutes=test_duration_minutes,
            spent_time=spent_time,
            difficulty_attempt_order=difficulty_attempt_order,
            aq_attempt_order=aq_attempt_order,
            optimum_accuracy=optimum_accuracy,
            **common_page_vars)
Example #10
    def put(self, *args, **kwargs):
        parser = reqparse.RequestParser()
        parser.add_argument('name', type=str)
        parser.add_argument('is_locked', type=int, choices=[0, 1])
        parser.add_argument('question_ids',
                            type=MockTestList.question_ids_json_type)
        args = parser.parse_args()

        mock_test = MockTestModel.query.get(kwargs['id'])
        if mock_test is None:
            raise InvalidMockTestId

        if args['name'] is not None:
            mock_test.name = args['name']

        if args['question_ids'] is not None:
            args['question_ids'] = json.loads(args['question_ids'])

            for subject_id, data in args['question_ids'].items():
                seen_comprehensions = OrderedDict()
                comp_ques_ids = []
                q_ids = data['q_ids']
                question_data = Question.get_filtertered_list(
                    include_question_ids=q_ids)
                questions = question_data['questions']
                total = question_data['total']

                if total != len(q_ids):
                    raise InvalidQuestionId

                # sort questions in the order in which they appear in `q_ids`
                questions = sorted(questions, key=lambda q: q_ids.index(q.id))

                for question in questions:
                    if question.comprehension_id is not None:
                        # if comprehension not encountered before
                        if question.comprehension_id not in seen_comprehensions:
                            comp_ques_ids.append(question.id)
                            # comprehension questions in order of their ids, i.e order of their addition
                            comprehension_ques_ids = [
                                q.id for q in sorted(
                                    question.comprehension.questions.all(),
                                    key=lambda q: q.id)
                            ]
                            seen_comprehensions[
                                question.comprehension_id] = sorted(
                                    comprehension_ques_ids)

                i = 0
                for comp_id, ques_ids in seen_comprehensions.items():
                    ques_id_set = set(ques_ids)
                    ques_id_set.remove(comp_ques_ids[i])
                    # questions ids other than the first encountered question of this comprehension
                    other_comp_ques_ids = ques_id_set
                    # remove any of `other_comp_ques_ids` from `q_ids` if present
                    for id in other_comp_ques_ids:
                        try:
                            q_ids.remove(id)
                        except ValueError:
                            continue
                    # index of first encountered question of this comprehension
                    comp_ques_index = q_ids.index(comp_ques_ids[i])
                    # add all questions of this comprehension to `q_ids` starting from `comp_ques_index`
                    for index, id in enumerate(ques_ids):
                        q_ids.insert(comp_ques_index + index, id)
                    # finally remove the first encountered question of this comprehension from `q_ids`
                    q_ids.remove(q_ids[comp_ques_index + index + 1])
                    i += 1

            mock_test.question_ids = json.dumps(args['question_ids'])

        if args['is_locked'] is not None:
            mock_test.is_locked = args['is_locked'] == 1

        db.session.commit()

        return {'mock_test': mock_test}
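# question_ids_json_type (referenced from MockTestList above) is not included in this
# snippet. A minimal sketch of such a reqparse type, assuming it only has to check that the
# payload is a JSON object of the {subject_id: {"order": ..., "q_ids": [...]}} form used by
# the handler; the handler json.loads() it again, so the raw string is handed back.
import json

def question_ids_json_type(value):
    data = json.loads(value)
    if not isinstance(data, dict):
        raise ValueError('question_ids must be a JSON object')
    for subject_id, entry in data.items():
        if 'order' not in entry or 'q_ids' not in entry:
            raise ValueError('each subject entry needs "order" and "q_ids"')
    return value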