    def test_solution(self):
        
        self.qsa1.question_set=3
        self.qsa1.save()
        self.qsa2.question_set=7
        self.qsa2.save()
        self.qsa3.question_set=3
        self.qsa3.save()
        self.qsa4.question_set=7
        self.qsa4.save()
        
        valid_options=[[self.q1,self.q2],[self.q2,self.q1],[self.q1,self.q4],[self.q4,self.q1],
                       [self.q3,self.q2],[self.q2,self.q3],[self.q3,self.q4],[self.q4,self.q3]]


        qs = [self.q1, self.q2, self.q3, self.q4]

        for j in range(3):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            question_list = render_question_list(
                self.asmt, rng=self.rng, question_list = question_list,
                assessment_seed=get_new_seed(self.rng), solution=True)
            questions = [ql['question'] for ql in question_list]

            self.assertTrue(questions in valid_options)
            
            for (i,question_dict) in enumerate(question_list):
                self.assertEqual(
                    question_dict['question_data']['rendered_text'],
                    "Question number %i solution."
                    % (qs.index(question_dict['question'])+1))

    def test_no_question_groups_all_orders(self):
        self.qsa4.delete()
        
        qs = [self.q1, self.q2, self.q3, self.q4]
        valid_orders = []
        orders_used = []
        for i in range(3):
            for j in range(3):
                if i==j:
                    continue
                for k in range(3):
                    if k==i or k==j:
                        continue
                    valid_orders.append([qs[i], qs[j], qs[k]])
                    orders_used.append(False)

        for i in range(200):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            question_list = render_question_list(
                self.asmt, rng=self.rng, question_list = question_list,
                assessment_seed=get_new_seed(self.rng))
            for question_dict in question_list:
                self.assertEqual(question_dict['relative_weight'],1/3)
                self.assertFalse(question_dict['previous_same_group'])
            questions = [ql['question'] for ql in question_list]
            self.assertTrue(questions in valid_orders)
            one_used = valid_orders.index(questions)
            orders_used[one_used]=True
            
            if False not in orders_used:
                break

        self.assertTrue(False not in orders_used)

    def test_multiple_in_question_set(self):
        
        self.qsa1.question_set=3
        self.qsa1.save()
        self.qsa2.question_set=7
        self.qsa2.save()
        self.qsa3.question_set=3
        self.qsa3.save()
        self.qsa4.question_set=7
        self.qsa4.save()
        
        valid_options=[[self.q1,self.q2],[self.q2,self.q1],[self.q1,self.q4],[self.q4,self.q1],
                       [self.q3,self.q2],[self.q2,self.q3],[self.q3,self.q4],[self.q4,self.q3]]

        options_used = [False, False, False, False,
                        False, False, False, False]

        for j in range(200):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            question_list = render_question_list(
                self.asmt, rng=self.rng, question_list = question_list,
                assessment_seed=get_new_seed(self.rng))
            questions = [ql['question'] for ql in question_list]
            
            self.assertTrue(questions in valid_options)
            
            one_used = valid_options.index(questions)
            options_used[one_used]=True
            
            if False not in options_used:
                break

        self.assertTrue(False not in options_used)

    def test_with_weight(self):
        
        self.qsa1.question_set=3
        self.qsa1.save()
        self.qsa4.question_set=2
        self.qsa4.save()

        self.asmt.questionsetdetail_set.create(question_set=3,
                                               weight = 5)
        self.asmt.questionsetdetail_set.create(question_set=2,
                                               weight = 7.3)

        valid_options = [{self.q2,self.q1},{self.q2,self.q3},{self.q4,self.q1},{self.q4,self.q3}]

        options_used = [False, False, False, False]

        for i in range(100):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            
            relative_weights = {ql['relative_weight'] for ql in question_list}
            self.assertEqual(relative_weights, {5/12.3, 7.3/12.3})

            questions = {ql['question'] for ql in question_list}
            self.assertTrue(questions in valid_options)

            one_used = valid_options.index(questions)
            options_used[one_used]=True
            
            if False not in options_used:
                break

        self.assertTrue(False not in options_used)

    def test_multiple_questions_per_question_set(self):
        self.qsa1.question_set=2
        self.qsa1.save()
        self.qsa3.question_set=4
        self.qsa3.save()

        valid_options = [{self.q1,self.q3},{self.q1,self.q4},{self.q2,self.q3},{self.q2,self.q4}]

        options_used = [False, False, False, False]

        for i in range(100):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            questions = {ql['question'] for ql in question_list}
            self.assertTrue(questions in valid_options)

            one_used = valid_options.index(questions)
            options_used[one_used]=True
            
            if False not in options_used:
                break

        self.assertTrue(False not in options_used)


        self.qsa1.question_set=4
        self.qsa1.save()

        valid_options = [{self.q2,self.q1},{self.q2,self.q3},{self.q2,self.q4}]

        options_used = [False, False, False]

        for i in range(100):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            questions = {ql['question'] for ql in question_list}
            self.assertTrue(questions in valid_options)

            one_used = valid_options.index(questions)
            options_used[one_used]=True
            
            if False not in options_used:
                break

        self.assertTrue(False not in options_used)

    def test_groups_fixed_order(self):
        self.asmt.fixed_order=True
        self.asmt.save()

        self.asmt.questionsetdetail_set.create(question_set=1,
                                               group="apple")
        self.asmt.questionsetdetail_set.create(question_set=4,
                                               group="apple")
        for i in range(3):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            question_list = render_question_list(
                self.asmt, rng=self.rng, question_list = question_list,
                assessment_seed=get_new_seed(self.rng))
            questions = [ql['question'] for ql in question_list]
            self.assertEqual(questions, [self.q1,self.q2,self.q3,self.q4])
            psg = [ql['previous_same_group'] for ql in question_list]
            self.assertEqual(psg, [False,False,False,False])
            groups = [ql['group'] for ql in question_list]
            self.assertEqual(groups[0], "apple")
            self.assertEqual(groups[3], "apple")


        self.asmt.questionsetdetail_set.create(question_set=2,
                                               group="appl")
        self.asmt.questionsetdetail_set.create(question_set=3,
                                               group="appl")

        for i in range(3):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            question_list = render_question_list(
                self.asmt, rng=self.rng, question_list = question_list,
                assessment_seed=get_new_seed(self.rng))
            questions = [ql['question'] for ql in question_list]
            self.assertEqual(questions, [self.q1,self.q2,self.q3,self.q4])
            psg = [ql['previous_same_group'] for ql in question_list]
            self.assertEqual(psg, [False,False,True,False])
            groups = [ql['group'] for ql in question_list]
            self.assertEqual(groups, ["apple", "appl", "appl", "apple"])

    def test_no_question_groups_fixed_order(self):
        self.asmt.fixed_order=True
        self.asmt.save()

        qs = [self.q1, self.q2, self.q3, self.q4]
        for j in range(10):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            question_list = render_question_list(
                self.asmt, rng=self.rng, question_list = question_list,
                assessment_seed=get_new_seed(self.rng))
            for (i,question_dict) in enumerate(question_list):
                self.assertEqual(question_dict['question'], qs[i])
                self.assertEqual(question_dict['question_set'], i+1)
                self.assertEqual(question_dict['relative_weight'],1/4)
                qseed = int(question_dict['seed'])
                self.assertTrue(qseed >= 0 and qseed < 100000000000)
                self.assertEqual(question_dict['group'],"")
                self.assertEqual(
                    question_dict['question_data']['rendered_text'],
                    "Question number %s text." % (i+1))
                self.assertFalse(question_dict['previous_same_group'])
Example #8
    def render(self, context):
        # check if blank_style is set to 1
        blank_style=0
        try:
            blank_style = template.Variable("blank_style").resolve(context)
        except template.VariableDoesNotExist:
            pass
        
        if self.seed is None:
            seed=None
        else:
            seed = self.seed.resolve(context)
        video_code = self.video_code.resolve(context)
        # test if video with video_code exists
        try:
            thevideo=Video.objects.get(code=video_code)
        # if video does not exist, mark as broken. 
        except ObjectDoesNotExist:

            # if blank style, return a plain marker;
            # otherwise show that it is broken so it can be searched for
            if blank_style:
                return " BRKNQST "
            else:
                return "<p>[Broken Question, video not found]</p>"

        # render for each question associated with video
        html_string=""

        try:
            rng = context['_answer_data_']['rng']
        except Exception:
            # no rng available in the context; fall back to a fresh Random instance
            import random
            rng = random.Random()

        if seed is not None:
            rng.seed(seed)

        for videoquestion in thevideo.videoquestion_set.all():
            question = videoquestion.question
            # give each question its own seed so the questions render independently
            question_seed = get_new_seed(rng)
            html_string += _render_question(question, rng=rng,
                                            seed=question_seed,
                                            context=context)

        return html_string

    def test_one_question_per_question_set(self):
        
        for i in range(10):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            questions = {ql['question'] for ql in question_list}
            self.assertEqual(questions, {self.q1, self.q2, self.q3, self.q4})
            
            question_sets = sorted([ql['question_set'] for ql in question_list])
            self.assertEqual(question_sets, [1,2,3,4])

            relative_weights = [ql['relative_weight'] for ql in question_list]
            self.assertEqual(relative_weights, [1/4,1/4,1/4,1/4])
            
            for ql in question_list:
                seed = ql['seed']
                self.assertTrue(int(seed) >= 0)
                self.assertTrue(int(seed) <= 1000000000000)
Example #10
    def render(self, context):
        # check if blank_style is set to 1
        blank_style=0
        try:
            blank_style = template.Variable("blank_style").resolve(context)
        except template.VariableDoesNotExist:
            pass
        
        question_id = self.question_id.resolve(context)
        if self.seed is None:
            seed=None
        else:
            seed = self.seed.resolve(context)
        
        # test if question with question_id exists
        try:
            thequestion=Question.objects.get(id=question_id)
        # if question does not exist, mark as broken. 
        except ObjectDoesNotExist:

            # if blank style, return a plain marker;
            # otherwise show that it is broken so it can be searched for
            if blank_style:
                return " BRKNQST "
            else:
                return "<p>[Broken Question, question not found]</p>"
            
        try:
            rng = context['_answer_data_']['rng']
        except Exception:
            # no rng available in the context; fall back to a fresh Random instance
            import random
            rng = random.Random()


        if seed is None:
            seed = get_new_seed(rng)
            
        return _render_question(thequestion, rng=rng, seed=seed, 
                                context=context)

    def test_groups_random_order(self):
        self.asmt.questionsetdetail_set.create(question_set=1,
                                               group="apple")
        self.asmt.questionsetdetail_set.create(question_set=4,
                                               group="apple")
        qs = [self.q1, self.q2, self.q3, self.q4]
        for j in range(10):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            question_list = render_question_list(
                self.asmt, rng=self.rng, question_list = question_list,
                assessment_seed=get_new_seed(self.rng))
            hit_first_group_member=False
            expected_next_group_member=None
            for (i,question_dict) in enumerate(question_list):
                if hit_first_group_member:
                    self.assertTrue(question_dict['previous_same_group'])
                    self.assertEqual(question_dict['group'],'apple')
                    self.assertEqual(qs.index(question_dict['question'])+1, 
                                     expected_next_group_member)
                    hit_first_group_member = False
                        
                else:
                    self.assertFalse(question_dict['previous_same_group'])
                    if question_dict['group'] == 'apple':
                        hit_first_group_member = True
                        if qs.index(question_dict['question']) == 0:
                            expected_next_group_member = 4
                        else:
                            expected_next_group_member = 1

                self.assertEqual(
                    question_dict['question_data']['rendered_text'],
                    "Question number %i text." % (qs.index(question_dict['question'])+1))


        self.asmt.questionsetdetail_set.create(question_set=2,
                                               group="appl")
        self.asmt.questionsetdetail_set.create(question_set=3,
                                               group="appl")

        for j in range(10):
            question_list = get_question_list(self.asmt, rng=self.rng,
                                              seed=get_new_seed(self.rng))
            question_list = render_question_list(
                self.asmt, rng=self.rng, question_list = question_list,
                assessment_seed=get_new_seed(self.rng))
            hit_first_group_member=False
            expected_next_group_member=None
            for (i,question_dict) in enumerate(question_list):
                if hit_first_group_member:
                    self.assertTrue(question_dict['previous_same_group'])
                    self.assertEqual(question_dict['group'],group_found)
                    self.assertEqual(qs.index(question_dict['question'])+1, 
                                     expected_next_group_member)
                    hit_first_group_member = False
                        
                else:
                    self.assertFalse(question_dict['previous_same_group'])
                    group_found = question_dict['group']
                    if group_found == 'apple':
                        hit_first_group_member = True
                        if qs.index(question_dict['question']) == 0:
                            expected_next_group_member = 4
                        else:
                            expected_next_group_member = 1
                    elif group_found == 'appl':
                        hit_first_group_member = True
                        if qs.index(question_dict['question']) == 1:
                            expected_next_group_member = 3
                        else:
                            expected_next_group_member = 2
                        
                self.assertEqual(
                    question_dict['question_data']['rendered_text'],
                    "Question number %s text." % (qs.index(question_dict['question'])+1))
Example #12
    def form_valid(self, form):

        include_list= form.cleaned_data['include_list']
        avoid_list= form.cleaned_data['avoid_list']
        seed = form.cleaned_data['seed']
        version = form.cleaned_data['version_description']

        if seed == "":
            seed = str(timezone.now())

        assessment_datetime = form.cleaned_data['assessment_datetime']

        current_tz = timezone.get_current_timezone()
        assessment_datetime = current_tz.normalize(
            assessment_datetime.astimezone(current_tz))
        assessment_date = assessment_datetime.date()

        
        include_dict={}
        if include_list:
            for item in include_list.split(","):
                ind = int(item)
                include_dict[ind] = include_dict.get(ind,0)+1

        avoid_dict={}
        if avoid_list:
            for item in avoid_list.split(","):
                ind = int(item)
                avoid_dict[ind] = avoid_dict.get(ind,0)+1

        from micourses.models import ContentAttempt
                
        for cca_id in self.coursewide_attempts_include:
            try:
                cca = ContentAttempt.objects.get(id=cca_id)
            except ContentAttempt.DoesNotExist:
                continue

            if cca.record.content != self.thread_content:
                continue

            for qs in cca.question_sets.all():
                try:
                    qa = qs.question_attempts.latest()
                except ObjectDoesNotExist:
                    continue
                ind = qa.question.id
                include_dict[ind] = include_dict.get(ind,0)+1

        for cca_id in self.coursewide_attempts_avoid:
            try:
                cca = ContentAttempt.objects.get(id=cca_id)
            except ContentAttempt.DoesNotExist:
                continue

            if cca.record.content != self.thread_content:
                continue

            for qs in cca.question_sets.all():
                try:
                    qa = qs.question_attempts.latest()
                except ObjectDoesNotExist:
                    continue
                ind = qa.question.id
                avoid_dict[ind] = avoid_dict.get(ind,0)+1


        if include_dict or avoid_dict:
            new_seed = self.assessment.include_avoid_question_seed(
                include_dict=include_dict, avoid_dict=avoid_dict, 
                start_seed=seed)
        else:
            from mitesting.utils import get_new_seed
            new_seed=get_new_seed(seed=seed)


        if not version:
            version = new_seed[-3:]

        try:
            course_record = self.thread_content.contentrecord_set\
                                    .get(enrollment = None)
        except ObjectDoesNotExist:
            with transaction.atomic(), reversion.create_revision():
                course_record = self.thread_content.contentrecord_set\
                                    .create(enrollment = None)

        # create new course attempt;
        # in case of a transaction deadlock, retry up to five times
        for trans_i in range(5):
            try:
                with transaction.atomic(), reversion.create_revision():
                    course_attempt = course_record.attempts.create(
                        seed=new_seed, valid=True,
                        attempt_began = assessment_datetime,
                        version=version)

                    from micourses.render_assessments import get_question_list
                    question_list = get_question_list(
                        self.assessment, seed=new_seed,
                        thread_content=self.thread_content)

                    # create the content question sets and question attempts
                    with transaction.atomic(), reversion.create_revision():
                        for (i,q_dict) in enumerate(question_list):
                            ca_question_set = \
                                course_attempt.question_sets.create(
                                    question_number=i+1,
                                    question_set=q_dict['question_set'])
                            qa=ca_question_set.question_attempts.create(
                                question=q_dict['question'],
                                seed=q_dict['seed'])
                            q_dict['question_attempt'] = qa

            except OperationalError:
                if trans_i==4:
                    raise
            else:
                break

        new_url = "%s?content_attempt=%s&date=%s" % \
                  (reverse('miassess:assessment', 
                           kwargs={'course_code': self.course.code,
                                   'assessment_code': self.assessment.code}),
                   course_attempt.id, assessment_datetime)


        return HttpResponseRedirect(new_url)
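
A note on the include/avoid bookkeeping in form_valid above: the loops over
include_list and avoid_list simply count how many times each question id
occurs in a comma-separated string. Below is a minimal, self-contained sketch
of that counting step (the helper name and sample input are illustrative, not
part of the view above):

from collections import Counter

def count_question_ids(id_list_string):
    """Count occurrences of each question id in a comma-separated string."""
    if not id_list_string:
        return {}
    return dict(Counter(int(item) for item in id_list_string.split(",")))

# e.g. "3,5,3" -> {3: 2, 5: 1}, matching the include_dict/avoid_dict loops above
print(count_question_ids("3,5,3"))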
Example #13
    def get_context_data(self, **kwargs):
        context = super(AssessmentView, self).get_context_data(**kwargs)


        from midocs.functions import return_new_auxiliary_data
        auxiliary_data =  return_new_auxiliary_data()
        context['_auxiliary_data_'] = auxiliary_data

        import random
        rng=random.Random()

        # show post user response errors only if instructor permissions
        if user_has_given_assessment_permission_level(
                self.request.user, 2):
            show_post_user_errors=True
        else:
            show_post_user_errors=False

        from micourses.render_assessments import render_question_list
        rendered_list=render_question_list(
            self.assessment, self.question_list, rng=rng, 
            assessment_seed=self.assessment_seed, 
            user=self.request.user, 
            solution=self.solution,
            auxiliary_data = auxiliary_data,
            show_post_user_errors=show_post_user_errors)

        # if question_only is set, then view only that question
        if self.kwargs.get('question_only'):
            question_only = int(self.kwargs['question_only'])
            rendered_list=rendered_list[question_only-1:question_only]
            context['question_only'] = question_only
        context['rendered_list'] = rendered_list

        context['seed'] = self.assessment_seed

        # determine if there were any errors
        success=True
        question_errors=[]
        for (ind,q) in enumerate(rendered_list):
            if not q["question_data"]["success"]:
                success=False
                question_errors.append(str(ind+1))
        if not success:
            context['error_message'] = \
                "Errors occurred in the following questions: %s" %\
                ", ".join(question_errors)

        context['success'] = success
        
        context['generate_course_attempt_link'] = False
        context['show_solution_link'] = False

        course = self.assessment.course
        context['course'] = course
        
        if user_can_administer_assessment(self.request.user, course=course):
            if self.thread_content:
                context['generate_course_attempt_link'] = True
            if not self.solution:
                context['show_solution_link'] = True

        if self.thread_content:
            context['assessment_name'] = self.thread_content.get_title()
        else:
            context['assessment_name'] = self.assessment.name
        if self.solution:
            context['assessment_name'] += " solution"
        context['assessment_short_name'] = self.assessment.return_short_name()
        if self.solution:
            context['assessment_short_name'] += " sol."

        if self.version:
            context['version'] =  self.version
            context['assessment_name_with_version'] = "%s, version %s" % \
                        (context['assessment_name'], context['version'])
            context['assessment_short_name_with_version'] = "%s, version %s" % \
                        (context['assessment_short_name'], context['version'])
        else:
            context['version'] = ''
            context['assessment_name_with_version'] = context['assessment_name']
            context['assessment_short_name_with_version'] \
                = context['assessment_short_name']

        if self.course_enrollment and self.thread_content:
            if self.course_enrollment.role == STUDENT_ROLE and self.current_attempt:
                due = self.thread_content.get_adjusted_due(
                    self.current_attempt.record)

                if course.adjust_due_attendance and due:
                    due_date_url = reverse(
                        'micourses:adjusted_due_calculation',
                        kwargs={'course_code': course.code,
                                'content_id': self.thread_content.id }
                    )
                    from micourses.utils import format_datetime
                    current_tz = timezone.get_current_timezone()
                    due_string = format_datetime(current_tz.normalize(
                        due.astimezone(current_tz)))
                    due = mark_safe('<a href="%s">%s</a>' % \
                                               (due_date_url, due_string))
                context['due'] = due
            else:
                context['due'] = self.thread_content.get_adjusted_due()

        context['thread_content'] = self.thread_content
        context['number_in_thread'] = self.number_in_thread
        context['current_attempt'] = self.current_attempt

        context['users_attempt'] = False
        context['multiple_attempts'] = False
        context['attempt_url']=None
        context['record_url']=None


        # set date from current_attempt, else as now
        if self.current_attempt:
            context['assessment_date'] = self.current_attempt.attempt_began
        else:
            context['assessment_date'] = timezone.now()


        # Check if have current attempt that belongs to user
        # (so can show score)
        # Create links to record and attempts (if valid)

        if self.current_attempt and \
           self.current_attempt.record.enrollment == self.course_enrollment:

            context['users_attempt'] = True

            valid_attempt_list = list(
                self.current_attempt.record.attempts.filter(valid=True))
            context['multiple_attempts'] = len(valid_attempt_list)>1

            context['record_url'] = reverse(
                'micourses:content_record',
                kwargs={'course_code': course.code,
                        'content_id': self.thread_content.id})

            if self.current_attempt.valid:
                attempt_number = valid_attempt_list.index(self.current_attempt)\
                                 +1
                context['attempt_url'] = reverse(
                    'micourses:content_attempt', 
                    kwargs={'course_code': course.code,
                            'content_id': self.thread_content.id,
                            'attempt_number': attempt_number})

                # add question attempt urls to rendered_list question_data
                for (ind,q) in enumerate(rendered_list):
                    q["question_data"]["attempt_url"] = reverse(
                        'micourses:question_attempts', 
                        kwargs={'course_code': course.code, 
                                'content_id': self.thread_content.id, 
                                'attempt_number': attempt_number, 
                                'question_number': ind+1} )



        from mitesting.utils import round_and_int
        if self.thread_content:
            context['thread_content_points'] = round_and_int(
                self.thread_content.points)
        if self.current_attempt is None or self.current_attempt.score is None:
            context['attempt_score']=0
        else:
            context['attempt_score']=round_and_int(
                self.current_attempt.score,1)
        
        if self.current_attempt is None or \
           self.current_attempt.record.score is None:
            context['content_score']=0
        else:
            context['content_score']=round_and_int(
                self.current_attempt.record.score,1)


        # get list of the question numbers in assessment
        # if instructor or designer in course
        # if also staff, include links to admin pages
        if user_can_administer_assessment(self.request.user, course=course):
            question_numbers=[]
            if self.request.user.is_staff:
                context['assessment_admin_link'] = mark_safe(
                    "<p><a href='%s'>%s</a></p>" % (
                        reverse('admin:micourses_assessment_change',
                                args=(self.assessment.id,)),
                        'Admin link'))
            for q in rendered_list:
                # if staff, add link to admin page for question
                if self.request.user.is_staff:
                    question_numbers.append(
                        "<a href='%s'>%s</a>" % (
                            reverse('admin:mitesting_question_change',
                                    args=(q['question'].id,)),
                            q['question'].id)
                    )
                else:
                    question_numbers.append(str(q['question'].id))
            question_numbers = ", ".join(question_numbers)
            question_numbers = mark_safe(question_numbers)
        else:
            question_numbers=None
        context['question_numbers']=question_numbers

        # turn off Google analytics for localhost/development site
        context['noanalytics']=(settings.SITE_ID <= 2)

        from mitesting.utils import get_new_seed
        context['new_seed']=get_new_seed(rng)

        return context
Example #14
def render_question(question_dict, rng, solution=False, 
                    question_identifier="",
                    user=None, show_help=True,
                    assessment=None, 
                    assessment_seed=None, 
                    readonly=False, auto_submit=False, 
                    record_response=True,
                    allow_solution_buttons=False,
                    auxiliary_data=None,
                    show_post_user_errors=False,
                ):

    """
    Render question or solution by compiling text in expression context

    The rendering of the question is done in three steps
    1.  Evaluate all expressions to create the expression context
    2.  Render templates of question or solution text, including subparts
    3.  If question is computer graded, set up conditions for submitting
        and recording answer.

    Input arguments
    - question_dict: dictionary describing the question to render.
      Must contain the Question instance under 'question'; may also supply
      'question_set', 'seed', 'question_attempt', 'response', and 'points'.
    - rng: the random number generator instance to use
    - seed (from question_dict): the random number generator seed
      used for setting up the expression context.
      If seed is None, then a seed is randomly generated, and the new
      seed is recorded so that the exact version can be reproduced by
      passing the seed back in.
    - solution: if true, generate the solution.  Else generate the question.
    - question_identifier: should be a string that uniquely identifies
      this particular question among any others on the page
    - user: a User instance.  Used to determine if solution is viewable
      and for recording answers of computer graded questions
    - show_help: if true, show help (hints and reference pages).
    - assessment: if not None, indicates the Assessment instance
      in which question is being rendered.  Used to determine if solution is
      visible and for recording answers of computer graded questions
    - question_set (from question_dict): which assessment question_set the
      question belongs to.
      Used for recording answers of computer graded questions
    - assessment_seed: which assessment seed was used to generate assessment.
      Used for recording answers of computer graded questions
    - prefilled_responses (from the 'response' entry of question_dict): a list
      containing responses for answer blanks.
      Useful for redisplaying student answers
    - readonly: if true, then all answer blanks are readonly.
      Useful with prefilled answers.
    - auto_submit: automatically submit answers (instead of submit button)
      Useful with prefilled answers
    - record_response: if true, record answer upon submit
    - allow_solution_buttons: if true, allow a solution button to be displayed
      on computer graded questions
    - auxiliary_data: dictionary for information that should be accessible 
      between questions or outside questions.  Used, for example, 
      for information about applets and hidden sections embedded in text
    - show_post_user_errors: if true, display errors when evaluating
      expressions flagged as being post user response.  Even if showing
      errors, such an error does not cause the rendering success to be False
    - random_outcomes (loaded from question_attempt.random_outcomes): dictionary
      keyed by expression id that specifies the random results that should be
      obtained.  If valid and no condition fails, then the random number
      generator is not used.

    The output is a question_data dictionary.  With the exception of
    question, success, rendered_text, and error_message, all entries
    are optional.  The entries are
    - question: the question that was rendered
    - success: true if question rendered without errors.
      If false, rendered text will still show as much of the question
      as was processed, but submit_button will not be set
    - error_message: text explaining all errors encountered
    - rendered_text: the results from rendering the main question text 
    - subparts: a list of dictionaries of results from rendering subparts
      Each dictionary has the following keys:
      - letter: the letter assigned to the subpart
      - rendered_text: the results from rendering the subpart text
      - help_available: true if there is help for subpart
      - reference_pages: a list of pages relevant to the subpart
      - hint_text: rendered hint text
    - help_available: true if there is help (hint or links to pages).
      If help_available, then the following are included:
      - reference_pages: a list of pages relevant to the question
      - hint_text: rendered hint text
      - hint_template_error: true if error rendering hint text
    - identifier: the passed in string to identify the question
    - seed: the random number generator seed used to generate question
    - auto_submit: if true, automatically submit answers upon page load
    - submit_button: if true, include button to submit for computer grading
    - show_solution_button: if exists and set to true, then display a
      button to show the solution.  For show_solution_button to be true, 
      allow_solution_button must be true, the user must have permission 
      to view solution of question, and a solution must exist.
      In addition, if assessment is specified, then user must also have
      permission to view solution of assessment for show_solution_button
      to be set to true.
    - enable_solution_button: true if solution button should be enabled
      at the outset.  (Set true if not computer graded.)
    - inject_solution_url: url from which to retrieve solution
    - computer_grade_data: a pickled and base64 encoded dictionary of 
      information about the question to be sent to server with submission
      of results for computer grading.  Some entries are identical to above:
      - seed
      - identifier
      - show_solution_button
      - record_response
      - question_set
      - assessment_seed
      - course_code (of assessment from input)
      - assessment_code (of assessment from input)
      - answer_info: list of codes, points, answer type, identifier, 
        group, assigned expression, prefilled answer, and expression type
        of the answers in question
      - applet_counter: number of applets encountered so far 
        (not sure if need this)
   """


    question = question_dict['question']
    question_set = question_dict.get('question_set')
    seed = question_dict.get("seed")
    question_attempt = question_dict.get("question_attempt")
    response = question_dict.get("response")

    if seed is None:
        from mitesting.utils import get_new_seed
        seed=get_new_seed(rng)

    rng.seed(seed)


    random_outcomes={}

    # if have question attempt, load random outcomes and
    # latest responses from that attempt
    if question_attempt:
        if question_attempt.random_outcomes:
            random_outcomes = json.loads(question_attempt.random_outcomes)

    # if have response, load to be prefilled
    if response:
        prefilled_responses = json.loads(response.response)
    else:
        prefilled_responses = None


    # first, setup context due to expressions from question.
    # include any prefilled responses to answers
    context_results = setup_expression_context(question, rng=rng, seed=seed,
                                            user_responses=prefilled_responses,
                                            random_outcomes=random_outcomes)


    # if failed condition, then don't display the question
    # but instead give message that condition failed
    if context_results.get('failed_conditions'):
        question_data = {
            'question': question,
            'success': False,
            'error_message': mark_safe(
                '<p>'+context_results['failed_condition_message']+'</p>'),
            'rendered_text': mark_safe(
                "<p>Question cannot be displayed"
                + " due to failed condition.</p>"),
            'seed': seed,
        }
        
        # save new seed to question attempt so on next reload,
        # a new seed will be tried.
        if question_attempt and seed==question_attempt.seed:
            question_attempt.seed = context_results["seed"]

            # repeat so that we can retry if we hit a transaction deadlock
            for trans_i in range(5):
                try:
                    with transaction.atomic(), reversion.create_revision():
                        question_attempt.save()
                except OperationalError:
                    if trans_i==4:
                        raise
                else:
                    break
            
        return question_data


    # if seed changed from resampling to avoid failed expression conditions
    if seed != context_results["seed"]:
        # if initial question seed matched that from question_attempt,
        # then record updated seed to reduce future resampling
        if question_attempt and seed==question_attempt.seed:
            question_attempt.seed = context_results["seed"]
            # will save changes below

    # set seed to be successful seed from rendering context
    seed = context_results['seed']

    # if have question attempt, save random_outcomes, if changed
    if question_attempt:
        ro_json = json.dumps(random_outcomes)
        if question_attempt.random_outcomes != ro_json:
            question_attempt.random_outcomes = ro_json
            # repeat so that we can retry if we hit a transaction deadlock
            for trans_i in range(5):
                try:
                    with transaction.atomic(), reversion.create_revision():
                        question_attempt.save()
                except OperationalError:
                    if trans_i==4:
                        raise
                else:
                    break
    

    # record actual seed used in question_dict
    # not sure if need this
    question_dict['seed']=seed

    render_data = {
        'question': question, 'show_help': show_help, 
        'expression_context': context_results['expression_context'],
        'user': user, 'assessment': assessment
        }

    # Add auxiliary_data to context with key _auxiliary_data_
    # to avoid overwriting expressions
    render_data['expression_context']['_auxiliary_data_'] = auxiliary_data

    # set up dynamic text
    # context variables used for dynamic text tags
    from dynamictext.models import DynamicText
    render_data['expression_context']['_dynamictext_object']=question
    render_data['expression_context']['_dynamictext_instance_identifier']\
        = question_identifier
    # javascript used to update dynamic text
    num_dts = DynamicText.return_number_for_object(question)
    dynamictext_javascript=""
    for i in range(num_dts):
        dt = DynamicText.return_dynamictext(question,i)
        javascript_function=dt.return_javascript_render_function(
            mathjax=True, instance_identifier=question_identifier)
        dynamictext_javascript += "%s_dynamictext_update= %s\n" % \
                                  (dt.return_identifier(question_identifier),
                                   javascript_function)
    if dynamictext_javascript:
        dynamictext_javascript = mark_safe("\n<script>\n%s</script>\n" % \
                                           dynamictext_javascript)
        
    # answer data to keep track of
    # 1. possible answer_codes that are valid
    # 2. the answer_codes that actually appear in the question
    # 3. the multiple choices that actually appear in the question
    (valid_answer_codes, invalid_answers, invalid_answer_messages) =\
        return_valid_answer_codes(question, render_data['expression_context'])

    answer_data = { 'valid_answer_codes': valid_answer_codes,
                    'answer_info': [],
                    'question': question,
                    'question_identifier': question_identifier,
                    'prefilled_responses': prefilled_responses,
                    'readonly': readonly,
                    'error': bool(invalid_answers),
                    'answer_errors': invalid_answer_messages,
                    'rng': rng
                    }

    render_data['expression_context']['_answer_data_']= answer_data

    question_data = render_question_text(render_data, solution=solution)

    question_data.update({
        'identifier': question_identifier,
        'auto_submit': auto_submit,
        'seed': seed,
        'dynamictext_javascript': dynamictext_javascript,
    })

    # if have prefilled responses, check to see that the number matches the
    # number of answer blanks (template tag already checked if
    # the answer_codes matched for those answers that were found)
    # If so, log warning but otherwise ignore.
    if prefilled_responses:
        if len(prefilled_responses) != len(answer_data["answer_info"]):
            message = "Invalid number of previous responses.\nQuestion: %s"\
                      % question
            if assessment:
                message += "\nAssessment: %s" % assessment
            logger.warning(message)
    

    # If render or expression error, combine all error messages
    # for display in question template.
    question_data['error_message'] = ''

    question_data['success'] = True

    # errors from post user expression don't cause success to be marked as false
    # so that one can still submit new responses
    if (context_results.get('error_in_expressions_post_user')
        and show_post_user_errors):
        errors = context_results['expression_error_post_user']
        for expr in errors.keys():
            question_data['error_message'] += '<li>' + \
                    re.sub(r"\n", r"<br/>", errors[expr]) + '</li>'

    if question_data.get('render_error') \
            or context_results.get('error_in_expressions')\
            or answer_data.get('error'):
        # any other error triggers a failure,
        # which prevents responses from being submitted
        question_data['success']=False
        if context_results.get('error_in_expressions'):
            errors = context_results['expression_error']
            for expr in errors.keys():
                question_data['error_message'] += '<li>' + \
                    re.sub(r"\n", r"<br/>", errors[expr]) + '</li>'
        if question_data.get('render_error'):
            for error_message in question_data["render_error_messages"]:
                question_data['error_message'] += \
                    '<li>%s</li>' % error_message
            del question_data['render_error']
        if answer_data.get('error'):
            for error_message in answer_data['answer_errors']:
                question_data['error_message'] += \
                    '<li>%s</li>' % error_message

    if question_data['error_message']:
        question_data['error_message'] = mark_safe(\
            "<ul>" + question_data['error_message'] + "</ul>")


    # if rendering a solution 
    # return without adding computer grading data or solution buttons
    if solution:
        return question_data
    

    # if have a question attempt, determine credit
    # also score if question_dict contains points for question set
    current_score=None
    if question_attempt:
        from mitesting.utils import round_and_int
        if question_attempt.credit is None:
            current_percent_credit=None
            current_score=0
        else:
            current_percent_credit = round_and_int(question_attempt.credit*100,1)
            current_score = round_and_int(question_attempt.credit*question_dict.get('points',0),2)

    else:
        current_percent_credit = None

    # record information about score and points in question_data
    # so is available in question_body.html template
    question_data['points']=question_dict.get('points')
    question_data['current_score']=current_score
    question_data['current_percent_credit']=current_percent_credit


    # if allow_solution_buttons is true, then determine if
    # solution is visible to user (ie. user has permissions)
    # and solution exists
    
    # solution is visible if user has permissions for question and, 
    # in the case when the question is part of an assessment, 
    # also has permissions for assessment 
    # (not adjusted for privacy of other questions)

    show_solution_button = False
    if allow_solution_buttons:
    
        solution_visible = False
        if render_data.get('user') and \
                question.user_can_view(user=render_data['user'],solution=True):
            if render_data.get('assessment'):
                if render_data['assessment'].user_can_view(
                    user=render_data['user'], solution=True,
                    include_questions=False):
                    solution_visible=True
            else:
                solution_visible=True

        if solution_visible:
            # check if solution text exists in question or a subpart
            solution_exists=bool(question.solution_text)
            if not solution_exists:
                for subpart in question.questionsubpart_set.all():
                    if subpart.solution_text:
                        solution_exists = True
                        break

            if solution_exists:
                show_solution_button=True

    question_data['show_solution_button']=show_solution_button
    if show_solution_button:
        question_data['inject_solution_url'] = reverse(
            'miquestion:injectquestionsolution', kwargs={'question_id': question.id})
        question_data['enable_solution_button'] = not question.computer_graded \
                        or  (question.show_solution_button_after_attempts == 0)

    # if computer graded and answer data available,
    # add submit button (unless auto_submit or error)
    question_data['submit_button'] = question.computer_graded and\
        answer_data['answer_info'] and (not auto_submit) and\
        question_data['success']

    # set up computer grade data to be sent back to server on submit
    # computer grade data contains
    # - information about question (seed, identifier)
    # - information on grading (record answer and allow solution buttons)
    # - information about assessment (code, seed, and question_set)
    # - information about answer blanks found in template (codes and points)
    # - number of applets encountered so far (not sure if need this)

    computer_grade_data = {'seed': seed, 'identifier': question_identifier, 
                           'record_response': record_response,
                           'show_solution_button': show_solution_button}
    if assessment:
        computer_grade_data['course_code'] = assessment.course.code
        computer_grade_data['assessment_code'] = assessment.code
        computer_grade_data['assessment_seed'] = assessment_seed
        if question_set is not None:
            computer_grade_data['question_set'] = question_set

    if question_attempt:
        computer_grade_data['question_attempt_id'] = question_attempt.id
        
    if answer_data['answer_info']:
        computer_grade_data['answer_info'] \
            = answer_data['answer_info']

    # serialize and encode computer grade data to facilitate appending
    # to post data of http request sent when submitting answers
    import pickle, base64
    question_data['computer_grade_data'] = \
        base64.b64encode(pickle.dumps(computer_grade_data))

    return question_data
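
A note on computer_grade_data from render_question above: it is a dictionary
pickled and base64-encoded so that it can ride along in the form data posted
when answers are submitted. Below is a minimal sketch of the reverse step a
grading view could perform (the function name and sample payload are
illustrative; the actual grading view is not shown here):

import base64, pickle

def decode_computer_grade_data(encoded):
    """Reverse the base64 + pickle encoding applied at the end of render_question."""
    # pickle.loads should only be applied to trusted, server-generated data
    return pickle.loads(base64.b64decode(encoded))

# round trip of the encoding performed at the end of render_question
payload = {'seed': '12345', 'identifier': 'q1', 'record_response': True}
encoded = base64.b64encode(pickle.dumps(payload))
assert decode_computer_grade_data(encoded) == payload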
Example #15
def setup_expression_context(question, rng, seed, user_responses=None,
                             random_outcomes={}):
    """
    Set up the question context by parsing all expressions for question.
    Returns context that contains all evaluated expressions 
    with keys given by the expression names.

    Before evaluating expressions, initializes global dictionary
    with allowed sympy commands for the question.

    Random expressions are based on state of random instance rng set by seed.
    If multiple attempts are required to meet all conditions,
    new values of seed are randomly generated for each attempt.
    The successful seed is returned.

    The first step is to evaluate normal expressions, i.e., those that
    have not been flagged as post user response.

    user_responses is a list of dictionaries of user responses to answers
    embedded in the question.  If any answers have been marked to be
    assigned to expressions, the second step is to parse those responses
    using user_dict for local_dict and assign the result
    to the corresponding expression.

    The third step is to evaluate any expressions flagged as being 
    post user response.

    Both the local_dict and user_dict are added to the
    expression context.

    In addition, if some expressions were EXPRESSION_WITH_ALTERNATES,
    then the following are created:
    - alternate_dicts: a list of local_dicts with different alternates
    - alternate_exprs: a dictionary indexed by expression name, where each
      entry is a list of alternate versions of the expression.  This will 
      be created starting with the first EXPRESSION_WITH_ALTERNATES,
      and will continue being created for all subsequent expressions. 
    - alternate_funcs: a dictionary indexed by expression name, where each
      entry is a list of alternate versions of a FUNCTION.  This will be
      created for all FUNCTIONS once the first EXPRESSION_WITH_ALTERNATES
      is encountered.
    These lists and dictionaries are added to the expression context.

    Return a dictionary with the following:
    - expression_context: a Context() with mappings from the expressions
    - error_in_expressions: True if encountered any errors in normal expressions
    - error_in_expressions_post_user: the same but for post user expressions
    - expression_error: dictionary of error messages from normal expressions
    - expression_error_post_user: the same but for post user expressions
    - failed_conditions: True if failed conditions for all attempts
    - failed_condition_message: message of which expression last failed
    - seed: seed used in last attempt to generate context

    """

    rng.seed(seed)

    max_tries=500
    success=False

    failed_condition_message=""
    failed_conditions=True

    from mitesting.utils import get_new_seed

    for i in range(max_tries):

        if i>0:
            seed=get_new_seed(rng)
            rng.seed(seed)

            # remove any specifications for random outcomes
            # since they caused a failed condition
            random_outcomes.clear()

        expression_context = Context({})
        random_group_indices={}
        error_in_expressions = False
        expression_error = {}

        # initialize global dictionary using the commands
        # found in allowed_sympy_commands.
        # Also adds standard symbols to dictionary.
        local_dict = question.return_sympy_local_dict()
        user_dict = question.return_sympy_local_dict(
            user_response=True)
        alternate_dicts = []
        alternate_exprs = {}
        alternate_funcs = {}
        try:

            from mitesting.models import Expression
            # first processes the expressions that aren't flagged
            # as post user response
            for expression in question.expression_set\
                                      .filter(post_user_response=False):

                try:
                    evaluate_results=expression.evaluate(
                        local_dict=local_dict, 
                        user_dict=user_dict,
                        alternate_dicts = alternate_dicts, 
                        random_group_indices=random_group_indices,
                        rng=rng, random_outcomes=random_outcomes)
                # on FailedCondition, reraise to stop evaluating expressions
                except Expression.FailedCondition:
                    raise

                # for any other exception, record exception and
                # allow to continue processing expressions
                except Exception as exc:
                    error_in_expressions = True
                    expression_error[expression.name] = str(exc)
                    expression_context[expression.name] = '??'
                    if expression.expression_type == expression.RANDOM_WORD:
                        expression_context[expression.name + "_plural"] = "??"
                else:
                    # if random word, add singular and plural to context
                    if expression.expression_type == expression.RANDOM_WORD:
                        expression_evaluated\
                            =evaluate_results['expression_evaluated']
                        expression_context[expression.name] \
                            = expression_evaluated[0]
                        expression_context[expression.name + "_plural"] \
                            = expression_evaluated[1]
                    else:
                        expression_context[expression.name] \
                            = evaluate_results['expression_evaluated']
                        # the following lists will be empty until the
                        # first EXPRESSION_WITH_ALTERNATES is encountered
                        alternate_exprs[expression.name] \
                            = evaluate_results['alternate_exprs']
                        alternate_funcs[expression.name] \
                            = evaluate_results['alternate_funcs']


            # if we make it through all expressions without encountering
            # a failed condition, record that fact and
            # break out of the loop
            failed_conditions = False
            break

        # on FailedCondition, continue loop, but record
        # message in case it is final pass through loop
        except Expression.FailedCondition as exc:
            failed_condition_message = exc.args[0]

    # add state to the expression context as a convenience
    # for resetting state when not generating a regular expression.
    # Also, the sympy local dict is accessed from template tags
    expression_context['_sympy_local_dict_'] = local_dict
    expression_context['_user_dict_'] = user_dict
    expression_context['_alternate_dicts_'] = alternate_dicts
    expression_context['_alternate_exprs_'] = alternate_exprs
    expression_context['_alternate_funcs_'] = alternate_funcs

    error_in_expressions_post_user = False
    expression_error_post_user = {}

    # if conditions haven't failed, process user responses and
    # expressions flagged as post user response
    if not failed_conditions:
        # next processes any user responses
        # that are assigned to expressions
        from mitesting.sympy_customized import EVALUATE_NONE
        from mitesting.math_objects import math_object
        from mitesting.sympy_customized import parse_and_process
        from sympy import Symbol, Dummy

        from mitesting.models import QuestionAnswerOption
        import pickle, base64

        # ExpressionFromAnswer contains information about any
        # answers that were assigned to expressions
        for expression in question.expressionfromanswer_set.all():
            # will assign Dummy(default_value) if no response was given for
            # the answer or if there was an error parsing the response
            default_value= re.sub('_long_underscore_', '\uff3f',
                                  expression.default_value)

            math_expr= Dummy(default_value)

            answer_number=expression.answer_number

            try:
                response=user_responses[answer_number-1]
            except (IndexError, TypeError):
                pass
            else:
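                # only use the response if its answer code matches the code
                # recorded for this expression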
                if response['code']==expression.answer_code:
                    if expression.answer_type==\
                       QuestionAnswerOption.MULTIPLE_CHOICE:
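                        # answer_data holds a base64-encoded, pickled dict
                        # mapping choice identifiers to their text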
                        mc_dict=pickle.loads(base64.b64decode(expression.answer_data))
                        try:
                            response_text=mc_dict[int(response['response'])]
                        except (ValueError, KeyError):
                            response_text=default_value
                        math_expr=Symbol(response_text)
                    else:
                        try:
                            math_expr =  parse_and_process(
                                response['response'], 
                                local_dict=user_dict, 
                                split_symbols=\
                                expression.split_symbols_on_compare,
                                evaluate_level=EVALUATE_NONE,
                                assume_real_variables=expression.real_variables,
                                parse_subscripts = expression.parse_subscripts
                            )
                        except Exception:
                            # parsing failed; keep the Dummy default value
                            pass
            # add expression to local_dict and any alternate_dicts
            # that may have been created.
            local_dict[expression.name]=math_expr
            for alt_dict in alternate_dicts:
                alt_dict[expression.name]=math_expr
            # add to context 
            expression_context[expression.name] = \
                math_object(math_expr, evaluate_level=EVALUATE_NONE)

        # last, process expressions flagged as post user response
        for expression in question.expression_set\
                                  .filter(post_user_response=True):

            try:
                evaluate_results=expression.evaluate(
                    local_dict=local_dict, 
                    user_dict=user_dict,
                    alternate_dicts=alternate_dicts,
                    random_group_indices=random_group_indices,
                    rng=rng, random_outcomes=random_outcomes)

            # record exception and allow to continue processing expressions
            except Exception as exc:
                error_in_expressions_post_user = True
                expression_error_post_user[expression.name] = str(exc)
                expression_context[expression.name] = '??'
            else:
                expression_context[expression.name] \
                    = evaluate_results['expression_evaluated']
                # the following lists will be empty until the
                # first EXPRESSION_WITH_ALTERNATES is encountered
                alternate_exprs[expression.name] \
                    = evaluate_results['alternate_exprs']
                alternate_funcs[expression.name] \
                    = evaluate_results['alternate_funcs']

    results = {
        'error_in_expressions': error_in_expressions,
        'expression_error': expression_error,
        'error_in_expressions_post_user': error_in_expressions_post_user,
        'expression_error_post_user': expression_error_post_user,
        'failed_conditions': failed_conditions,
        'failed_condition_message': failed_condition_message,
        'expression_context': expression_context,
        'seed': seed,
        }


    return results


def get_question_list(assessment, seed, rng=None, thread_content=None,
                      questions_only=False):
    """
    Return list of questions for assessment, one for each question_set,
    along with additional information about each question.

    After initializing the random number generator with seed,
    randomly pick a question for each question set and determine
    the question's group.
    If the assessment is not set to fixed order,
    randomly order the chosen questions, keeping questions from the same
    group together.

    Each question is randomly assigned a seed, which is used to
    generate the question and/or solution, ensuring that the question
    and solution will match.


    Return a list of dictionaries, one for each question. 
    Each dictionary contains the following:
    - question_set: the question_set from which the question was drawn
    - question: the question chosen
    - seed: the seed to use to render the question
    - relative_weight: the relative weight of the question
    - points: if there is a thread_content, the relative weight converted to points
    - group: the group of the question, if specified
    - previous_same_group: True if group is same as that of previous question
    """

    if not rng:
        import random
        rng=random.Random()

    rng.seed(seed)

    question_list = []
    
    total_weight = 0

    from mitesting.utils import get_new_seed

    for question_set in assessment.question_sets():
        questions_in_set = assessment.questionassigned_set.filter(
            question_set=question_set)

        the_question=rng.choice(questions_in_set).question

        # generate a seed for the question
        # so that can have link to this version of question and solution
        question_seed=get_new_seed(rng)

        if questions_only:
            question_list.append({'question_set': question_set,
                                  'question': the_question,
                                  'seed': question_seed,
                              })
            continue

        # find question set detail, if it exists
        try:
            question_detail=assessment.questionsetdetail_set.get(
                question_set=question_set)
        except ObjectDoesNotExist:
            question_detail = None
        
        if question_detail:
            weight=question_detail.weight
            group=question_detail.group
        else:
            weight=1
            group=""

        total_weight += weight

        question_list.append({'question_set': question_set,
                              'question': the_question,
                              'seed': question_seed,
                              'relative_weight': weight,
                              'group': group,
                              'previous_same_group': False
        })


    if questions_only:
        return question_list

    # convert each weight to a relative weight.
    # if there is a thread_content, multiply the relative weight by the
    # assessment's points to get the question's points
    for q_dict in question_list:
        q_dict['relative_weight'] /= total_weight
        if thread_content and thread_content.points is not None:
            q_dict['points'] = q_dict["relative_weight"]*thread_content.points
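
    # For example (illustrative): with weights 1 and 2 and
    # thread_content.points == 9, the relative weights become 1/3 and 2/3,
    # giving the questions 3 and 6 points, respectively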

    if assessment.fixed_order:
        for i in range(1, len(question_list)):
            the_group = question_list[i]["group"]
            # if group is not blank and the same as previous group
            # mark as belonging to same group as previous question
            if the_group and question_list[i-1]["group"] == the_group:
                question_list[i]["previous_same_group"] = True
        return question_list

    # if not fixed order randomly shuffle questions
    # keep questions with same group together
    # i.e., first random shuffle groups, 
    # then randomly shuffle questions within each group
    # set 'previous_same_group' if previous question is from the same group

    # create a dictionary mapping each group to its question indices,
    # assigning a unique group name to each question with no group
    question_set_groups = {}
    for (ind,q) in enumerate(question_list):
        question_group = q['group']
        if question_group in question_set_groups:
            question_set_groups[question_group].append(ind)
        elif question_group:
            question_set_groups[question_group] = [ind]
        else:
            unique_no_group_name = '_no_group_%s' % ind
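            # e.g., '_no_group_2' for the question at index 2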
            question_set_groups[unique_no_group_name] = [ind]
            q['group']=unique_no_group_name

    # create list of randomly shuffled groups
    groups = list(question_set_groups.keys())
    rng.shuffle(groups)

    # for each group, shuffle its questions,
    # creating a cumulative list of the resulting question index order
    question_order =[]
    for group in groups:
        group_indices=question_set_groups[group]
        rng.shuffle(group_indices)
        question_order += group_indices

    # reorder the questions based on that order
    # also check if the previous question is from the same group
    question_list_shuffled =[]
    previous_group = None   # sentinel that cannot match any group name
    for i in question_order:
        q=question_list[i]
        this_group = q['group']
        q['previous_same_group'] = (this_group == previous_group)
        previous_group = this_group
        question_list_shuffled.append(q)

    return question_list_shuffled