Example #1
    def post(self, request, *args, **kwargs):

        # Look up the question to grade
        question = self.get_object()
        
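        # Extract the 'cgd' field from the raw urlencoded request body;
        # it holds the base64-encoded, pickled computer_grade_data sent
        # along with the submission.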
        pairs = [s2 for s1 in request.body.split(b'&') for s2 in s1.split(b';')]
        cgd = None
        for name_value in pairs:
            if not name_value:
                continue
            nv = name_value.split(b'=', 1)
            if len(nv) != 2:
                continue
            
            if nv[0]==b'cgd':
                cgd = nv[1]
                break

        import pickle, base64
        computer_grade_data = pickle.loads(
            base64.b64decode(cgd))

        question_identifier = computer_grade_data['identifier']

        # seed used when setting up the context from the question expressions
        seed = computer_grade_data['seed']
        
        response_data = request.POST

        answer_info = computer_grade_data['answer_info']
        
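        # Pair each expected answer from answer_info with the submitted
        # form value answer_<identifier>, defaulting to an empty response.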
        user_responses = []
        for answer_num in range(len(answer_info)):
            answer_identifier = answer_info[answer_num]['identifier']
            user_responses.append({
                'identifier': answer_identifier,
                'code': answer_info[answer_num]['code'],
                'response': 
                response_data.get('answer_%s' % answer_identifier, "")})

        question_attempt=None
        question_attempt_id = computer_grade_data.get("question_attempt_id")

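        # If the submission references a question attempt, load it so the
        # response can be recorded against that attempt.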
        if question_attempt_id is not None:
            try:
                from micourses.models import QuestionAttempt
                question_attempt = QuestionAttempt.objects.get(
                    id=question_attempt_id)
            except QuestionAttempt.DoesNotExist:
                pass
        
        from .grade_question import grade_question
        answer_results=grade_question(
            question=question,
            question_identifier=question_identifier,
            question_attempt=question_attempt,
            answer_info=answer_info, 
            user_responses=user_responses, seed=seed)
        
        # increment number of attempts
        try:
            number_attempts = int(response_data['number_attempts_%s' % 
                                                question_identifier])
        except (KeyError, ValueError):
            number_attempts=0

        number_attempts+=1
        answer_results['number_attempts'] = number_attempts
        
        show_solution_button = computer_grade_data.get(
            'show_solution_button', False)

        enable_solution_button = False
        if show_solution_button and \
                question.show_solution_button_after_attempts and \
                number_attempts >= question.show_solution_button_after_attempts:
            enable_solution_button = True

        answer_results['enable_solution_button'] = enable_solution_button

        record_response = computer_grade_data['record_response'] 

        # if not recording the result of the question,
        # we're finished, so return response with the results
        if not (record_response and question_attempt):
            return JsonResponse(answer_results)


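        # Walk up from the question attempt to its content attempt,
        # content record, and content.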
        content_attempt = question_attempt.content_attempt_question_set\
                         .content_attempt
        content_record=content_attempt.record
        content = content_record.content

        # Verify that the logged-in user is the student of the content_record.
        # If not, don't record results.
        # (Important so that instructors viewing student results don't
        # inadvertently change the student's score.)
        if not content_record.enrollment \
               or request.user.courseuser != content_record.enrollment.student:
            return JsonResponse(answer_results)


        # Have question attempt of enrolled student.
        # Possible situations:
        # 1. Content is marked as not recording scores.
        # 2. Content is past due
        # 3. Content is not yet due
        # 4. Solution has been viewed
        # 5. Content is available and solution hasn't been viewed.
        #
        # In cases 1-4, an invalid response will be recorded,
        # and a message will be displayed to indicate the problem.
        # Score totals will not be updated.
        #
        # In case 5, if we have an invalid attempt (content or question attempt),
        # then the response will still be treated as invalid even though
        # the response itself will be marked as valid.
        # Display a message indicating the student should generate a new attempt.
        # Score totals will not be updated.
        #
        # In case 5, if we have a valid attempt (both content and question attempt),
        # then record the valid response and update scores.

        record_valid_response = True

        if not content.record_scores:
            record_valid_response = False

        from micourses.models import AVAILABLE,  NOT_YET_AVAILABLE, PAST_DUE
        
        assessment_availability = content.return_availability(content_record)

        if assessment_availability != AVAILABLE:
            record_valid_response = False

        # check if the student already viewed the solution;
        # if so, don't record the response as valid
        if question_attempt.solution_viewed:
            solution_viewed = True
            record_valid_response = False
        else:
            solution_viewed = False

        # Record response.
        # Invalid responses won't count toward score and 
        # won't be viewable by student
        from micourses.models import QuestionResponse

        # in case of a deadlock, try to save the answer (and recalculate the score)
        # up to five times
        for trans_i in range(5):
            try:
                with transaction.atomic():
                    QuestionResponse.objects.create(
                        question_attempt=question_attempt,
                        response=json.dumps(user_responses),
                        credit=answer_results['credit'],
                        valid = record_valid_response)
            except OperationalError:
                if trans_i==4:
                    raise
            else:
                break

        # if we did not have a valid attempt, treat the response as though
        # it were marked invalid, since it won't count.
        if not (content_attempt.valid and question_attempt.valid):
            record_valid_response = False

        answer_results['record_valid_response'] = record_valid_response

        if not content.record_scores:
            feedback_message = "Assessment not set up for recording answers.<br/>Answer not recorded."
        elif assessment_availability == PAST_DUE:
            current_tz = timezone.get_current_timezone()
            due = content.get_adjusted_due(content_record)
            due = current_tz.normalize(due.astimezone(current_tz))

            from micourses.utils import format_datetime
            feedback_message = "Due date %s of %s is past.<br/>Answer not recorded." % (format_datetime(due), content.get_title())

        elif assessment_availability == NOT_YET_AVAILABLE:
            feedback_message = "Assessment is not yet available. <br/>Answer not recorded."
        elif solution_viewed:
            feedback_message = "Solution for question already viewed for this attempt.<br/>Answer not recorded. <br/>Generate a new attempt to resume recording answers." 
        elif not (content_attempt.valid and question_attempt.valid):
            feedback_message = "The current assessment attempt is not valid.<br/>It might have been started before the assessment was available.<br/>Answer not recorded.<br/>Generate a new attempt or reload page to start recording answers."
        else:
            feedback_message = ""

        if record_valid_response:
            feedback_message += "Answer recorded for %s.<br/>Course: <a href=\"%s\">%s</a>" % (request.user,reverse('micourses:content_record', kwargs={'content_id': content.id, 'course_code': content.course.code} ), content.course)

        answer_results['feedback'] += "<p>%s</p>" % feedback_message


        # if we didn't record a valid response, don't update scores,
        # so return without setting score values
        if not record_valid_response:
            return JsonResponse(answer_results)
            

        from mitesting.utils import round_and_int
        question_attempt.refresh_from_db()
        if question_attempt.credit is None:
            answer_results['current_percent_credit']=0
        else:
            answer_results['current_percent_credit']=round_and_int(
                question_attempt.credit*100,1)

        content_attempt.refresh_from_db()
        if content_attempt.score is None:
            answer_results['attempt_score']=0
        else:
            answer_results['attempt_score']=round_and_int(
                content_attempt.score,1)
        
        content_record.refresh_from_db()
        if content_record.score is None:
            answer_results['content_score']=0
        else:
            answer_results['content_score']=round_and_int(
                content_record.score,1)

        return JsonResponse(answer_results)
Example #2
    def get_context_data(self, **kwargs):
        context = super(AssessmentOverview, self).get_context_data(**kwargs)

        # thread_content will be None
        # if assessment is not in thread and number in thread is 1
        try:
            thread_content = self.assessment.determine_thread_content(
                self.number_in_thread)
        except ObjectDoesNotExist:
            raise Http404("No assessment found") 
            
        context['thread_content']=thread_content
        context['number_in_thread'] = self.number_in_thread
        course = self.assessment.course
        context['course'] = course
        
        if thread_content:
            context['assessment_name'] = thread_content.get_title()
        else:
            context['assessment_name'] = self.assessment.name

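        # If the content is in the thread, report its point value and, for an
        # enrolled student, the recorded score and a link to the content record.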
        from mitesting.utils import round_and_int
        if thread_content:
            context['thread_content_points'] = round_and_int(
                thread_content.points)

            try: 
                student = self.request.user.courseuser
            except AttributeError:
                student = None

            if student:
                try:
                    cr= thread_content.contentrecord_set.get(
                        enrollment__student=student)
                except ObjectDoesNotExist:
                    cr = None

                if cr:
                    student_score = cr.score
                    if student_score is not None:
                        context['content_score']=round_and_int(student_score,1)
                    else:
                        context['content_score'] = '--'
                    context['have_user_score'] = True
                    context['record_url'] = reverse(
                        'micourses:content_record',
                        kwargs={'course_code': course.code,
                                'content_id': thread_content.id})
                    
            context['assigned'] = thread_content.assigned
            due = thread_content.get_adjusted_due(student=student)
            
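            # When the course adjusts due dates for attendance, display the due
            # date as a link to the adjusted-due-date calculation page.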
            if student and course.adjust_due_attendance and due:
                due_date_url = reverse(
                    'micourses:adjusted_due_calculation',
                    kwargs={'course_code': course.code,
                            'content_id' :thread_content.id }
                )

                from micourses.utils import format_datetime
                current_tz = timezone.get_current_timezone()
                due_string = format_datetime(current_tz.normalize(
                    due.astimezone(current_tz)))
                due = mark_safe('<a href="%s">%s</a>' % \
                                (due_date_url, due_string))
            context['due']=due

        # link to the assessment only if the user has permission to view it,
        # given the privacy level
        if self.assessment.user_can_view(self.user, solution=False):
            if self.number_in_thread > 1:
                get_string = "n=%s" % self.number_in_thread
            else:
                get_string=""

            if thread_content and thread_content.substitute_title:
                context['assessment_link'] = self.assessment.return_direct_link(
                    link_text=thread_content.substitute_title,
                    get_string=get_string)
            else:
                context['assessment_link'] = self.assessment.return_direct_link(
                    get_string=get_string)
        else:
            context['assessment_link'] = None

        # show the generate-course-attempt link if the user can administer
        # the course and thread content exists
        if thread_content and user_can_administer_assessment(
                self.user, course=course):
            context['generate_course_attempt_link'] = True
        else:
            context['generate_course_attempt_link'] = False


        # turn off Google analytics for localhost/development site
        context['noanalytics']=(settings.SITE_ID <= 2)
            
        return context
Example #3
    def get_context_data(self, **kwargs):
        context = super(AssessmentView, self).get_context_data(**kwargs)


        from midocs.functions import return_new_auxiliary_data
        auxiliary_data =  return_new_auxiliary_data()
        context['_auxiliary_data_'] = auxiliary_data

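        # Random number generator used to render the question list.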
        import random
        rng=random.Random()

        # show post-user-response errors only if the user has instructor permissions
        if user_has_given_assessment_permission_level(
                self.request.user, 2):
            show_post_user_errors=True
        else:
            show_post_user_errors=False

        from micourses.render_assessments import render_question_list
        rendered_list=render_question_list(
            self.assessment, self.question_list, rng=rng, 
            assessment_seed=self.assessment_seed, 
            user=self.request.user, 
            solution=self.solution,
            auxiliary_data = auxiliary_data,
            show_post_user_errors=show_post_user_errors)

        # if question_only is set, then view only that question
        if self.kwargs.get('question_only'):
            question_only = int(self.kwargs['question_only'])
            rendered_list=rendered_list[question_only-1:question_only]
            context['question_only'] = question_only
        context['rendered_list'] = rendered_list

        context['seed'] = self.assessment_seed

        # determine if there were any errors
        success=True
        question_errors=[]
        for (ind,q) in enumerate(rendered_list):
            if not q["question_data"]["success"]:
                success=False
                question_errors.append(str(ind+1))
        if not success:
            context['error_message'] = \
                "Errors occurred in the following questions: %s" %\
                ", ".join(question_errors)

        context['success'] = success
        
        context['generate_course_attempt_link'] = False
        context['show_solution_link'] = False

        course = self.assessment.course
        context['course'] = course
        
        if user_can_administer_assessment(self.request.user, course=course):
            if self.thread_content:
                context['generate_course_attempt_link'] = True
            if not self.solution:
                context['show_solution_link'] = True

        if self.thread_content:
            context['assessment_name'] = self.thread_content.get_title()
        else:
            context['assessment_name'] = self.assessment.name
        if self.solution:
            context['assessment_name'] += " solution"
        context['assessment_short_name'] = self.assessment.return_short_name()
        if self.solution:
            context['assessment_short_name'] += " sol."

        if self.version:
            context['version'] =  self.version
            context['assessment_name_with_version'] = "%s, version %s" % \
                        (context['assessment_name'], context['version'])
            context['assessment_short_name_with_version'] = "%s, version %s" % \
                        (context['assessment_short_name'], context['version'])
        else:
            context['version'] = ''
            context['assessment_name_with_version'] = context['assessment_name']
            context['assessment_short_name_with_version'] \
                = context['assessment_short_name']

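        # For enrolled users viewing content in the thread, show the due date;
        # for students it may be attendance-adjusted and linked to the
        # adjusted-due calculation page.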
        if self.course_enrollment and self.thread_content:
            if self.course_enrollment.role == STUDENT_ROLE and self.current_attempt:
                due = self.thread_content.get_adjusted_due(
                    self.current_attempt.record)

                if course.adjust_due_attendance and due:
                    due_date_url = reverse(
                        'micourses:adjusted_due_calculation',
                        kwargs={'course_code': course.code,
                                'content_id': self.thread_content.id }
                    )
                    from micourses.utils import format_datetime
                    current_tz = timezone.get_current_timezone()
                    due_string = format_datetime(current_tz.normalize(
                        due.astimezone(current_tz)))
                    due = mark_safe('<a href="%s">%s</a>' % \
                                               (due_date_url, due_string))
                context['due'] = due
            else:
                context['due'] = self.thread_content.get_adjusted_due()

        context['thread_content'] = self.thread_content
        context['number_in_thread'] = self.number_in_thread
        context['current_attempt'] = self.current_attempt

        context['users_attempt'] = False
        context['multiple_attempts'] = False
        context['attempt_url']=None
        context['record_url']=None


        # set the assessment date from current_attempt, else use the current time
        if self.current_attempt:
            context['assessment_date'] = self.current_attempt.attempt_began
        else:
            context['assessment_date'] = timezone.now()


        # Check if we have a current attempt that belongs to the user
        # (so we can show the score).
        # Create links to the record and attempts (if valid).

        if self.current_attempt and \
           self.current_attempt.record.enrollment == self.course_enrollment:

            context['users_attempt'] = True

            valid_attempt_list = list(
                self.current_attempt.record.attempts.filter(valid=True))
            context['multiple_attempts'] = len(valid_attempt_list)>1

            context['record_url'] = reverse(
                'micourses:content_record',
                kwargs={'course_code': course.code,
                        'content_id': self.thread_content.id})

            if self.current_attempt.valid:
                attempt_number = valid_attempt_list.index(self.current_attempt)\
                                 +1
                context['attempt_url'] = reverse(
                    'micourses:content_attempt', 
                    kwargs={'course_code': course.code,
                            'content_id': self.thread_content.id,
                            'attempt_number': attempt_number})

                # add question attempt urls to rendered_list question_data
                for (ind,q) in enumerate(rendered_list):
                    q["question_data"]["attempt_url"] = reverse(
                        'micourses:question_attempts', 
                        kwargs={'course_code': course.code, 
                                'content_id': self.thread_content.id, 
                                'attempt_number': attempt_number, 
                                'question_number': ind+1} )



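        # Round point totals and scores for display.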
        from mitesting.utils import round_and_int
        if self.thread_content:
            context['thread_content_points'] = round_and_int(
                self.thread_content.points)
        if self.current_attempt is None or self.current_attempt.score is None:
            context['attempt_score']=0
        else:
            context['attempt_score']=round_and_int(
                self.current_attempt.score,1)
        
        if self.current_attempt is None or \
           self.current_attempt.record.score is None:
            context['content_score']=0
        else:
            context['content_score']=round_and_int(
                self.current_attempt.record.score,1)


        # get list of the question numbers in assessment
        # if instructor or designer in course
        # if also staff, include links to admin pages
        if user_can_administer_assessment(self.request.user, course=course):
            question_numbers=[]
            if self.request.user.is_staff:
                context['assessment_admin_link'] = mark_safe(
                    "<p><a href='%s'>%s</a></p>" % (
                        reverse('admin:micourses_assessment_change',
                                args=(self.assessment.id,)),
                        'Admin link'))
            for q in rendered_list:
                # if staff, add link to admin page for question
                if self.request.user.is_staff:
                    question_numbers.append(
                        "<a href='%s'>%s</a>" % (
                            reverse('admin:mitesting_question_change',
                                    args=(q['question'].id,)),
                            q['question'].id)
                    )
                else:
                    question_numbers.append(str(q['question'].id))
            question_numbers = ", ".join(question_numbers)
            question_numbers = mark_safe(question_numbers)
        else:
            question_numbers=None
        context['question_numbers']=question_numbers

        # turn off Google analytics for localhost/development site
        context['noanalytics']=(settings.SITE_ID <= 2)

        from mitesting.utils import get_new_seed
        context['new_seed']=get_new_seed(rng)

        return context
Example #4
def render_question(question_dict, rng, solution=False, 
                    question_identifier="",
                    user=None, show_help=True,
                    assessment=None, 
                    assessment_seed=None, 
                    readonly=False, auto_submit=False, 
                    record_response=True,
                    allow_solution_buttons=False,
                    auxiliary_data=None,
                    show_post_user_errors=False,
                ):

    """
    Render question or solution by compiling text in expression context

    The rendering of the question is done in three steps
    1.  Evaluate all expressions to create the expression context
    2.  Render templates of question or solution text, including subparts
    3.  If question is computer graded, set up conditions for submitting
        and recording answer.

    Input arguments
    - question_dict: dictionary describing the question to render.
      It must contain the Question instance under the key 'question' and may
      also supply 'question_set', 'seed', 'question_attempt', and 'response'.
    - rng: the random number generator instance to use
    - seed: the random generator seed
      Used for setting up the expression context.
      If seed is None, then randomly generate a seed, recording the new
      seed so that the exact version can be reproduced by passing the seed in
    - solution: if true, generate the solution.  Else generate the question.
    - question_identifier: should be a string that uniquely identifies
      this particular question among any others on the page
    - user: a User instance.  Used to determine if solution is viewable
      and for recording answers of computer graded questions
    - show_help: if true, show help (hints and reference pages).
    - assessment: if not None, indicates the Assessment instance
      in which question is being rendered.  Used to determine if solution is
      visible and for recording answers of computer graded questions
    - question_set: which assessment question_set the question belongs to.
      Used for recording answers of computer graded questions
    - assessment_seed: which assessment seed was used to generate assessment.
      Used for recording answers of computer graded questions
    - prefilled_responses: a list containing responses for answer blanks.
      Useful for redisplaying student answers
    - readonly: if true, then all answer blanks are readonly.
      Useful with prefilled answers.
    - auto_submit: automatically submit answers (instead of submit button)
      Useful with prefilled answers
    - record_response: if true, record answer upon submit
    - allow_solution_buttons: if true, allow a solution button to be displayed
      on computer graded questions
    - auxiliary_data: dictionary for information that should be accessible 
      between questions or outside questions.  Used, for example, 
      for information about applets and hidden sections embedded in text
    - show_post_user_errors: if true, display errors when evaluating
      expressions flagged as being post user response.  Even if showing
      errors, such an error does not cause the rendering success to be False
    - random_outcomes: dictionary keyed by expression id that specifies
      the random results each expression should obtain (loaded from
      question_attempt.random_outcomes).  If valid and no condition fails,
      then the random number generator is not used.

    The output is a question_data dictionary.  With the exception of
    question, success, rendered_text, and error_message, all entries
    are optional.  The entries are
    - question: the question that was rendered
    - success: true if question rendered without errors.
      If false, rendered text will still show as much of the question
      as was processed, but submit_button will not be set
    - error_message: text explaining all errors encountered
    - rendered_text: the results from rendering the main question text 
    - subparts: a list of dictionaries of results from rendering subparts
      Each dictionary has the following keys:
      - letter: the letter assigned to the subpart
      - rendered_text: the results from rendering the subpart text
      - help_available: true if there is help for subpart
      - reference_pages: a list of pages relevant to the subpart
      - hint_text: rendered hint text
    - help_available: true if there is help (hint or links to pages).
      If help_available, then the following are included:
      - reference_pages: a list of pages relevant to the question
      - hint_text: rendered hint text
      - hint_template_error: true if error rendering hint text
    - identifier: the passed in string to identify the question
    - seed: the random number generator seed used to generate question
    - auto_submit: if true, automatically submit answers upon page load
    - submit_button: if true, include button to submit for computer grading
    - show_solution_button: if exists and set to true, then display a
      button to show the solution.  For show_solution_button to be true, 
      allow_solution_button must be true, the user must have permission 
      to view solution of question, and a solution must exist.
      In addition, if assessment is specified, then user must also have
      permission to view solution of assessment for show_solution_button
      to be set to true.
    - enable_solution_button: true if solution button should be enabled
      at the outset.  (Set true if not computer graded.)
    - inject_solution_url: url from which to retrieve solution
    - computer_grade_data: a pickled and base64 encoded dictionary of 
      information about the question to be sent to server with submission
      of results for computer grading.  Some entries are identical to above:
      - seed
      - identifier
      - show_solution_button
      - record_response
      - question_set
      - assessment_seed
      - course_code (of assessment from input)
      - assessment_code (of assessment from input)
      - answer_info: list of codes, points, answer type, identifier, 
        group, assigned expression, prefilled answer, and expression type
        of the answers in question
      - applet_counter: number of applets encountered so far 
        (not sure if need this)
   """


    question = question_dict['question']
    question_set = question_dict.get('question_set')
    seed = question_dict.get("seed")
    question_attempt = question_dict.get("question_attempt")
    response = question_dict.get("response")

    if seed is None:
        from mitesting.utils import get_new_seed
        seed=get_new_seed(rng)

    rng.seed(seed)


    random_outcomes={}

    # if we have a question attempt, load the random outcomes
    # recorded for that attempt
    if question_attempt:
        if question_attempt.random_outcomes:
            random_outcomes = json.loads(question_attempt.random_outcomes)

    # if we have a response, load it to prefill the answer blanks
    if response:
        prefilled_responses = json.loads(response.response)
    else:
        prefilled_responses = None


    # first, set up the context from the question's expressions,
    # including any prefilled responses to answers
    context_results = setup_expression_context(question, rng=rng, seed=seed,
                                            user_responses=prefilled_responses,
                                            random_outcomes=random_outcomes)


    # if a condition failed, don't display the question;
    # instead give a message that the condition failed
    if context_results.get('failed_conditions'):
        question_data = {
            'question': question,
            'success': False,
            'error_message': mark_safe(
                '<p>'+context_results['failed_condition_message']+'</p>'),
            'rendered_text': mark_safe(
                "<p>Question cannot be displayed"
                + " due to failed condition.</p>"),
            'seed': seed,
        }
        
        # save new seed to question attempt so on next reload,
        # a new seed will be tried.
        if question_attempt and seed==question_attempt.seed:
            question_attempt.seed = context_results["seed"]

            # repeat so that we can retry if we get a transaction deadlock
            for trans_i in range(5):
                try:
                    with transaction.atomic(), reversion.create_revision():
                        question_attempt.save()
                except OperationalError:
                    if trans_i==4:
                        raise
                else:
                    break
            
        return question_data


    # if seed changed from resampling to avoid failed expression conditions
    if seed != context_results["seed"]:
        # if initial question seed matched that from question_attempt,
        # then record updated seed to reduce future resampling
        if question_attempt and seed==question_attempt.seed:
            question_attempt.seed = context_results["seed"]
            # will save changes below

    # set seed to be successful seed from rendering context
    seed = context_results['seed']

    # if have question attempt, save random_outcomes, if changed
    if question_attempt:
        ro_json = json.dumps(random_outcomes)
        if question_attempt.random_outcomes != ro_json:
            question_attempt.random_outcomes = ro_json
            # repeat so that we can retry if we get a transaction deadlock
            for trans_i in range(5):
                try:
                    with transaction.atomic(), reversion.create_revision():
                        question_attempt.save()
                except OperationalError:
                    if trans_i==4:
                        raise
                else:
                    break
    

    # record actual seed used in question_dict
    # not sure if need this
    question_dict['seed']=seed

    render_data = {
        'question': question, 'show_help': show_help, 
        'expression_context': context_results['expression_context'],
        'user': user, 'assessment': assessment
        }

    # Add auxiliary_data to context with key _auxiliary_data_
    # to avoid overwriting expressions
    render_data['expression_context']['_auxiliary_data_'] = auxiliary_data

    # set up dynamic text
    # context variables used for dynamic text tags
    from dynamictext.models import DynamicText
    render_data['expression_context']['_dynamictext_object']=question
    render_data['expression_context']['_dynamictext_instance_identifier']\
        = question_identifier
    # javascript used to update dynamic text
    num_dts = DynamicText.return_number_for_object(question)
    dynamictext_javascript=""
    for i in range(num_dts):
        dt = DynamicText.return_dynamictext(question,i)
        javascript_function=dt.return_javascript_render_function(
            mathjax=True, instance_identifier=question_identifier)
        dynamictext_javascript += "%s_dynamictext_update= %s\n" % \
                                  (dt.return_identifier(question_identifier),
                                   javascript_function)
    if dynamictext_javascript:
        dynamictext_javascript = mark_safe("\n<script>\n%s</script>\n" % \
                                           dynamictext_javascript)
        
    # answer data to keep track of
    # 1. possible answer_codes that are valid
    # 2. the answer_codes that actually appear in the question
    # 3. the multiple choices that actually appear in the question
    (valid_answer_codes, invalid_answers, invalid_answer_messages) =\
        return_valid_answer_codes(question, render_data['expression_context'])

    answer_data = { 'valid_answer_codes': valid_answer_codes,
                    'answer_info': [],
                    'question': question,
                    'question_identifier': question_identifier,
                    'prefilled_responses': prefilled_responses,
                    'readonly': readonly,
                    'error': bool(invalid_answers),
                    'answer_errors': invalid_answer_messages,
                    'rng': rng
                    }

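    # Expose the answer bookkeeping in the expression context so the answer
    # template tags can record the answer blanks they render into answer_info.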
    render_data['expression_context']['_answer_data_']= answer_data

    question_data = render_question_text(render_data, solution=solution)

    question_data.update({
        'identifier': question_identifier,
        'auto_submit': auto_submit,
        'seed': seed,
        'dynamictext_javascript': dynamictext_javascript,
    })

    # if we have prefilled responses, check that their number matches the
    # number of answer blanks (the template tag already checked whether
    # the answer_codes matched for those answers that were found).
    # If not, log a warning but otherwise ignore.
    if prefilled_responses:
        if len(prefilled_responses) != len(answer_data["answer_info"]):
            message = "Invalid number of previous responses.\nQuestion: %s"\
                      % question
            if assessment:
                message += "\nAssessment: %s" % assessment
            logger.warning(message)
    

    # If render or expression error, combine all error messages
    # for display in question template.
    question_data['error_message'] = ''

    question_data['success'] = True

    # errors from post user expression don't cause success to be marked as false
    # so that one can still submit new responses
    if (context_results.get('error_in_expressions_post_user')
        and show_post_user_errors):
        errors = context_results['expression_error_post_user']
        for expr in errors.keys():
            question_data['error_message'] += '<li>' + \
                    re.sub(r"\n", r"<br/>", errors[expr]) + '</li>'

    if question_data.get('render_error') \
            or context_results.get('error_in_expressions')\
            or answer_data.get('error'):
        # any other error triggers failure,
        # which prevents responses from being submitted
        question_data['success']=False
        if context_results.get('error_in_expressions'):
            errors = context_results['expression_error']
            for expr in errors.keys():
                question_data['error_message'] += '<li>' + \
                    re.sub(r"\n", r"<br/>", errors[expr]) + '</li>'
        if question_data.get('render_error'):
            for error_message in question_data["render_error_messages"]:
                question_data['error_message'] += \
                    '<li>%s</li>' % error_message
            del question_data['render_error']
        if answer_data.get('error'):
            for error_message in answer_data['answer_errors']:
                question_data['error_message'] += \
                    '<li>%s</li>' % error_message

    if question_data['error_message']:
        question_data['error_message'] = mark_safe(\
            "<ul>" + question_data['error_message'] + "</ul>")


    # if rendering a solution 
    # return without adding computer grading data or solution buttons
    if solution:
        return question_data
    

    # if we have a question attempt, determine the credit,
    # and also the score if question_dict contains points for the question set
    current_score=None
    if question_attempt:
        from mitesting.utils import round_and_int
        if question_attempt.credit is None:
            current_percent_credit=None
            current_score=0
        else:
            current_percent_credit = round_and_int(question_attempt.credit*100,1)
            current_score = round_and_int(question_attempt.credit*question_dict.get('points',0),2)

    else:
        current_percent_credit = None

    # record information about score and points in question_data
    # so it is available in the question_body.html template
    question_data['points']=question_dict.get('points')
    question_data['current_score']=current_score
    question_data['current_percent_credit']=current_percent_credit


    # if allow_solution_buttons is true, then determine if
    # solution is visible to user (i.e., user has permissions)
    # and solution exists
    
    # solution is visible if user has permissions for question and, 
    # in the case when the question is part of an assessment, 
    # also has permissions for assessment 
    # (not adjusted for privacy of other questions)

    show_solution_button = False
    if allow_solution_buttons:
    
        solution_visible = False
        if render_data.get('user') and \
                question.user_can_view(user=render_data['user'],solution=True):
            if render_data.get('assessment'):
                if render_data['assessment'].user_can_view(
                    user=render_data['user'], solution=True,
                    include_questions=False):
                    solution_visible=True
            else:
                solution_visible=True

        if solution_visible:
            # check if solution text exists in question or a subpart
            solution_exists=bool(question.solution_text)
            if not solution_exists:
                for subpart in question.questionsubpart_set.all():
                    if subpart.solution_text:
                        solution_exists = True
                        break

            if solution_exists:
                show_solution_button=True

    question_data['show_solution_button']=show_solution_button
    if show_solution_button:
        question_data['inject_solution_url'] = reverse(
            'miquestion:injectquestionsolution', kwargs={'question_id': question.id})
        question_data['enable_solution_button'] = not question.computer_graded \
                        or  (question.show_solution_button_after_attempts == 0)

    # if computer graded and answer data available,
    # add submit button (unless auto_submit or error)
    question_data['submit_button'] = question.computer_graded and\
        answer_data['answer_info'] and (not auto_submit) and\
        question_data['success']

    # set up computer grade data to be sent back to server on submit
    # computer grade data contains
    # - information about question (seed, identifier)
    # - information on grading (record answer and allow solution buttons)
    # - information about assessment (code, seed, and question_set)
    # - information about answer blanks found in template (codes and points)
    # - number of applets encountered so far (not sure if need this)

    computer_grade_data = {'seed': seed, 'identifier': question_identifier, 
                           'record_response': record_response,
                           'show_solution_button': show_solution_button}
    if assessment:
        computer_grade_data['course_code'] = assessment.course.code
        computer_grade_data['assessment_code'] = assessment.code
        computer_grade_data['assessment_seed'] = assessment_seed
        if question_set is not None:
            computer_grade_data['question_set'] = question_set

    if question_attempt:
        computer_grade_data['question_attempt_id'] = question_attempt.id
        
    if answer_data['answer_info']:
        computer_grade_data['answer_info'] \
            = answer_data['answer_info']

    # serialize and encode computer grade data to facilitate appending
    # to post data of http request sent when submitting answers
    import pickle, base64
    question_data['computer_grade_data'] = \
        base64.b64encode(pickle.dumps(computer_grade_data))

    return question_data
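
# Illustrative sketch (not part of the original module): computer_grade_data is
# decoded on submission (see Example #1) with the inverse pair base64.b64decode
# and pickle.loads.  A minimal, self-contained round trip:
if __name__ == "__main__":
    import base64
    import pickle

    # keys taken from the computer_grade_data dictionary built above
    payload = {'seed': '1234', 'identifier': 'qa1',
               'record_response': True, 'show_solution_button': False}
    encoded = base64.b64encode(pickle.dumps(payload))
    assert pickle.loads(base64.b64decode(encoded)) == payload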