def process_expressions_from_answers(question):

    # update expressions from answers in question text and subparts
    # update dynamic text in question and solution text and subparts

    # assume every answer code defined in the question's answer options is valid
    valid_answer_codes = {}
    for ao in question.questionansweroption_set.all():
        answer_dict = {
            'answer_type': ao.answer_type,
            'split_symbols_on_compare': ao.split_symbols_on_compare,
        }
        valid_answer_codes[ao.answer_code] = answer_dict


    # use a local random number generator
    import random
    rng = random.Random()

    answer_data = return_new_answer_data(rng)
    answer_data['valid_answer_codes'] = valid_answer_codes
    answer_data['question'] = question

    from midocs.functions import return_new_auxiliary_data
    auxiliary_data = return_new_auxiliary_data()

    from django.template import Context

    update_context = Context({'question': question, 
                              '_process_dynamictext': True,
                              '_dynamictext_object': question,
                              '_process_expressions_from_answers': True,
                              '_answer_data_': answer_data,
                              '_sympy_local_dict_': {},
                              '_auxiliary_data_': auxiliary_data,
                          })


    from dynamictext.models import DynamicText
    DynamicText.initialize(question)
    question.expressionfromanswer_set.all().delete()
    render_results = render_question_text(
        {'question': question, 'show_help': True,
         'expression_context': update_context,
         })
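
# A minimal sketch (not part of the original module) of the data structure
# built above: `valid_answer_codes` maps each answer_code from the question's
# answer options to the settings used when comparing responses.  The codes
# and values shown here are hypothetical.
example_valid_answer_codes = {
    'answer1': {'answer_type': 0, 'split_symbols_on_compare': True},
    'answer2': {'answer_type': 0, 'split_symbols_on_compare': False},
}
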
    def render(self, context):
        kwargs = dict([(smart_text(k, 'ascii'), v.resolve(context))
                       for k, v in self.kwargs.items()])

        # object will be from kwargs if defined, else from context
        object = kwargs.get("object", context.get("_dynamictext_object"))
        if not object:
            return ""

        # while processing dynamic text, register this nodelist as a new
        # DynamicText entry for the object
        if context.get("_process_dynamictext"):
            DynamicText.add_new(object=object, nodelist=self.nodelist)

        # render the DynamicText entry whose number matches how many
        # dynamic text tags have been encountered so far in this rendering
        number_dynamictext = context.get("_number_dynamictext", 0)
        dynamictext = DynamicText.return_dynamictext(object,
                                                     number=number_dynamictext)

        if dynamictext:
            identifier = context.get("_dynamictext_instance_identifier", "")
            context['_number_dynamictext'] = number_dynamictext + 1
            return dynamictext.render(context=context,
                                      include_container=True,
                                      instance_identifier=identifier)
        else:
            return ""
def grade_question(question, question_identifier, answer_info, 
                   question_attempt,
                   user_responses, seed):

    # use a local random number generator to make sure the function is thread-safe
    import random
    rng = random.Random()

    random_outcomes = {}
    if question_attempt:
        if question_attempt.random_outcomes:
            random_outcomes = json.loads(question_attempt.random_outcomes)

    from .render_questions import setup_expression_context

    # first obtain context from just the normal expressions
    context_results = setup_expression_context(question, rng=rng, seed=seed,
                                user_responses = user_responses,
                                random_outcomes=random_outcomes,
                                )
    expr_context=context_results['expression_context']
    user_dict = expr_context['_user_dict_']


    # render any dynamic text from question
    from dynamictext.models import DynamicText
    num_dts = DynamicText.return_number_for_object(question)
    dynamictext_html=[]
    for i in range(num_dts):
        dt = DynamicText.return_dynamictext(question,i)
        rendered_text=dt.render(context=expr_context, 
                                instance_identifier=question_identifier)
        function_name = "%s_dynamictext_update" % \
                        dt.return_identifier(question_identifier)
        dynamictext_html.append((function_name,rendered_text))
        

    points_achieved=0
    total_points=0

    answer_results={}

    answer_results['identifier']=question_identifier
    answer_results['feedback']=""
    answer_results['answers'] = {}
    answer_results['dynamictext'] = dynamictext_html
    question_groups = {}

    # check correctness of each answer
    for answer_num in range(len(answer_info)):
        answer_type = answer_info[answer_num]['type']

        # if answer_type is None ignore
        # (used for state variables from applets and if answer_code is None)
        if answer_type is None:
            continue

        user_response = user_responses[answer_num]["response"]
        answer_code = answer_info[answer_num]['code']
        answer_points= answer_info[answer_num]['points']
        answer_identifier = answer_info[answer_num]['identifier']
        answer_group = answer_info[answer_num]['group']
        
        the_answer_info = answer_info[answer_num]

        total_points += answer_points
        
        if answer_group is None:
            answer_results['answers'][answer_identifier] = \
                compare_response_with_answer_code \
                (user_response=user_response, the_answer_info=the_answer_info,
                 question=question, expr_context=expr_context,
                 local_dict=user_dict)

            points_achieved += answer_points*\
                answer_results['answers'][answer_identifier]['percent_correct']

        else:
            try:
                group_list = question_groups[answer_group]
            except KeyError:
                group_list = []
                question_groups[answer_group]=group_list
            group_list.append(answer_num)


    for group in question_groups.keys():
        points_achieved += grade_question_group(
            group_list=question_groups[group], 
            user_responses=user_responses,
            answer_info=answer_info, question=question,
            expr_context=expr_context, local_dict=user_dict,
            answer_results=answer_results)

        
    # record if exactly correct, then normalize points achieved
    if total_points:
        answer_correct = (points_achieved == total_points*100)
        points_achieved /= 100.0
        credit = points_achieved/total_points
    else:
        answer_correct = False
        points_achieved /= 100.0
        credit = 0
    answer_results['correct'] = answer_correct
    if total_points == 0:
        total_score_feedback = "<p>No points possible for question</p>"
    elif answer_correct:
        total_score_feedback = "<p>Answer is correct</p>"
    elif points_achieved == 0:
        total_score_feedback = "<p>Answer is incorrect</p>"
    else:
        total_score_feedback = '<p>Answer is %s%% correct</p>' \
            % int(round(credit*100))
    answer_results['feedback'] = total_score_feedback + \
        answer_results['feedback']

    answer_results['credit']=credit

    return answer_results
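
# A minimal sketch (not from the original module) of how the dictionary
# returned by grade_question might be consumed, for example when building a
# JSON payload for an ajax grading view.  Only the keys set above
# ('identifier', 'feedback', 'answers', 'dynamictext', 'correct', 'credit')
# are relied on; the helper name below is hypothetical.
def summarize_grade(answer_results):
    # each entry of 'answers' is assumed to include a 'percent_correct'
    # value, as in the ungrouped comparison above
    per_answer = {
        identifier: result['percent_correct']
        for identifier, result in answer_results['answers'].items()
    }
    return {
        'identifier': answer_results['identifier'],
        'credit': answer_results['credit'],
        'correct': answer_results['correct'],
        'feedback': answer_results['feedback'],
        'percent_correct_by_answer': per_answer,
    }
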
def render_question(question_dict, rng, solution=False, 
                    question_identifier="",
                    user=None, show_help=True,
                    assessment=None, 
                    assessment_seed=None, 
                    readonly=False, auto_submit=False, 
                    record_response=True,
                    allow_solution_buttons=False,
                    auxiliary_data=None,
                    show_post_user_errors=False,
                ):

    """
    Render question or solution by compiling text in expression context

    The rendering of the question is done in three steps
    1.  Evaluate all expressions to create the expression context
    2.  Render templates of question or solution text, including subparts
    3.  If question is computer graded, set up conditions for submitting
        and recording answer.

    Input arguments (question, question_set, seed, question_attempt, and
    response are read from the question_dict argument rather than passed
    directly)
    - question: the Question instance to be rendered
    - rng: the random number generator instance to use
    - seed: the random number generator seed.
      Used for setting up the expression context.
      If seed is None, then a seed is randomly generated, and the new
      seed is recorded so that the exact version can be reproduced by
      passing that seed back in
    - solution: if true, generate the solution.  Else generate the question.
    - question_identifier: should be a string that uniquely identifies
      this particular question among any others on the page
    - user: a User instance.  Used to determine if solution is viewable
      and for recording answers of computer graded questions
    - show_help: if true, show help (hints and reference pages).
    - assessment: if not None, indicates the Assessment instance
      in which question is being rendered.  Used to determine if solution is
      visible and for recording answers of computer graded questions
    - question_set: which assessment question_set the question belongs to.
      Used for recording answers of computer graded questions
    - assessment_seed: which assessment seed was used to generate assessment.
      Used for recording answers of computer graded questions
    - prefilled_responses: a list containing responses for answer blanks.
      Useful for redisplaying student answers
    - readonly: if true, then all answer blanks are readonly.
      Useful with prefilled answers.
    - auto_submit: automatically submit answers (instead of submit button)
      Useful with prefilled answers
    - record_response: if true, record answer upon submit
    - allow_solution_buttons: if true, allow a solution button to be displayed
      on computer graded questions
    - auxiliary_data: dictionary for information that should be accessible 
      between questions or outside questions.  Used, for example, 
      for information about applets and hidden sections embedded in text
    - show_post_user_errors: if true, display errors when evaluating
      expressions flagged as being post user response.  Even when shown,
      such errors do not cause the rendering success to be False
    - random_outcomes: dictionary, keyed by expression id, specifying the
      random results the expressions should obtain.  If the outcomes are
      valid and no condition fails, the random number generator is not used.

    The output is a question_data dictionary.  With the exception of
    question, success, rendered_text, and error_message, all entries
    are optional.  The entries are
    - question: the question that was rendered
    - success: true if question rendered without errors.
      If false, rendered text will still show as much of the question
      as was processed, but submit_button will not be set
    - error_message: text explaining all errors encountered
    - rendered_text: the results from rendering the main question text 
    - subparts: a list of dictionaries of results from rendering subparts
      Each dictionary has the following keys:
      - letter: the letter assigned to the subpart
      - rendered_text: the results from rendering the subpart text
      - help_available: true if there is help for subpart
      - reference_pages: a list of pages relevant to the subpart
      - hint_text: rendered hint text
    - help_available: true if there is help (hint or links to pages).
      If help_available is true, then the following are also included:
      - reference_pages: a list of pages relevant to the question
      - hint_text: rendered hint text
      - hint_template_error: true if there was an error rendering the hint text
    - identifier: the passed in string to identify the question
    - seed: the random number generator seed used to generate question
    - auto_submit: if true, automatically submit answers upon page load
    - submit_button: if true, include button to submit for computer grading
    - show_solution_button: if exists and set to true, then display a
      button to show the solution.  For show_solution_button to be true, 
      allow_solution_button must be true, the user must have permission 
      to view solution of question, and a solution must exist.
      In addition, if assessment is specified, then user must also have
      permission to view solution of assessment for show_solution_button
      to be set to true.
    - enable_solution_button: true if solution button should be enabled
      at the outset.  (Set true if not computer graded.)
    - inject_solution_url: url from which to retrieve solution
    - computer_grade_data: a pickled and base64 encoded dictionary of 
      information about the question to be sent to server with submission
      of results for computer grading.  Some entries are identical to above:
      - seed
      - identifier
      - show_solution_button
      - record_response
      - question_set
      - assessment_seed
      - course_code (of assessment from input)
      - assessment_code (of assessment from input)
      - answer_info: list of codes, points, answer type, identifier, 
        group, assigned expression, prefilled answer, and expression type
        of the answers in question
      - applet_counter: number of applets encountered so far 
        (not sure if need this)
   """


    question = question_dict['question']
    question_set = question_dict.get('question_set')
    seed = question_dict.get("seed")
    question_attempt = question_dict.get("question_attempt")
    response = question_dict.get("response")

    if seed is None:
        from mitesting.utils import get_new_seed
        seed=get_new_seed(rng)

    rng.seed(seed)


    random_outcomes={}

    # if have question attempt, load random outcomes and
    # latest responses from that attempt
    if question_attempt:
        if question_attempt.random_outcomes:
            random_outcomes = json.loads(question_attempt.random_outcomes)

    # if have response, load to be prefilled
    if response:
        prefilled_responses = json.loads(response.response)
    else:
        prefilled_responses = None


    # first, setup context due to expressions from question.
    # include any prefilled responses to answers
    context_results = setup_expression_context(question, rng=rng, seed=seed,
                                            user_responses=prefilled_responses,
                                            random_outcomes=random_outcomes)


    # if failed condition, then don't display the question
    # but instead give message that condition failed
    if context_results.get('failed_conditions'):
        question_data = {
            'question': question,
            'success': False,
            'error_message': mark_safe(
                '<p>'+context_results['failed_condition_message']+'</p>'),
            'rendered_text': mark_safe(
                "<p>Question cannot be displayed"
                + " due to failed condition.</p>"),
            'seed': seed,
        }
        
        # save new seed to question attempt so on next reload,
        # a new seed will be tried.
        if question_attempt and seed==question_attempt.seed:
            question_attempt.seed = context_results["seed"]

            # retry a few times in case of a transaction deadlock
            for trans_i in range(5):
                try:
                    with transaction.atomic(), reversion.create_revision():
                        question_attempt.save()
                except OperationalError:
                    if trans_i==4:
                        raise
                else:
                    break
            
        return question_data


    # if seed changed from resampling to avoid failed expression conditions
    if seed != context_results["seed"]:
        # if initial question seed matched that from question_attempt,
        # then record updated seed to reduce future resampling
        if question_attempt and seed==question_attempt.seed:
            question_attempt.seed = context_results["seed"]
            # will save changes below

    # set seed to be successful seed from rendering context
    seed = context_results['seed']

    # if have question attempt, save random_outcomes, if changed
    if question_attempt:
        ro_json = json.dumps(random_outcomes)
        if question_attempt.random_outcomes != ro_json:
            question_attempt.random_outcomes = ro_json
            # retry a few times in case of a transaction deadlock
            for trans_i in range(5):
                try:
                    with transaction.atomic(), reversion.create_revision():
                        question_attempt.save()
                except OperationalError:
                    if trans_i==4:
                        raise
                else:
                    break
    

    # record actual seed used in question_dict
    # not sure if need this
    question_dict['seed']=seed

    render_data = {
        'question': question, 'show_help': show_help, 
        'expression_context': context_results['expression_context'],
        'user': user, 'assessment': assessment
        }

    # Add auxiliary_data to context with key _auxiliary_data_
    # to avoid overwriting expressions
    render_data['expression_context']['_auxiliary_data_'] = auxiliary_data

    # set up dynamic text
    # context variables used for dynamic text tags
    from dynamictext.models import DynamicText
    render_data['expression_context']['_dynamictext_object']=question
    render_data['expression_context']['_dynamictext_instance_identifier']\
        = question_identifier
    # javascript used to update dynamic text
    num_dts = DynamicText.return_number_for_object(question)
    dynamictext_javascript=""
    for i in range(num_dts):
        dt = DynamicText.return_dynamictext(question,i)
        javascript_function=dt.return_javascript_render_function(
            mathjax=True, instance_identifier=question_identifier)
        dynamictext_javascript += "%s_dynamictext_update= %s\n" % \
                                  (dt.return_identifier(question_identifier),
                                   javascript_function)
    if dynamictext_javascript:
        dynamictext_javascript = mark_safe("\n<script>\n%s</script>\n" % \
                                           dynamictext_javascript)
        
    # answer data to keep track of
    # 1. possible answer_codes that are valid
    # 2. the answer_codes that actually appear in the question
    # 3. the multiple choices that actually appear in the question
    (valid_answer_codes, invalid_answers, invalid_answer_messages) =\
        return_valid_answer_codes(question, render_data['expression_context'])

    answer_data = { 'valid_answer_codes': valid_answer_codes,
                    'answer_info': [],
                    'question': question,
                    'question_identifier': question_identifier,
                    'prefilled_responses': prefilled_responses,
                    'readonly': readonly,
                    'error': bool(invalid_answers),
                    'answer_errors': invalid_answer_messages,
                    'rng': rng
                    }

    render_data['expression_context']['_answer_data_']= answer_data

    question_data = render_question_text(render_data, solution=solution)

    question_data.update({
        'identifier': question_identifier,
        'auto_submit': auto_submit,
        'seed': seed,
        'dynamictext_javascript': dynamictext_javascript,
    })

    # if have prefilled responses, check to see that the number matches the
    # number of answer blanks (template tag already checked if
    # the answer_codes matched for those answers that were found)
    # If so, log warning but otherwise ignore.
    if prefilled_responses:
        if len(prefilled_responses) != len(answer_data["answer_info"]):
            message = "Invalid number of previous responses.\nQuestion: %s"\
                      % question
            if assessment:
                message += "\nAssessment: %s" % assessment
            logger.warning(message)
    

    # If render or expression error, combine all error messages
    # for display in question template.
    question_data['error_message'] = ''

    question_data['success'] = True

    # errors from post user expression don't cause success to be marked as false
    # so that one can still submit new responses
    if (context_results.get('error_in_expressions_post_user')
        and show_post_user_errors):
        errors = context_results['expression_error_post_user']
        for expr in errors.keys():
            question_data['error_message'] += '<li>' + \
                    re.sub(r"\n", r"<br/>", errors[expr]) + '</li>'

    if question_data.get('render_error') \
            or context_results.get('error_in_expressions')\
            or answer_data.get('error'):
        # any other error triggers failure,
        # which prevents responses from being submitted
        question_data['success']=False
        if context_results.get('error_in_expressions'):
            errors = context_results['expression_error']
            for expr in errors.keys():
                question_data['error_message'] += '<li>' + \
                    re.sub(r"\n", r"<br/>", errors[expr]) + '</li>'
        if question_data.get('render_error'):
            for error_message in question_data["render_error_messages"]:
                question_data['error_message'] += \
                    '<li>%s</li>' % error_message
            del question_data['render_error']
        if answer_data.get('error'):
            for error_message in answer_data['answer_errors']:
                question_data['error_message'] += \
                    '<li>%s</li>' % error_message

    if question_data['error_message']:
        question_data['error_message'] = mark_safe(\
            "<ul>" + question_data['error_message'] + "</ul>")


    # if rendering a solution 
    # return without adding computer grading data or solution buttons
    if solution:
        return question_data
    

    # if have a question attempt, determine credit
    # also score if question_dict contains points for question set
    current_score=None
    if question_attempt:
        from mitesting.utils import round_and_int
        if question_attempt.credit is None:
            current_percent_credit=None
            current_score=0
        else:
            current_percent_credit = round_and_int(question_attempt.credit*100,1)
            current_score = round_and_int(question_attempt.credit*question_dict.get('points',0),2)

    else:
        current_percent_credit = None

    # record information about score and points in question_data
    # so is available in question_body.html template
    question_data['points']=question_dict.get('points')
    question_data['current_score']=current_score
    question_data['current_percent_credit']=current_percent_credit


    # if allow_solution_buttons is true, then determine if
    # solution is visible to user (ie. user has permissions)
    # and solution exists
    
    # solution is visible if user has permissions for the question and,
    # in the case when the question is part of an assessment,
    # also has permissions for the assessment
    # (not adjusted for privacy of other questions)

    show_solution_button = False
    if allow_solution_buttons:
    
        solution_visible = False
        if render_data.get('user') and \
                question.user_can_view(user=render_data['user'],solution=True):
            if render_data.get('assessment'):
                if render_data['assessment'].user_can_view(
                    user=render_data['user'], solution=True,
                    include_questions=False):
                    solution_visible=True
            else:
                solution_visible=True

        if solution_visible:
            # check if solution text exists in question or a subpart
            solution_exists=bool(question.solution_text)
            if not solution_exists:
                for subpart in question.questionsubpart_set.all():
                    if subpart.solution_text:
                        solution_exists = True
                        break

            if solution_exists:
                show_solution_button=True

    question_data['show_solution_button']=show_solution_button
    if show_solution_button:
        question_data['inject_solution_url'] = reverse(
            'miquestion:injectquestionsolution', kwargs={'question_id': question.id})
        question_data['enable_solution_button'] = not question.computer_graded \
                        or  (question.show_solution_button_after_attempts == 0)

    # if computer graded and answer data available,
    # add submit button (unless auto_submit or error)
    question_data['submit_button'] = question.computer_graded and\
        answer_data['answer_info'] and (not auto_submit) and\
        question_data['success']

    # set up computer grade data to be sent back to server on submit
    # computer grade data contains
    # - information about question (seed, identifier)
    # - information on grading (record answer and allow solution buttons)
    # - information about assessment (code, seed, and question_set)
    # - information about answer blanks found in template (codes and points)
    # - number of applets encountered so far (not sure if need this)

    computer_grade_data = {'seed': seed, 'identifier': question_identifier, 
                           'record_response': record_response,
                           'show_solution_button': show_solution_button}
    if assessment:
        computer_grade_data['course_code'] = assessment.course.code
        computer_grade_data['assessment_code'] = assessment.code
        computer_grade_data['assessment_seed'] = assessment_seed
        if question_set is not None:
            computer_grade_data['question_set'] = question_set

    if question_attempt:
        computer_grade_data['question_attempt_id'] = question_attempt.id
        
    if answer_data['answer_info']:
        computer_grade_data['answer_info'] \
            = answer_data['answer_info']

    # serialize and encode computer grade data to facilitate appending
    # to post data of http request sent when submitting answers
    import pickle, base64
    question_data['computer_grade_data'] = \
        base64.b64encode(pickle.dumps(computer_grade_data))

    return question_data
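

# A minimal sketch (not from the original module) of recovering the
# computer_grade_data dictionary on the server side when answers are
# submitted.  The helper name is hypothetical; decoding simply inverts the
# pickle.dumps + base64.b64encode performed above.
def decode_computer_grade_data(encoded_computer_grade_data):
    import base64
    import pickle
    # only unpickle data this server generated itself;
    # unpickling untrusted input is unsafe
    return pickle.loads(base64.b64decode(encoded_computer_grade_data))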