Example #1
0
def _render_question(question, rng, seed, context):
    """
    Render one question's HTML body for embedding in a page.

    Keeps a running count of rendered questions in the template
    context (used to build a unique identifier per question),
    ensures shared auxiliary data exists in the context, renders
    the question, and returns the result wrapped in a question div.
    """
    # Bump the per-context counter so each question on the page
    # gets a distinct identifier.
    n_rendered = context.get('_questions_rendered', 0) + 1
    context['_questions_rendered'] = n_rendered

    # use qtag in identifier since coming from tag
    identifier = "qtag_%s" % n_rendered

    # Reuse auxiliary data already stashed in the context, creating
    # it on first use so later questions on the page share it.
    if '_auxiliary_data_' in context:
        auxiliary_data = context['_auxiliary_data_']
    else:
        from midocs.functions import return_new_auxiliary_data
        auxiliary_data = return_new_auxiliary_data()
        context['_auxiliary_data_'] = auxiliary_data

    question_data = render_question(
        {'question': question, 'seed': seed},
        question_identifier=identifier,
        auxiliary_data=auxiliary_data,
        rng=rng)

    body = template.loader.render_to_string(
        "mitesting/question_body.html",
        {'question_data': question_data})

    return '<div class="question">%s</div>' % body
Example #2
0
    def get_context_data(self, **kwargs):
        """
        Build the template context for displaying a single question.

        Reads an optional 'seed' query parameter, renders the question
        (self.object) -- or its solution when self.solution is set --
        and stores the rendered question_data, the shared auxiliary
        data, and display flags in the context.
        """
        context = super(QuestionView, self).get_context_data(**kwargs)

        # Optional seed from the query string; None lets render_question
        # generate one.  Catch only KeyError (GET['seed'] raises
        # MultiValueDictKeyError, a KeyError subclass) -- the original
        # bare except could also swallow unrelated errors such as
        # KeyboardInterrupt.
        try:
            seed = self.request.GET['seed']
        except KeyError:
            seed = None

        # show help if not rendering solution
        show_help = not self.solution

        # In question view, there will be only one question on page.
        # Identifier doesn't matter.  Use qv to indicate from question view.
        identifier = "qv"

        from midocs.functions import return_new_auxiliary_data
        auxiliary_data = return_new_auxiliary_data()

        import random
        rng = random.Random()

        question_dict = {'question': self.object,
                         'seed': seed, }
        from mitesting.render_questions import render_question
        context['question_data'] = render_question(
            question_dict=question_dict,
            rng=rng, user=self.request.user,
            question_identifier=identifier,
            allow_solution_buttons=True,
            solution=self.solution,
            show_help=show_help,
            auxiliary_data=auxiliary_data,
            show_post_user_errors=True)

        # Expose auxiliary data so the surrounding template can emit
        # applet javascript, hidden sections, etc.
        context['_auxiliary_data_'] = auxiliary_data

        context['show_lists'] = True

        # no Google analytics for questions
        context['noanalytics'] = True

        return context
Example #3
0
    def post(self, request, *args, **kwargs):
        """
        Return the rendered solution of a question as a JSON response.

        The POST body is expected to contain a 'cgd' field: base64-encoded,
        pickled "computer grade data" produced when the question was
        originally rendered.  After permission checks, the solution is
        rendered; if the request stems from a student's own question
        attempt, the solution view is recorded on that attempt.

        Returns an empty JSON object when the data is malformed or the
        user lacks permission to view the solution.
        """
        # Look up the question to grade
        question = self.get_object()


        # Manually split the urlencoded POST body on '&' and ';' to
        # locate the cgd (computer grade data) field.
        pairs = [s2 for s1 in request.body.split(b'&') for s2 in s1.split(b';')]
        cgd = None
        for name_value in pairs:
            if not name_value:
                continue
            nv = name_value.split(b'=', 1)
            if len(nv) != 2:
                continue
            
            if nv[0]==b'cgd':
                cgd = nv[1]
                break

        # NOTE(security): unpickling client-supplied data is dangerous.
        # This presumably relies on cgd being server-generated and
        # unforgeable -- confirm; otherwise this endpoint allows
        # arbitrary code execution via a crafted pickle.
        import pickle, base64, binascii
        try:
            computer_grade_data = pickle.loads(
                base64.b64decode(cgd))
        except (TypeError, IndexError, EOFError, binascii.Error) as exc:
            # Covers a missing field too: b64decode(None) raises TypeError.
            logger.error("cgd malformed: %s" % exc)
            return JsonResponse({})

        course_code = computer_grade_data.get('course_code')
        assessment_code = computer_grade_data.get('assessment_code')
        
        assessment = None
        course=None

        # Resolve the assessment/course the question was served from, if
        # any; silently drop the code when no matching assessment exists.
        if assessment_code and course_code:
            try:
                from micourses.models import Assessment
                assessment = Assessment.objects.get(course__code=course_code,
                                                    code=assessment_code)
                course=assessment.course
            except ObjectDoesNotExist:
                assessment_code = None

        # if user cannot view question solution,
        # or if user cannot view assessment solution (in case question is
        # part of an assessment)
        # then return empty json object
        if not question.user_can_view(request.user, solution=True, 
                                      course=course):
            return JsonResponse({})
        if assessment:
            if not assessment.user_can_view(request.user, solution=True,
                                            include_questions=False):
                return JsonResponse({})


        # identifier and seed are required keys; a missing key raises
        # KeyError here (unlike the .get() lookups above).
        question_identifier = computer_grade_data['identifier']

        # set up context from question expressions
        seed = computer_grade_data['seed']

        from midocs.functions import return_new_auxiliary_data
        auxiliary_data =  return_new_auxiliary_data()
        # Suffix keeps solution applets distinct from the question's own.
        auxiliary_data['applet']['suffix'] = "%s_sol" % question_identifier

        # Locate the question attempt this solution request refers to,
        # if the cgd carried one; a stale id is ignored.
        question_attempt=None
        question_attempt_id = computer_grade_data.get("question_attempt_id")
        if question_attempt_id is not None:
            try:
                from micourses.models import QuestionAttempt
                question_attempt = QuestionAttempt.objects.get(
                    id=question_attempt_id)
            except QuestionAttempt.DoesNotExist:
                pass

        import random
        rng=random.Random()

        question_dict={
            'question': question,
            'seed': seed,
            'question_attempt': question_attempt,
         }

        from mitesting.render_questions import render_question
        question_data= render_question(
            question_dict=question_dict,
            rng=rng, user=request.user,
            question_identifier="%s_sol" % question_identifier, 
            auxiliary_data = auxiliary_data,
            solution=True,
            show_help = False)

        from django.template.loader import get_template
        question_solution_template = get_template(
            "mitesting/question_solution_body.html")
        rendered_solution = question_solution_template.render(
                {'question_data': question_data})

        rendered_solution = mark_safe("<h4>Solution</h4>" + rendered_solution)
        results = {'rendered_solution': rendered_solution,
                   'identifier': question_identifier,
                   'applet_javascript': auxiliary_data['applet']['javascript'],
                   }


        
        # if not from a question attempt, then just return solution
        # and don't record fact
        if not question_attempt:
            return JsonResponse(results)

        
        # NOTE(review): assumes request.user has a courseuser enrolled in
        # the course (and that course is not None when an attempt exists)
        # -- otherwise this raises.  Confirm callers guarantee this.
        ce = course.courseenrollment_set.get(student=request.user.courseuser)

        # Does this attempt belong to the requesting user's enrollment?
        own_attempt = True
        if ce != question_attempt.content_attempt_question_set\
                                 .content_attempt.record.enrollment:
            own_attempt = False
        
        # if not an instructor, then show solution only if question attempt
        # is own attempt
        from micourses.models import INSTRUCTOR_ROLE, DESIGNER_ROLE
        if not (ce.role == INSTRUCTOR_ROLE or ce.role == DESIGNER_ROLE):
            if not own_attempt:
                return JsonResponse({})

        if own_attempt and not question_attempt.solution_viewed:
            # record fact that viewed solution for this question_attempt
            question_attempt.solution_viewed = timezone.now()
            # Retry the save up to 5 times on OperationalError (e.g.
            # database lock contention); re-raise after the final try.
            for trans_i in range(5):
                try:
                    with transaction.atomic(), reversion.create_revision():
                        question_attempt.save()
                except OperationalError:
                    if trans_i==4:
                        raise
                else:
                    break

        # return solution
        return JsonResponse(results)
def render_question_list(assessment, question_list, assessment_seed, rng=None,
                         user=None, solution=False,
                         auxiliary_data=None,
                         show_post_user_errors=False):
    """
    Render each question of an assessment in place.

    For each question_dict in question_list, render the question
    (or its solution, if solution is True) and store the result under
    the 'question_data' key of that dictionary.

    Inputs:
    - assessment: the assessment being rendered; its
      allow_solution_buttons attribute controls solution buttons
    - question_list: list of dictionaries, one per question, each
      containing the information render_question needs
      (e.g., question and seed)
    - assessment_seed: seed used to generate the assessment,
      passed through to render_question
    - rng: instance of random number generator to use;
      a new random.Random() is created if not supplied
    - user: the logged in user
    - solution: True if rendering solutions
    - auxiliary_data: dictionary for information that should be accessible
      between questions or outside questions.  Used, for example,
      for information about applets and hidden sections embedded in text
    - show_post_user_errors: if true, show errors in expressions that are
      flagged as post user response

    Output:
    - question_list, with each question_dict supplemented by:
      - question_data: dictionary returned by render_question, containing
        the information needed to display the question with the
        question_body.html template
    """

    # TODO: consider moving this logic into render_question itself, so
    # this function only sets the question identifier and passes through
    # the question_dict from question_list.

    if not rng:
        import random
        rng = random.Random()

    for (i, question_dict) in enumerate(question_list):

        # use qa for identifier since coming from assessment
        identifier = "qa%s" % i

        question_dict['question_data'] = render_question(
            question_dict,
            rng=rng, solution=solution,
            question_identifier=identifier,
            user=user, show_help=not solution,
            assessment=assessment,
            assessment_seed=assessment_seed,
            record_response=True,
            allow_solution_buttons=assessment.allow_solution_buttons,
            auxiliary_data=auxiliary_data,
            show_post_user_errors=show_post_user_errors)

    return question_list