def get_context_data(self, **kwargs):
    """Build the template context for rendering an assessment.

    Renders the question list (seeded RNG for reproducibility), records
    any per-question rendering errors, assembles names/versions, due
    dates, attempt links, score summaries, and admin question links.

    Fix applied: the default was stored under the typo'd key
    ``'users attempt'`` (with a space) while the success branch set
    ``'users_attempt'`` — the default now uses the underscore key so
    templates always see a defined value.
    """
    context = super(AssessmentView, self).get_context_data(**kwargs)

    from midocs.functions import return_new_auxiliary_data
    auxiliary_data = return_new_auxiliary_data()
    context['_auxiliary_data_'] = auxiliary_data

    import random
    rng = random.Random()

    # show post user response errors only if instructor permissions
    if user_has_given_assessment_permission_level(self.request.user, 2):
        show_post_user_errors = True
    else:
        show_post_user_errors = False

    from micourses.render_assessments import render_question_list
    rendered_list = render_question_list(
        self.assessment, self.question_list, rng=rng,
        assessment_seed=self.assessment_seed,
        user=self.request.user,
        solution=self.solution,
        auxiliary_data=auxiliary_data,
        show_post_user_errors=show_post_user_errors)

    # if question_only is set, then view only that question
    if self.kwargs.get('question_only'):
        question_only = int(self.kwargs['question_only'])
        rendered_list = rendered_list[question_only-1:question_only]
        context['question_only'] = question_only
    context['rendered_list'] = rendered_list

    context['seed'] = self.assessment_seed

    # determine if there were any errors
    success = True
    question_errors = []
    for (ind, q) in enumerate(rendered_list):
        if not q["question_data"]["success"]:
            success = False
            question_errors.append(str(ind+1))
    if not success:
        context['error_message'] = \
            "Errors occurred in the following questions: %s" % \
            ", ".join(question_errors)
    context['success'] = success

    context['generate_course_attempt_link'] = False
    context['show_solution_link'] = False

    course = self.assessment.course
    context['course'] = course

    if user_can_administer_assessment(self.request.user, course=course):
        if self.thread_content:
            context['generate_course_attempt_link'] = True
        if not self.solution:
            context['show_solution_link'] = True

    # assessment display names, with optional solution suffix
    if self.thread_content:
        context['assessment_name'] = self.thread_content.get_title()
    else:
        context['assessment_name'] = self.assessment.name
    if self.solution:
        context['assessment_name'] += " solution"
    context['assessment_short_name'] = self.assessment.return_short_name()
    if self.solution:
        context['assessment_short_name'] += " sol."

    if self.version:
        context['version'] = self.version
        context['assessment_name_with_version'] = "%s, version %s" % \
            (context['assessment_name'], context['version'])
        context['assessment_short_name_with_version'] = "%s, version %s" % \
            (context['assessment_short_name'], context['version'])
    else:
        context['version'] = ''
        context['assessment_name_with_version'] = context['assessment_name']
        context['assessment_short_name_with_version'] \
            = context['assessment_short_name']

    # due date: for students with a current attempt, use the attendance-
    # adjusted due date and (when applicable) link it to the calculation page
    if self.course_enrollment and self.thread_content:
        if self.course_enrollment.role == STUDENT_ROLE \
           and self.current_attempt:
            due = self.thread_content.get_adjusted_due(
                self.current_attempt.record)
            if course.adjust_due_attendance and due:
                due_date_url = reverse(
                    'micourses:adjusted_due_calculation',
                    kwargs={'course_code': course.code,
                            'content_id': self.thread_content.id})
                from micourses.utils import format_datetime
                current_tz = timezone.get_current_timezone()
                due_string = format_datetime(current_tz.normalize(
                    due.astimezone(current_tz)))
                due = mark_safe('<a href="%s">%s</a>' %
                                (due_date_url, due_string))
            context['due'] = due
        else:
            context['due'] = self.thread_content.get_adjusted_due()

    context['thread_content'] = self.thread_content
    context['number_in_thread'] = self.number_in_thread
    context['current_attempt'] = self.current_attempt

    # BUG FIX: was context['users attempt'] (space), which the True-branch
    # below never matched; use the underscore key consistently.
    context['users_attempt'] = False
    context['multiple_attempts'] = False
    context['attempt_url'] = None
    context['record_url'] = None

    # set date from current_attempt, else as now
    if self.current_attempt:
        context['assessment_date'] = self.current_attempt.attempt_began
    else:
        context['assessment_date'] = timezone.now()

    # Check if have current attempt that belongs to user
    # (so can show score)
    # Create links to record and attempts (if valid)
    if self.current_attempt and \
       self.current_attempt.record.enrollment == self.course_enrollment:
        context['users_attempt'] = True

        valid_attempt_list = list(
            self.current_attempt.record.attempts.filter(valid=True))
        context['multiple_attempts'] = len(valid_attempt_list) > 1
        context['record_url'] = reverse(
            'micourses:content_record',
            kwargs={'course_code': course.code,
                    'content_id': self.thread_content.id})

        if self.current_attempt.valid:
            # 1-based position of the current attempt among valid attempts
            attempt_number = \
                valid_attempt_list.index(self.current_attempt) + 1
            context['attempt_url'] = reverse(
                'micourses:content_attempt',
                kwargs={'course_code': course.code,
                        'content_id': self.thread_content.id,
                        'attempt_number': attempt_number})

            # add question attempt urls to rendered_list question_data
            for (ind, q) in enumerate(rendered_list):
                q["question_data"]["attempt_url"] = reverse(
                    'micourses:question_attempts',
                    kwargs={'course_code': course.code,
                            'content_id': self.thread_content.id,
                            'attempt_number': attempt_number,
                            'question_number': ind+1})

    from mitesting.utils import round_and_int
    if self.thread_content:
        context['thread_content_points'] = round_and_int(
            self.thread_content.points)
        if self.current_attempt is None \
           or self.current_attempt.score is None:
            context['attempt_score'] = 0
        else:
            context['attempt_score'] = round_and_int(
                self.current_attempt.score, 1)
        if self.current_attempt is None or \
           self.current_attempt.record.score is None:
            context['content_score'] = 0
        else:
            context['content_score'] = round_and_int(
                self.current_attempt.record.score, 1)

    # get list of the question numbers in assessment
    # if instructor or designer in course
    # if also staff, include links to admin pages
    if user_can_administer_assessment(self.request.user, course=course):
        question_numbers = []
        if self.request.user.is_staff:
            context['assessment_admin_link'] = mark_safe(
                "<p><a href='%s'>%s</a></p>" % (
                    reverse('admin:micourses_assessment_change',
                            args=(self.assessment.id,)),
                    'Admin link'))
        for q in rendered_list:
            # if staff, add link to admin page for question
            if self.request.user.is_staff:
                question_numbers.append(
                    "<a href='%s'>%s</a>" % (
                        reverse('admin:mitesting_question_change',
                                args=(q['question'].id,)),
                        q['question'].id))
            else:
                question_numbers.append(str(q['question'].id))
        question_numbers = ", ".join(question_numbers)
        question_numbers = mark_safe(question_numbers)
    else:
        question_numbers = None
    context['question_numbers'] = question_numbers

    # turn off Google analytics for localhost/development site
    context['noanalytics'] = (settings.SITE_ID <= 2)

    from mitesting.utils import get_new_seed
    context['new_seed'] = get_new_seed(rng)

    return context
def post(self, request, *args, **kwargs):
    """Grade a submitted response to a question; optionally record it.

    The POST body carries a ``cgd`` field: base64-encoded pickled
    grading metadata (question identifier, seed, answer_info,
    question_attempt_id, record_response, ...) embedded in the rendered
    question form.  Returns a JsonResponse with grading results,
    feedback text, and — when a valid response is recorded — refreshed
    score totals.
    """
    # Look up the question to grade
    question = self.get_object()

    # Parse the raw body by hand (split on '&' and ';') instead of using
    # request.POST for cgd -- presumably to avoid URL-decoding the
    # base64 payload; TODO confirm
    pairs = [s2 for s1 in request.body.split(b'&')
             for s2 in s1.split(b';')]

    cgd = None
    for name_value in pairs:
        if not name_value:
            continue
        nv = name_value.split(b'=', 1)
        if len(nv) != 2:
            continue
        if nv[0]==b'cgd':
            cgd = nv[1]
            break

    import pickle, base64

    # SECURITY NOTE(review): pickle.loads on client-supplied data allows
    # arbitrary code execution if the payload can be forged; nothing
    # visible here signs or authenticates cgd -- verify upstream.
    # Also note: if no 'cgd' pair was found, cgd is None and b64decode
    # raises TypeError (a 500), rather than a clean error response.
    computer_grade_data = pickle.loads(
        base64.b64decode(cgd))

    question_identifier = computer_grade_data['identifier']

    # set up context from question expressions
    seed = computer_grade_data['seed']

    response_data = request.POST

    answer_info = computer_grade_data['answer_info']

    # collect the user's response for each expected answer blank
    user_responses = []
    for answer_num in range(len(answer_info)):
        answer_identifier = answer_info[answer_num]['identifier']
        user_responses.append({
            'identifier': answer_identifier,
            'code': answer_info[answer_num]['code'],
            'response': response_data.get(
                'answer_%s' % answer_identifier, "")})

    # look up the QuestionAttempt being responded to, if one was recorded
    question_attempt=None
    question_attempt_id = computer_grade_data.get("question_attempt_id")
    if question_attempt_id is not None:
        try:
            from micourses.models import QuestionAttempt
            question_attempt = QuestionAttempt.objects.get(
                id=question_attempt_id)
        except QuestionAttempt.DoesNotExist:
            pass

    from .grade_question import grade_question
    answer_results=grade_question(
        question=question,
        question_identifier=question_identifier,
        question_attempt=question_attempt,
        answer_info=answer_info,
        user_responses=user_responses,
        seed=seed)

    # increment number of attempts
    try:
        number_attempts = int(
            response_data['number_attempts_%s' % question_identifier])
    except (KeyError, ValueError):
        number_attempts=0
    number_attempts+=1
    answer_results['number_attempts'] = number_attempts

    # enable the "show solution" button once the configured number of
    # attempts has been reached (if the button is allowed at all)
    show_solution_button = computer_grade_data.get(
        'show_solution_button', False)
    enable_solution_button = False
    if show_solution_button and \
       question.show_solution_button_after_attempts and \
       number_attempts >= question.show_solution_button_after_attempts:
        enable_solution_button = True
    answer_results['enable_solution_button'] = enable_solution_button

    record_response = computer_grade_data['record_response']

    # if not recording the result of the question,
    # we're finished, so return response with the results
    if not (record_response and question_attempt):
        return JsonResponse(answer_results)

    # walk up from the question attempt to the content record/content
    content_attempt = question_attempt.content_attempt_question_set\
        .content_attempt
    content_record=content_attempt.record
    content = content_record.content

    # Verify that logged in user is the student of the content_record.
    # If not, don't record results
    # (Important so instructors viewing student results don't
    # inadvertently change student's score.)
    if not content_record.enrollment \
       or request.user.courseuser != content_record.enrollment.student:
        return JsonResponse(answer_results)

    # Have question attempt of enrolled student.
    # Possible situations
    # 1. Content is marked as not recording scores.
    # 2. Content is past due
    # 3. Content is not yet due
    # 4. Solution has been viewed
    # 5. Content is available and solution hasn't been viewed.
    #
    # In cases 1-4, an invalid response will be recorded,
    # and a message will be displayed to indicate the problem.
    # Score totals will not be updated.
    #
    # In case 5, if have an invalid attempt (content or question attempt)
    # then response will still be treated as invalid even though
    # the response itself will be marked as valid.
    # Display a message indicating student should generate a new attempt.
    # Score totals will not be updated
    #
    # In case 5, if have a valid attempt (both content and question attempt)
    # then record valid response and update scores.

    record_valid_response = True
    if not content.record_scores:
        record_valid_response = False

    from micourses.models import AVAILABLE, NOT_YET_AVAILABLE, PAST_DUE
    assessment_availability = content.return_availability(content_record)
    if assessment_availability != AVAILABLE:
        record_valid_response = False

    # check if student already viewed the solution
    # if so, mark as to not record response
    if question_attempt.solution_viewed:
        solution_viewed = True
        record_valid_response = False
    else:
        solution_viewed = False

    # Record response.
    # Invalid responses won't count toward score and
    # won't be viewable by student
    from micourses.models import QuestionResponse

    # in case get deadlock, try to save answer (and recalculate score)
    # five times; re-raise on the final failure
    for trans_i in range(5):
        try:
            with transaction.atomic():
                QuestionResponse.objects.create(
                    question_attempt=question_attempt,
                    response=json.dumps(user_responses),
                    credit=answer_results['credit'],
                    valid = record_valid_response)
        except OperationalError:
            if trans_i==4:
                raise
        else:
            break

    # if did not have a valid attempt, treat as though
    # response were marked as invalid, since it won't count.
    # (Deliberately AFTER the save: the response row itself stays valid.)
    if not (content_attempt.valid and question_attempt.valid):
        record_valid_response = False

    answer_results['record_valid_response'] = record_valid_response

    # build the user-facing feedback message matching the cases above
    if not content.record_scores:
        feedback_message = "Assessment not set up for recording answers.<br/>Answer not recorded."
    elif assessment_availability == PAST_DUE:
        current_tz = timezone.get_current_timezone()
        due = content.get_adjusted_due(content_record)
        due = current_tz.normalize(due.astimezone(current_tz))
        from micourses.utils import format_datetime
        feedback_message = "Due date %s of %s is past.<br/>Answer not recorded." % (format_datetime(due), content.get_title())
    elif assessment_availability == NOT_YET_AVAILABLE:
        feedback_message = "Assessment is not yet available. <br/>Answer not recorded."
    elif solution_viewed:
        feedback_message = "Solution for question already viewed for this attempt.<br/>Answer not recorded. <br/>Generate a new attempt to resume recording answers."
    elif not (content_attempt.valid and question_attempt.valid):
        feedback_message = "The current assessment attempt is not valid.<br/>It might have been started before the assessment was available.<br/>Answer not recorded.<br/>Generate a new attempt or reload page to start recording answers."
    else:
        feedback_message = ""

    if record_valid_response:
        feedback_message += "Answer recorded for %s.<br/>Course: <a href=\"%s\">%s</a>" % (request.user,reverse('micourses:content_record', kwargs={'content_id': content.id, 'course_code': content.course.code} ), content.course)

    answer_results['feedback'] += "<p>%s</p>" % feedback_message

    # if didn't record valid response, don't update scores,
    # so return without setting values
    if not record_valid_response:
        return JsonResponse(answer_results)

    # the create() above triggers score recalculation elsewhere;
    # refresh each level from the DB before reporting totals
    from mitesting.utils import round_and_int
    question_attempt.refresh_from_db()
    if question_attempt.credit is None:
        answer_results['current_percent_credit']=0
    else:
        answer_results['current_percent_credit']=round_and_int(
            question_attempt.credit*100,1)

    content_attempt.refresh_from_db()
    if content_attempt.score is None:
        answer_results['attempt_score']=0
    else:
        answer_results['attempt_score']=round_and_int(
            content_attempt.score,1)

    content_record.refresh_from_db()
    if content_record.score is None:
        answer_results['content_score']=0
    else:
        answer_results['content_score']=round_and_int(
            content_record.score,1)

    return JsonResponse(answer_results)
def get_context_data(self, **kwargs):
    """Assemble the template context for the assessment overview page.

    Adds thread content info, the viewing student's recorded score and
    due date (linked to the adjusted-due calculation when applicable),
    a direct link to the assessment when the user may view it, and an
    admin attempt-generation flag.
    """
    context = super(AssessmentOverview, self).get_context_data(**kwargs)

    # thread_content will be None
    # if assessment is not in thread and number in thread is 1
    try:
        thread_content = self.assessment.determine_thread_content(
            self.number_in_thread)
    except ObjectDoesNotExist:
        raise Http404("No assessment found")

    course = self.assessment.course

    context['thread_content'] = thread_content
    context['number_in_thread'] = self.number_in_thread
    context['course'] = course
    context['assessment_name'] = (
        thread_content.get_title() if thread_content
        else self.assessment.name)

    from mitesting.utils import round_and_int

    if thread_content:
        context['thread_content_points'] = round_and_int(
            thread_content.points)

        # the viewing user's courseuser, if any
        try:
            student = self.request.user.courseuser
        except AttributeError:
            student = None

        if student:
            try:
                record = thread_content.contentrecord_set.get(
                    enrollment__student=student)
            except ObjectDoesNotExist:
                record = None
            if record:
                score = record.score
                context['content_score'] = (
                    '--' if score is None else round_and_int(score, 1))
                context['have_user_score'] = True
                context['record_url'] = reverse(
                    'micourses:content_record',
                    kwargs={'course_code': course.code,
                            'content_id': thread_content.id})

        context['assigned'] = thread_content.assigned

        due = thread_content.get_adjusted_due(student=student)
        if student and course.adjust_due_attendance and due:
            # link the due date to the attendance-adjustment calculation
            adjustment_url = reverse(
                'micourses:adjusted_due_calculation',
                kwargs={'course_code': course.code,
                        'content_id': thread_content.id})
            from micourses.utils import format_datetime
            current_tz = timezone.get_current_timezone()
            due_text = format_datetime(current_tz.normalize(
                due.astimezone(current_tz)))
            due = mark_safe('<a href="%s">%s</a>' %
                            (adjustment_url, due_text))
        context['due'] = due

    # user has permission to view the assessment, given privacy level
    context['assessment_link'] = None
    if self.assessment.user_can_view(self.user, solution=False):
        get_string = ("n=%s" % self.number_in_thread
                      if self.number_in_thread > 1 else "")
        if thread_content and thread_content.substitute_title:
            context['assessment_link'] = \
                self.assessment.return_direct_link(
                    link_text=thread_content.substitute_title,
                    get_string=get_string)
        else:
            context['assessment_link'] = \
                self.assessment.return_direct_link(get_string=get_string)

    # generate assessment link if can administer and thread content exists
    context['generate_course_attempt_link'] = bool(
        thread_content and
        user_can_administer_assessment(self.user, course=course))

    # turn off Google analytics for localhost/development site
    context['noanalytics'] = (settings.SITE_ID <= 2)

    return context