def post_csv_grades(request, course_prefix, course_suffix, exam_slug):
    """Bulk-import exam grades from uploaded CSV files.

    Each CSV row must be ("<student_username>", "<field_name>", "subscore").
    Rows are validated and aggregated per student, then written onto the
    student's last ExamRecord submitted before the exam's grace period.
    Renders a confirmation page with counts and any rejected rows.

    Raises Http404 when no matching, non-deleted Exam exists for the course.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course = course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404

    #Only process ready mode exams and courses
    if course.mode == "draft": course = course.image
    if exam.mode == "draft": exam = exam.image

    good_count = 0
    exam_scores = {}  #this will be the student score created
    bad_rows = []     # human-readable descriptions of rejected rows, shown on the confirm page
    db_hits = 0       # rough count of DB write operations, reported in the summary log line

    if request.FILES:
        row_count=0;
        #We build up the records to be saved as a dict
        for f in request.FILES.itervalues():
            reader = csv.reader(f)
            for row in reader:
                # each row should be: "<student_username>", "<field_name>", "subscore"
                row_count += 1
                valid, output = validate_row(row)
                if not valid:
                    bad_rows.append(str(row_count) + ":" + str(row) + " => " + output)
                    logger.error(str(row_count) + ":" + str(row) + " => " + output)
                else:
                    (username, field_name, score) = output
                    if username in exam_scores:
                        exam_scores[username]['fields'].append({'name':field_name, 'score':score})
                        exam_scores[username]['total'] += score #increment total score
                    else:
                        exam_scores[username]={'fields':[{'name':field_name, 'score':score}], 'total':score} #initialize
                    good_count += 1
                    if good_count % 100 == 0:
                        print str(good_count)  # progress indicator for large uploads

    # Find the appropriate ExamRecord, for each student, to update. In this case, appropriate
    # means the last submission prior to the grace_period.
    exam_record_ptrs = ExamRecord.objects.values('student__username').filter(exam=exam, exam__grace_period__gt=F('time_created')).annotate(last_submission_id=Max('id'))

    student_count = 0
    for username, record in exam_scores.iteritems():
        try:
            user = User.objects.get(username=username)
            user_score, created = ExamScore.objects.get_or_create(course=course, exam=exam, student=user)
            db_hits += 2
            #Total score for this user
            total_score = sum(map(lambda r:r['score'], record['fields']))
            if total_score != record['total']:
                # Mismatch is logged but the import continues, trusting the stated total (floored at 0).
                bad_rows.append(username + ": total does not match sum of subscores. Sum:" + str(total_score) + " Total:" + str(record['total']))
                logger.error(username + ": total does not match sum of subscores. Sum:" + str(total_score) + " Total:" + str(record['total']))
                total_score = max(record['total'],0) #0 is the floor score

            #Find the ExamRecord for this user
            # NOTE(review): linear scan per student; relies on the for/else idiom --
            # exam_record_ptr stays None only when no record matched the username.
            for exam_record_ptr in exam_record_ptrs:
                if exam_record_ptr['student__username'] == username:
                    break
            else:
                exam_record_ptr = None

            if exam_record_ptr:
                if not created:
                    #Delete the ExamRecordScore, ExamRecordScoreFields and ExamRecordScoreFieldChoices
                    ExamRecordScoreFieldChoice.objects.filter(parent__parent__record_id=exam_record_ptr['last_submission_id']).delete()
                    ExamRecordScoreField.objects.filter(parent__record_id=exam_record_ptr['last_submission_id']).delete()
                    ExamRecordScore.objects.filter(record_id=exam_record_ptr['last_submission_id']).delete()
                    db_hits += 3
                #Create the new ExamRecordScore
                ers = ExamRecordScore(record_id=exam_record_ptr['last_submission_id'], raw_score=total_score, csv_imported=True)
                ers.save()
                db_hits += 1
                #Create the new ExamRecordscoreFields
                field_objs = map(lambda f:ExamRecordScoreField(parent=ers, field_name=f['name'], subscore=f['score']), record['fields'])
                ExamRecordScoreField.objects.bulk_create(field_objs)
                db_hits += 1
                #Update score for ExamRecord
                er = ExamRecord.objects.get(id=exam_record_ptr['last_submission_id'])
                er.score = total_score
                er.save()
                db_hits += 1

            #Set score for ExamScore
            user_score.score = total_score
            user_score.csv_imported = True
            user_score.save()
            db_hits += 1
            student_count += 1
            if student_count % 100 == 0:
                print str(student_count)  # progress indicator
        except User.DoesNotExist:
            bad_rows.append(username + " not found in students list!")
            logger.error(username + " not found in students list!")

    logger.info("Good count: %d Student count: %d DB Hits: %d Bad rows:%s" % (good_count, student_count, db_hits, str(bad_rows)))
    return render_to_response('exams/csv_upload_confirm.html',
                              {'common_page_data':request.common_page_data, 'exam':exam, 'good_count':good_count,
                               'db_hits':db_hits, 'student_count':student_count, 'bad_rows':bad_rows},
                              RequestContext(request))
def handle(self, *args, **options):
    """Regrade every complete ExamRecord for one exam (management command).

    args[0] is the exam id (required).  Options: 'start_time'/'end_time'
    (require dateutil's parser) restrict the time window; 'student_ids' is a
    comma-separated filter; 'penalties' applies resubmission/late penalties
    via compute_penalties; 'dryrun' reports changes without saving.

    Prints one status line per record and a summary of errors, regrades and
    database rows updated.  Raises CommandError on bad arguments.
    """
    errors = 0
    regrades = 0
    updates = 0
    if len(args) != 1:
        raise CommandError("exam id is required")
    examid = args[0]
    exam_obj = Exam.objects.get(id__exact=examid)
    autograder = AutoGrader(exam_obj.xml_metadata)
    examRecords = ExamRecord.objects \
        .select_related('examrecordscore', 'student') \
        .filter(exam_id__exact=examid, complete=True)
    # 'parser' is dateutil's parser when installed, falsy otherwise (module-level import).
    if not parser and (options['start_time'] or options['end_time']):
        raise CommandError("Can't parse start and end times without having 'dateutil' installed.\nSee http://labix.org/python-dateutil")
    if options['start_time']:
        start = parser.parse(options['start_time'])
        examRecords = examRecords.filter(time_created__gt=start)
    if options['end_time']:
        end = parser.parse(options['end_time'])
        examRecords = examRecords.filter(time_created__lt=end)
    if options['student_ids']:
        sidlist = options['student_ids'].split(',')
        examRecords = examRecords.filter(student__in=sidlist)
    # search in reverse ID order so that the latest attempts get precedence. If two
    # attempts have the same score, then want the latest attempt to be the one that
    # matters.
    examRecords = examRecords.order_by('-id')

    # this executes the query
    if len(examRecords) == 0:
        print "warning: no exam records found, is that what you intended?"
        return

    count = 1
    for er in examRecords:
        ers_created = False
        ers = er.examrecordscore
        if ers is None:
            # No stored raw score yet: stage a fresh one (saved only on a real regrade).
            ers = ExamRecordScore(record=er, raw_score=0.0)
            ers_id_string = "new"
            ers_created = True
        else:
            ers_id_string = str(ers.id)
        # NOTE(review): .encode('ascii','ignore') on a str literal is a no-op in Python 2.
        print "ExamRecord %d, %d of %d".encode('ascii','ignore') % (er.id, count, len(examRecords))
        count += 1
        try:
            score_before = er.score
            rawscore_before = ers.raw_score
            if score_before == None:     # scores of 0 come back from model as None
                score_before = 0.0       # not sure why but they do
            if rawscore_before == None:
                rawscore_before = 0.0
            score_after = 0.0
            rawscore_after = 0.0
            submitted = json.loads(er.json_data)
            regrade = {}
            for prob, v in submitted.iteritems():
                if isinstance(v,list):   # multiple choice case
                    student_input = map(lambda li: li['value'], v)
                    regrade[prob] = autograder.grade(prob, student_input)
                else:                    # single answer case
                    student_input = v['value']
                    regrade[prob] = autograder.grade(prob, student_input)
                if 'feedback' in regrade[prob]:
                    del regrade[prob]['feedback']  # remove giant feedback field
                if 'score' in regrade[prob]:
                    rawscore_after += float(regrade[prob]['score'])
            is_late = er.time_created > exam_obj.grace_period
            if er.attempt_number == 0:
                print "ERROR: examrecord %d: skip, attempt_number=0".encode('ascii','ignore') \
                    % er.id
                errors += 1
                continue
            if options['penalties']:
                days_late = er.days_late(grace_period=exam_obj.grace_period)
                score_after = compute_penalties(rawscore_after, er.attempt_number, exam_obj.resubmission_penalty,
                                                is_late, exam_obj.late_penalty, late_days=days_late,
                                                daily_late_penalty=exam_obj.daily_late_penalty)
            else:
                score_after = rawscore_after
            s = er.student
            try:
                es = ExamScore.objects.get(exam=exam_obj, student=s)
                es_id_string = str(es.id)
                examscore_before = es.score
            except ExamScore.DoesNotExist:
                es = ExamScore(course=er.course, exam=exam_obj, student=s)
                es_id_string = "new"
                examscore_before = -1   # sentinel so any real score counts as an increase
            examscore_after = max(examscore_before, score_after)

            #raw = raw score, score = with penalties, agg = exam_score, over all attempts
            status_line = u"\"%s\", \"%s\", %s, %s, %s, " \
                % (s.first_name, s.last_name, s.username, s.email, er.time_created)
            status_line += u"raw[%s]:%0.1f->%0.1f " \
                % (ers_id_string, rawscore_before, rawscore_after)
            status_line += u"score[%d]:%0.1f->%0.1f " \
                % (er.id, score_before, score_after)
            status_line += u"agg[%s]:%0.1f->%0.1f " \
                % (es_id_string, examscore_before, examscore_after)
            status_line += u"late:%d->%d" \
                % (er.late, is_late)
            if score_before == score_after and rawscore_before == rawscore_after \
                and examscore_before == examscore_after and is_late == er.late :
                print "OK: " + status_line.encode('ascii','ignore')
                continue
            regrades += 1
            print "REGRADE: " + status_line.encode('ascii','ignore')
            if not options['dryrun']:
                # Each save is conditional so 'updates' counts actual row writes.
                if score_before != score_after or is_late != er.late:
                    er.json_score_data = json.dumps(regrade)
                    er.score = score_after
                    er.late = is_late
                    er.save()
                    updates += 1
                if ers_created or rawscore_before != rawscore_after:
                    ers.raw_score = rawscore_after
                    ers.save()
                    updates += 1
                if examscore_before != examscore_after:
                    es.score = examscore_after
                    es.examrecordscore = ers
                    es.save()
                    updates += 1
        # exception handler around big ExamRecords loop -- trust me, it lines up
        # this just counts and skips offending rows so we can keep making progress
        except Exception as e:
            print u"ERROR: examrecord %d: cannot regrade: %s".encode('ascii','ignore') \
                % (er.id, unicode(e))
            errors += 1
            continue

    print
    print "## SUMMARY ##"
    print "# Errors: %d" % errors
    print "# Regrades: %d" % regrades
    print "# Database rows updated: %d" % updates
def collect_data(request, course_prefix, course_suffix, exam_slug):
    """Accept a student's exam submission, autograde it, and record scores.

    Reads the submission from POST['json_data'], creates an ExamRecord for
    this attempt, grades each problem with the exam's AutoGrader (surveys get
    a null grader that accepts everything), applies resubmission and late
    penalties, and updates the student's aggregate ExamScore.

    Returns the record-view URL on a graded submission, a plain confirmation
    when no autograder applies, or HttpResponseBadRequest past the final
    deadline / on grader setup errors.  Raises Http404 for an unknown exam.

    BUG FIX: penalty multipliers now divide by 100.0.  Under Python 2,
    (100 - penalty)/100 with an integer penalty field floor-divides to 0,
    zeroing every resubmitted or late score.
    """
    course = request.common_page_data['course']
    user = request.user
    try:
        exam = Exam.objects.get(course = course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404

    postdata = request.POST['json_data'] #will return an error code to the user if either of these fail (throws 500)
    json_obj=json.loads(postdata)

    # Reject ready-mode submissions past the final (partial-credit) deadline.
    if exam.mode == "ready" and exam.past_all_deadlines():
        return HttpResponseBadRequest("Sorry! This submission is past the last deadline of %s" % \
                                      datetime.datetime.strftime(exam.partial_credit_deadline, "%m/%d/%Y %H:%M PST"))

    attempt_number = exam.num_of_student_records(user)+1
    onpage = request.POST.get('onpage','')
    record = ExamRecord(course=course, exam=exam, student=user, json_data=postdata, onpage=onpage,
                        attempt_number=attempt_number, late=exam.past_due())
    record.save()

    autograder = None
    if exam.exam_type == "survey":
        autograder = AutoGrader("<null></null>", default_return=True) #create a null autograder that always returns the "True" object
    elif exam.autograde:
        try:
            autograder = AutoGrader(exam.xml_metadata)
        except Exception as e: #Pass back all the exceptions so user can see
            return HttpResponseBadRequest(unicode(e))

    if autograder:
        record_score = ExamRecordScore(record = record)
        record_score.save()

        feedback = {}
        total_score = 0
        for prob,v in json_obj.iteritems(): #prob is the "input" id, v is the associated value,
                                            #which can be an object (input box) or a list of objects (multiple-choice)
            try:
                if isinstance(v,list): #multiple choice case
                    submission = map(lambda li: li['value'], v)
                    feedback[prob] = autograder.grade(prob, submission)
                    field_obj = ExamRecordScoreField(parent=record_score,
                                                     field_name = prob,
                                                     human_name=v[0].get('questionreport', "") if len(v)>0 else "",
                                                     subscore = feedback[prob]['score'],
                                                     value = map(lambda li:li.encode('utf-8'),submission),
                                                     correct = feedback[prob]['correct'],
                                                     comments="",
                                                     associated_text = v[0].get('associatedText', "") if len(v)>0 else "",
                                                     )
                    field_obj.save()
                    for li in v:
                        # Per-choice correctness is only known when the grader reports correct_choices.
                        if 'correct_choices' not in feedback[prob]:
                            is_correct = None
                        else:
                            is_correct = li['value'] in feedback[prob]['correct_choices']
                        fc = ExamRecordScoreFieldChoice(parent=field_obj,
                                                        choice_value=li['value'],
                                                        correct=is_correct,
                                                        human_name=li.get('report',""),
                                                        associated_text=li.get('associatedText',""))
                        fc.save()
                else: #single answer
                    submission = v['value']
                    feedback[prob] = autograder.grade(prob, submission)
                    field_obj = ExamRecordScoreField(parent=record_score,
                                                     field_name = prob,
                                                     human_name=v.get('report', ""),
                                                     subscore = feedback[prob]['score'],
                                                     value = submission,
                                                     correct = feedback[prob]['correct'],
                                                     comments="",
                                                     associated_text = v.get('associatedText', ""))
                    field_obj.save()
            except AutoGraderGradingException as e:
                # Grading failure for this problem: record a 0 with the grader's message.
                feedback[prob]={'correct':False, 'score':0}
                field_obj = ExamRecordScoreField(parent=record_score,
                                                 field_name = prob,
                                                 human_name=v.get('report', ""),
                                                 subscore = 0,
                                                 correct = feedback[prob]['correct'],
                                                 comments = unicode(e),
                                                 associated_text = v.get('associatedText', ""))
                field_obj.save()
            #This is when using code indents to denote blocks is a bit hairy
            #supposed to be at the same level as try...except. Run once per prob,v
            total_score += feedback[prob]['score']

        #Set raw score for ExamRecordScore
        record_score.raw_score = total_score
        record_score.save()

        #Set penalty inclusive score for ExamRecord
        record.json_score_data = json.dumps(feedback)
        #apply resubmission penalty
        # BUG FIX: divide by 100.0 (true division).  With Python 2 integer
        # division, any nonzero integer penalty truncated the factor to 0.
        resubmission_penalty_percent = pow(((100 - exam.resubmission_penalty)/100.0), (attempt_number -1))
        total_score = max(total_score * resubmission_penalty_percent, 0)
        #apply the late penalty
        if exam.grace_period and exam.late_penalty > 0 and datetime.datetime.now() > exam.grace_period:
            total_score = max(total_score * ((100 - exam.late_penalty)/100.0), 0)
        record.score = total_score
        record.save()

        #Set ExamScore.score to max of ExamRecord.score for that student, exam.
        exam_score, created = ExamScore.objects.get_or_create(course=course, exam=exam, student=user)
        exam_score.setScore()

        return HttpResponse(reverse(exam.record_view, args=[course.prefix, course.suffix, exam.slug, record.id]))
    else:
        return HttpResponse("Submission has been saved.")
def handle(self, *args, **options):
    """Regrade every complete ExamRecord for one exam (management command).

    args[0] is the exam id (required).  Options: 'start_time'/'end_time'
    (require dateutil's parser) restrict the time window; 'student_ids' is a
    comma-separated filter; 'penalties' applies resubmission/late penalties
    via compute_penalties; 'dryrun' reports changes without saving.

    Prints one status line per record and a summary of errors, regrades and
    database rows updated.  Raises CommandError on bad arguments.
    """
    errors = 0
    regrades = 0
    updates = 0
    if len(args) != 1:
        raise CommandError("exam id is required")
    examid = args[0]
    exam_obj = Exam.objects.get(id__exact=examid)
    autograder = AutoGrader(exam_obj.xml_metadata)
    examRecords = ExamRecord.objects \
        .select_related('examrecordscore', 'student') \
        .filter(exam_id__exact=examid, complete=True)
    # 'parser' is dateutil's parser when installed, falsy otherwise (module-level import).
    if not parser and (options['start_time'] or options['end_time']):
        raise CommandError(
            "Can't parse start and end times without having 'dateutil' installed.\nSee http://labix.org/python-dateutil"
        )
    if options['start_time']:
        start = parser.parse(options['start_time'])
        examRecords = examRecords.filter(time_created__gt=start)
    if options['end_time']:
        end = parser.parse(options['end_time'])
        examRecords = examRecords.filter(time_created__lt=end)
    if options['student_ids']:
        sidlist = options['student_ids'].split(',')
        examRecords = examRecords.filter(student__in=sidlist)
    # search in reverse ID order so that the latest attempts get precedence. If two
    # attempts have the same score, then want the latest attempt to be the one that
    # matters.
    examRecords = examRecords.order_by('-id')

    # this executes the query
    if len(examRecords) == 0:
        print "warning: no exam records found, is that what you intended?"
        return

    count = 1
    for er in examRecords:
        ers_created = False
        ers = er.examrecordscore
        if ers is None:
            # No stored raw score yet: stage a fresh one (saved only on a real regrade).
            ers = ExamRecordScore(record=er, raw_score=0.0)
            ers_id_string = "new"
            ers_created = True
        else:
            ers_id_string = str(ers.id)
        # NOTE(review): .encode('ascii','ignore') on a str literal is a no-op in Python 2.
        print "ExamRecord %d, %d of %d".encode(
            'ascii', 'ignore') % (er.id, count, len(examRecords))
        count += 1
        try:
            score_before = er.score
            rawscore_before = ers.raw_score
            if score_before == None:     # scores of 0 come back from model as None
                score_before = 0.0       # not sure why but they do
            if rawscore_before == None:
                rawscore_before = 0.0
            score_after = 0.0
            rawscore_after = 0.0
            submitted = json.loads(er.json_data)
            regrade = {}
            for prob, v in submitted.iteritems():
                if isinstance(v, list):  # multiple choice case
                    student_input = map(lambda li: li['value'], v)
                    regrade[prob] = autograder.grade(prob, student_input)
                else:                    # single answer case
                    student_input = v['value']
                    regrade[prob] = autograder.grade(prob, student_input)
                if 'feedback' in regrade[prob]:
                    del regrade[prob][
                        'feedback']  # remove giant feedback field
                if 'score' in regrade[prob]:
                    rawscore_after += float(regrade[prob]['score'])
            is_late = er.time_created > exam_obj.grace_period
            if er.attempt_number == 0:
                print "ERROR: examrecord %d: skip, attempt_number=0".encode('ascii','ignore') \
                    % er.id
                errors += 1
                continue
            if options['penalties']:
                days_late = er.days_late(
                    grace_period=exam_obj.grace_period)
                score_after = compute_penalties(
                    rawscore_after,
                    er.attempt_number,
                    exam_obj.resubmission_penalty,
                    is_late,
                    exam_obj.late_penalty,
                    late_days=days_late,
                    daily_late_penalty=exam_obj.daily_late_penalty)
            else:
                score_after = rawscore_after
            s = er.student
            try:
                es = ExamScore.objects.get(exam=exam_obj, student=s)
                es_id_string = str(es.id)
                examscore_before = es.score
            except ExamScore.DoesNotExist:
                es = ExamScore(course=er.course, exam=exam_obj, student=s)
                es_id_string = "new"
                examscore_before = -1   # sentinel so any real score counts as an increase
            examscore_after = max(examscore_before, score_after)

            #raw = raw score, score = with penalties, agg = exam_score, over all attempts
            status_line = u"\"%s\", \"%s\", %s, %s, %s, " \
                % (s.first_name, s.last_name, s.username, s.email, er.time_created)
            status_line += u"raw[%s]:%0.1f->%0.1f " \
                % (ers_id_string, rawscore_before, rawscore_after)
            status_line += u"score[%d]:%0.1f->%0.1f " \
                % (er.id, score_before, score_after)
            status_line += u"agg[%s]:%0.1f->%0.1f " \
                % (es_id_string, examscore_before, examscore_after)
            status_line += u"late:%d->%d" \
                % (er.late, is_late)
            if score_before == score_after and rawscore_before == rawscore_after \
                and examscore_before == examscore_after and is_late == er.late :
                print "OK: " + status_line.encode('ascii', 'ignore')
                continue
            regrades += 1
            print "REGRADE: " + status_line.encode('ascii', 'ignore')
            if not options['dryrun']:
                # Each save is conditional so 'updates' counts actual row writes.
                if score_before != score_after or is_late != er.late:
                    er.json_score_data = json.dumps(regrade)
                    er.score = score_after
                    er.late = is_late
                    er.save()
                    updates += 1
                if ers_created or rawscore_before != rawscore_after:
                    ers.raw_score = rawscore_after
                    ers.save()
                    updates += 1
                if examscore_before != examscore_after:
                    es.score = examscore_after
                    es.examrecordscore = ers
                    es.save()
                    updates += 1
        # exception handler around big ExamRecords loop -- trust me, it lines up
        # this just counts and skips offending rows so we can keep making progress
        except Exception as e:
            print u"ERROR: examrecord %d: cannot regrade: %s".encode('ascii','ignore') \
                % (er.id, unicode(e))
            errors += 1
            continue

    print
    print "## SUMMARY ##"
    print "# Errors: %d" % errors
    print "# Regrades: %d" % regrades
    print "# Database rows updated: %d" % updates
def collect_data(request, course_prefix, course_suffix, exam_slug):
    """Accept a student's exam submission and grade it (early variant).

    Reads the submission from POST['json_data'], stores an ExamRecord, grades
    each problem with the exam's AutoGrader (surveys get a null grader that
    accepts everything), persists per-field and per-choice results, and copies
    the total onto the student's ExamScore.

    Returns the feedback dict as JSON when autograded, otherwise a plain
    confirmation.  Raises Http404 for an unknown exam.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course = course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404

    postdata = request.POST['json_data'] #will return an error code to the user if either of these fail (throws 500)
    json_obj=json.loads(postdata)

    record = ExamRecord(course=course, exam=exam, student=request.user, json_data=postdata)
    record.save()

    autograder = None
    if exam.exam_type == "survey":
        autograder = AutoGrader("<null></null>", default_return=True) #create a null autograder that always returns the "True" object
    elif exam.autograde:
        try:
            autograder = AutoGrader(exam.xml_metadata)
        except Exception as e: #Pass back all the exceptions so user can see
            return HttpResponseBadRequest(unicode(e))

    if autograder:
        record_score = ExamRecordScore(record = record)
        record_score.save()

        feedback = {}
        total_score = 0
        # prob is the "input" id; v is either a dict (input box) or a list of dicts (multiple-choice)
        for prob,v in json_obj.iteritems():
            try:
                if isinstance(v,list): #multiple choice case
                    submission = map(lambda li: li['value'], v)
                    feedback[prob] = autograder.grade(prob, submission)
                    field_obj = ExamRecordScoreField(parent=record_score,
                                                     field_name = prob,
                                                     human_name=v[0].get('questiontag4humans', "") if len(v)>0 else "",
                                                     subscore = feedback[prob]['score'],
                                                     value = submission,
                                                     correct = feedback[prob]['correct'],
                                                     comments="",
                                                     associated_text = v[0].get('associatedText', "") if len(v)>0 else "",
                                                     )
                    field_obj.save()
                    for li in v:
                        fc = ExamRecordScoreFieldChoice(parent=field_obj,
                                                        choice_value=li['value'],
                                                        human_name=li.get('tag4humans',""),
                                                        associated_text=li.get('associatedText',""))
                        fc.save()
                else: #single answer
                    submission = v['value']
                    feedback[prob] = autograder.grade(prob, submission)
                    field_obj = ExamRecordScoreField(parent=record_score,
                                                     field_name = prob,
                                                     human_name=v.get('questiontag4humans', ""),
                                                     subscore = feedback[prob]['score'],
                                                     value = submission,
                                                     correct = feedback[prob]['correct'],
                                                     comments="",
                                                     associated_text = v.get('associatedText', ""))
                    field_obj.save()
            except AutoGraderGradingException as e:
                # Grading failure for this problem: record a 0 with the grader's message.
                feedback[prob]={'correct':False, 'score':0}
                field_obj = ExamRecordScoreField(parent=record_score,
                                                 field_name = prob,
                                                 human_name=v.get('questiontag4humans', ""),
                                                 subscore = 0,
                                                 correct = feedback[prob]['correct'],
                                                 comments = unicode(e),
                                                 associated_text = v.get('associatedText', ""))
                field_obj.save()
            #This is when using code indents to denote blocks is a bit hairy
            #supposed to be at the same level as try...except. Run once per prob,v
            total_score += feedback[prob]['score']

        record_score.score = total_score
        record_score.save()
        record_score.copyToExamScore() #Make this score the current ExamScore

        record.json_score_data = json.dumps(feedback)
        record.score = total_score
        record.save()
        return HttpResponse(json.dumps(feedback))
    else:
        return HttpResponse("Submission has been saved.")
def handle(self, *args, **options): errors = 0 regrades = 0 updates = 0 if len(args) != 1: raise CommandError("exam id is required") examid = args[0] exam_obj = Exam.objects.get(id__exact=examid) autograder = AutoGrader(exam_obj.xml_metadata) examRecords = ExamRecord.objects \ .select_related('examrecordscore', 'student') \ .filter(exam_id__exact=examid, complete=True) if options['start_time']: start = parser.parse(options['start_time']) examRecords = examRecords.filter(time_created__gt=start) if options['end_time']: end = parser.parse(options['end_time']) examRecords = examRecords.filter(time_created__lt=end) if options['student_ids']: sidlist = string.split(options['student_ids'], ',') examRecords = examRecords.filter(student__in=sidlist) # this executes the query if len(examRecords) == 0: print "warning: no exam records found, is that what you intended?" return count = 1 for er in examRecords: ers_created = False ers = er.examrecordscore if ers is None: ers = ExamRecordScore(record=er, raw_score=0.0) ers_created = True print "ExamRecord %d, %d of %d" % (er.id, count, len(examRecords)) count += 1 try: score_before = er.score rawscore_before = ers.raw_score if score_before == None: # scores of 0 come back from model as None score_before = 0.0 # not sure why but they do if rawscore_before == None: # scores of 0 come back from model as None rawscore_before = 0.0 # not sure why but they do score_after = 0.0 rawscore_after = 0.0 submitted = json.loads(er.json_data) regrade = {} for prob, v in submitted.iteritems(): if isinstance(v,list): # multiple choice case student_input = map(lambda li: li['value'], v) regrade[prob] = autograder.grade(prob, student_input) else: # single answer case student_input = v['value'] regrade[prob] = autograder.grade(prob, student_input) if 'feedback' in regrade[prob]: del regrade[prob]['feedback'] # remove giant feedback field if 'score' in regrade[prob]: rawscore_after += float(regrade[prob]['score']) is_late = er.time_created > 
exam_obj.grace_period if er.attempt_number == 0: print "ERROR: examrecord %d: skip, attempt_number=0" % er.id errors += 1 next score_after = compute_penalties(rawscore_after, er.attempt_number, exam_obj.resubmission_penalty, is_late, exam_obj.late_penalty) s = er.student try: es = ExamScore.objects.get(exam=exam_obj, student=s) examscore_before = es.score except ExamScore.DoesNotExist: es = ExamScore(course=er.course, exam=exam_obj, student=s) examscore_before = -1 examscore_after = max(examscore_before, score_after) #raw = raw score, score = with penalties, agg = exam_score, over all attempts status_line = "%d, \"%s\", \"%s\", %s, %s, %s, raw:%0.1f->%0.1f score:%0.1f->%0.1f agg:%0.1f->%0.1f late:%d->%d" \ % (er.id, s.first_name, s.last_name, s.username, s.email, str(er.time_created), rawscore_before, rawscore_after, score_before, score_after, examscore_before, examscore_after, er.late, is_late) if score_before == score_after and rawscore_before == rawscore_after \ and examscore_before == examscore_after and is_late == er.late : print "OK: " + status_line continue regrades += 1 print "REGRADE: " + status_line if not options['dryrun']: if score_before != score_after or is_late != er.late: er.json_score_data = json.dumps(regrade) er.score = score_after er.late = is_late er.save() updates += 1 if ers_created or rawscore_before != rawscore_after: ers.raw_score = rawscore_after ers.save() updates += 1 if examscore_before != examscore_after: es.score = examscore_after es.save() updates += 1 # exception handler around big ExamRecords loop -- trust me, it lines up # this just counts and skips offending rows so we can keep making progress except Exception as e: print "ERROR: examrecord %d: cannot regrade: %s" % (er.id, str(e)) errors += 1 continue print print "## SUMMARY ##" print "# Errors: %d" % errors print "# Regrades: %d" % regrades print "# Database rows updated: %d" % updates
def collect_data(request, course_prefix, course_suffix, exam_slug):
    """Accept a student's exam submission, autograde it, and record scores.

    Reads the submission from POST['json_data'], creates an ExamRecord for
    this attempt, grades each problem with the exam's AutoGrader (surveys get
    a null grader that accepts everything), applies resubmission and late
    penalties, and updates the student's aggregate ExamScore.

    Returns the record-view URL on a graded submission, a plain confirmation
    when no autograder applies, or HttpResponseBadRequest past the final
    deadline / on grader setup errors.  Raises Http404 for an unknown exam.

    BUG FIX: penalty multipliers now divide by 100.0.  Under Python 2,
    (100 - penalty) / 100 with an integer penalty field floor-divides to 0,
    zeroing every resubmitted or late score.
    """
    course = request.common_page_data['course']
    user = request.user
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404

    postdata = request.POST[
        'json_data']  #will return an error code to the user if either of these fail (throws 500)
    json_obj = json.loads(postdata)

    # Reject submissions past the final (partial-credit) deadline.
    if exam.past_all_deadlines():
        return HttpResponseBadRequest("Sorry! This submission is past the last deadline of %s" % \
            datetime.datetime.strftime(exam.partial_credit_deadline, "%m/%d/%Y %H:%M PST"))

    attempt_number = exam.num_of_student_records(user) + 1
    onpage = request.POST.get('onpage', '')
    record = ExamRecord(course=course,
                        exam=exam,
                        student=user,
                        json_data=postdata,
                        onpage=onpage,
                        attempt_number=attempt_number,
                        late=exam.past_due())
    record.save()

    autograder = None
    if exam.exam_type == "survey":
        autograder = AutoGrader(
            "<null></null>", default_return=True
        )  #create a null autograder that always returns the "True" object
    elif exam.autograde:
        try:
            autograder = AutoGrader(exam.xml_metadata)
        except Exception as e:  #Pass back all the exceptions so user can see
            return HttpResponseBadRequest(unicode(e))

    if autograder:
        record_score = ExamRecordScore(record=record)
        record_score.save()

        feedback = {}
        total_score = 0
        for prob, v in json_obj.iteritems(
        ):  #prob is the "input" id, v is the associated value,
            #which can be an object (input box) or a list of objects (multiple-choice)
            try:
                if isinstance(v, list):  #multiple choice case
                    submission = map(lambda li: li['value'], v)
                    feedback[prob] = autograder.grade(prob, submission)
                    field_obj = ExamRecordScoreField(
                        parent=record_score,
                        field_name=prob,
                        human_name=v[0].get('questionreport', "")
                        if len(v) > 0 else "",
                        subscore=feedback[prob]['score'],
                        value=submission,
                        correct=feedback[prob]['correct'],
                        comments="",
                        associated_text=v[0].get('associatedText', "")
                        if len(v) > 0 else "",
                    )
                    field_obj.save()
                    for li in v:
                        # Per-choice correctness is only known when the grader reports correct_choices.
                        if 'correct_choices' not in feedback[prob]:
                            is_correct = None
                        else:
                            is_correct = li['value'] in feedback[prob][
                                'correct_choices']
                        fc = ExamRecordScoreFieldChoice(
                            parent=field_obj,
                            choice_value=li['value'],
                            correct=is_correct,
                            human_name=li.get('report', ""),
                            associated_text=li.get('associatedText', ""))
                        fc.save()
                else:  #single answer
                    submission = v['value']
                    feedback[prob] = autograder.grade(prob, submission)
                    field_obj = ExamRecordScoreField(
                        parent=record_score,
                        field_name=prob,
                        human_name=v.get('report', ""),
                        subscore=feedback[prob]['score'],
                        value=submission,
                        correct=feedback[prob]['correct'],
                        comments="",
                        associated_text=v.get('associatedText', ""))
                    field_obj.save()
            except AutoGraderGradingException as e:
                # Grading failure for this problem: record a 0 with the grader's message.
                feedback[prob] = {'correct': False, 'score': 0}
                field_obj = ExamRecordScoreField(
                    parent=record_score,
                    field_name=prob,
                    human_name=v.get('report', ""),
                    subscore=0,
                    correct=feedback[prob]['correct'],
                    comments=unicode(e),
                    associated_text=v.get('associatedText', ""))
                field_obj.save()
            #This is when using code indents to denote blocks is a bit hairy
            #supposed to be at the same level as try...except. Run once per prob,v
            total_score += feedback[prob]['score']

        #Set raw score for ExamRecordScore
        record_score.raw_score = total_score
        record_score.save()

        #Set penalty inclusive score for ExamRecord
        record.json_score_data = json.dumps(feedback)
        #apply resubmission penalty
        # BUG FIX: divide by 100.0 (true division).  With Python 2 integer
        # division, any nonzero integer penalty truncated the factor to 0.
        resubmission_penalty_percent = pow(
            ((100 - exam.resubmission_penalty) / 100.0), (attempt_number - 1))
        total_score = max(total_score * resubmission_penalty_percent, 0)
        #apply the late penalty
        if exam.grace_period and exam.late_penalty > 0 and datetime.datetime.now(
        ) > exam.grace_period:
            total_score = max(total_score * ((100 - exam.late_penalty) / 100.0), 0)
        record.score = total_score
        record.save()

        #Set ExamScore.score to max of ExamRecord.score for that student, exam.
        exam_score, created = ExamScore.objects.get_or_create(course=course,
                                                              exam=exam,
                                                              student=user)
        exam_score.setScore()

        return HttpResponse(
            reverse(exam.record_view,
                    args=[course.prefix, course.suffix, exam.slug, record.id]))
    else:
        return HttpResponse("Submission has been saved.")