def performance_review(submission: models.Submission):
    """Compute (or refresh) the Performance record for the submitting user's exam.

    Aggregates all of the user's submissions for the exam that *submission*
    belongs to, tallies tick/cross/unmarked counts and marks obtained, derives
    a percentage and matching Grade, then creates or updates the Performance
    row for (user, exam).

    Args:
        submission: the submission whose user/exam pair identifies which
            performance record to (re)compute.

    Returns:
        The created or updated Performance entity.
    """
    exam = submission.question.exam

    # Exam-wide totals.
    exam_questions = select(q for q in Question if q.exam == exam)[:]
    total_marks = sum(q.marks for q in exam_questions)
    total_number_of_questions = len(exam_questions)

    # Every submission this learner made for this exam.
    learner_submissions = select(
        s for s in Submission
        if s.question.exam == exam and s.user == submission.user)[:]

    ticks = crosses = unmarked = 0
    marks_obtained = 0
    for s in learner_submissions:
        if s.mark in (Mark.tick, Mark.auto_tick):
            ticks += 1
            # Only ticked submissions contribute marks.
            marks_obtained += s.marks_obtained
        elif s.mark in (Mark.cross, Mark.auto_cross):
            crosses += 1
        elif s.mark == Mark.unmarked:
            unmarked += 1

    # Guard: an exam with zero total marks would raise ZeroDivisionError.
    percentage = int(marks_obtained / total_marks * 100) if total_marks else 0
    # Grade bands are inclusive on both ends — presumably bands do not
    # overlap; verify against Grade data.
    grade = Grade.get(
        lambda g: g.starting_percentage <= percentage <= g.ending_percentage)

    performance = Performance.get(user=submission.user, exam=exam)
    if performance is None:
        performance = Performance(
            ticks=ticks,
            crosses=crosses,
            unmarked=unmarked,
            marks_obtained=marks_obtained,
            total_marks=total_marks,
            total_number_of_questions=total_number_of_questions,
            percentage=percentage,
            grade=grade,
            exam=exam,
            user=submission.user)
    else:
        performance.ticks = ticks
        performance.crosses = crosses
        performance.unmarked = unmarked
        performance.marks_obtained = marks_obtained
        performance.total_marks = total_marks
        # Bug fix: this field was previously never refreshed on update,
        # so it could go stale if questions were added/removed.
        performance.total_number_of_questions = total_number_of_questions
        performance.percentage = percentage
        performance.grade = grade

    logger.debug(performance.to_dict())
    return performance