def download_student_results(self, test, format, template, cohorts=None):
    """Make an appy-based report showing per-benchmark results for each
    student who took ``test``.

    test -- the Test to report on
    format -- output file format handed to TemplateReport
    template -- appy pod template used to render the report
    cohorts -- optional iterable of cohorts; when given, only test
        instances for students in those cohorts are included
    Returns the rendered document from TemplateReport.pod_save().
    """
    report = TemplateReport()
    report.file_format = format
    if cohorts:
        test_instances = test.active_testinstance_set.filter(
            student__cohort__in=cohorts).distinct()
    else:
        test_instances = test.active_testinstance_set.all()
    benchmarks = Benchmark.objects.filter(question__test=test).distinct()
    for benchmark in benchmarks:
        benchmark.points_possible = test.question_set.filter(
            benchmarks=benchmark).aggregate(
            Sum('point_value'))['point_value__sum']
        # aggregate() returns None when no rows match, which would raise a
        # TypeError upon attempting arithmetic later.
        if benchmark.points_possible is None:
            benchmark.points_possible = 0
    for test_instance in test_instances:
        benchmark_instances = []
        for benchmark in benchmarks:
            benchmark_instance = Struct()
            benchmark_instance.benchmark = benchmark
            benchmark_instance.points_possible = benchmark.points_possible
            benchmark_instance.answers = test_instance.answerinstance_set.filter(
                question__benchmarks=benchmark)
            benchmark_instance.points_earned = benchmark_instance.answers.aggregate(
                Sum('points_earned'))['points_earned__sum']
            # Same None-from-aggregate guard as above.
            if benchmark_instance.points_earned is None:
                benchmark_instance.points_earned = 0
            # Comma-separated question numbers; str.join replaces the old
            # quadratic `+= '{}, '` build followed by a `[:-2]` trim.
            benchmark_instance.questions = ', '.join(
                '{}'.format(answer.question.get_order_start_one)
                for answer in benchmark_instance.answers.all())
            benchmark_instances.append(benchmark_instance)
        test_instance.benchmarks = benchmark_instances
        test_instance.incorrects = test_instance.answerinstance_set.filter(
            points_earned__lt=F('points_possible'))
        for incorrect in test_instance.incorrects:
            incorrect.benchmarks = ', '.join(
                '{}'.format(benchmark.number)
                for benchmark in incorrect.question.benchmarks.all())
            try:
                # Highest point value answer is treated as the right one.
                incorrect.right_answer = incorrect.question.answer_set.order_by(
                    'point_value').reverse()[0]
            except IndexError:
                # Narrowed from a bare except: only an empty answer_set
                # (empty queryset indexed at [0]) is expected here.
                incorrect.right_answer = "No correct answer"
    report.data['test'] = test
    report.data['tests'] = test_instances
    report.filename = 'Student Results for ' + unicode(test)
    return report.pod_save(template)
def download_student_results(self, test, format, template):
    """Make an appy-based report showing results for each student.

    Legacy variant that renders through the module-level pod_save()
    helper instead of TemplateReport.

    test -- the Test to report on
    format -- file extension/format string passed to pod_save
    template -- appy pod template used to render the report
    Returns the rendered document from pod_save().
    """
    data = get_default_data()
    test_instances = test.testinstance_set.all()
    benchmarks = Benchmark.objects.filter(question__test=test)
    for benchmark in benchmarks:
        benchmark.points_possible = test.question_set.filter(
            benchmarks=benchmark).aggregate(
            Sum('point_value'))['point_value__sum']
        # aggregate() yields None when no questions match; normalize to 0
        # (as the other report variants do) so later arithmetic cannot
        # raise a TypeError.
        if benchmark.points_possible is None:
            benchmark.points_possible = 0
    for test_instance in test_instances:
        benchmark_instances = []
        for benchmark in benchmarks:
            benchmark_instance = Struct()
            benchmark_instance.benchmark = benchmark
            benchmark_instance.points_possible = benchmark.points_possible
            benchmark_instance.points_earned = test_instance.answerinstance_set.filter(
                question__benchmarks=benchmark).aggregate(
                Sum('points_earned'))['points_earned__sum']
            # Same None-from-aggregate guard as above.
            if benchmark_instance.points_earned is None:
                benchmark_instance.points_earned = 0
            benchmark_instances.append(benchmark_instance)
        test_instance.benchmarks = benchmark_instances
        test_instance.incorrects = test_instance.answerinstance_set.filter(
            points_earned__lt=F('points_possible'))
        for incorrect in test_instance.incorrects:
            try:
                # Highest point value answer is treated as the right one.
                incorrect.right_answer = incorrect.question.answer_set.order_by(
                    'point_value').reverse()[0]
            except IndexError:
                # Narrowed from a bare except: only an empty answer_set
                # (empty queryset indexed at [0]) is expected here.
                incorrect.right_answer = "No correct answer"
    data['test'] = test
    data['tests'] = test_instances
    filename = 'Student Results for ' + unicode(test)
    return pod_save(filename, "." + str(format), data, template)
def download_student_results(self, test, format, template, cohorts=None):
    """Make an appy-based report showing per-benchmark results for each
    student who took ``test``.

    test -- the Test to report on
    format -- output file format handed to TemplateReport
    template -- appy pod template used to render the report
    cohorts -- optional iterable of cohorts; when given, only test
        instances for students in those cohorts are included
    Returns the rendered document from TemplateReport.pod_save().
    """
    report = TemplateReport()
    report.file_format = format
    if cohorts:
        test_instances = test.active_testinstance_set.filter(
            student__cohort__in=cohorts).distinct()
    else:
        test_instances = test.active_testinstance_set.all()
    benchmarks = Benchmark.objects.filter(question__test=test).distinct()
    for benchmark in benchmarks:
        benchmark.points_possible = test.question_set.filter(
            benchmarks=benchmark).aggregate(
            Sum('point_value'))['point_value__sum']
        # aggregate() returns None when no rows match, which would raise a
        # TypeError upon attempting arithmetic later.
        if benchmark.points_possible is None:
            benchmark.points_possible = 0
    for test_instance in test_instances:
        benchmark_instances = []
        for benchmark in benchmarks:
            benchmark_instance = Struct()
            benchmark_instance.benchmark = benchmark
            benchmark_instance.points_possible = benchmark.points_possible
            benchmark_instance.answers = test_instance.answerinstance_set.filter(
                question__benchmarks=benchmark)
            benchmark_instance.points_earned = benchmark_instance.answers.aggregate(
                Sum('points_earned'))['points_earned__sum']
            # Same None-from-aggregate guard as above.
            if benchmark_instance.points_earned is None:
                benchmark_instance.points_earned = 0
            # Comma-separated question numbers; str.join replaces the old
            # quadratic `+= '{}, '` build followed by a `[:-2]` trim.
            benchmark_instance.questions = ', '.join(
                '{}'.format(answer.question.get_order_start_one)
                for answer in benchmark_instance.answers.all())
            benchmark_instances.append(benchmark_instance)
        test_instance.benchmarks = benchmark_instances
        test_instance.incorrects = test_instance.answerinstance_set.filter(
            points_earned__lt=F('points_possible'))
        for incorrect in test_instance.incorrects:
            incorrect.benchmarks = ', '.join(
                '{}'.format(benchmark.number)
                for benchmark in incorrect.question.benchmarks.all())
            try:
                # Highest point value answer is treated as the right one.
                incorrect.right_answer = incorrect.question.answer_set.order_by(
                    'point_value').reverse()[0]
            except IndexError:
                # Narrowed from a bare except: only an empty answer_set
                # (empty queryset indexed at [0]) is expected here.
                incorrect.right_answer = "No correct answer"
    report.data['test'] = test
    report.data['tests'] = test_instances
    report.filename = 'Student Results for ' + unicode(test)
    return report.pod_save(template)
def download_student_results(self, test, format, template):
    """Make an appy-based report showing per-benchmark results for each
    student who took ``test`` (no cohort filtering).

    test -- the Test to report on
    format -- output file format handed to TemplateReport
    template -- appy pod template used to render the report
    Returns the rendered document from TemplateReport.pod_save().
    """
    report = TemplateReport()
    report.file_format = format
    test_instances = test.active_testinstance_set.all()
    benchmarks = Benchmark.objects.filter(question__test=test).distinct()
    for benchmark in benchmarks:
        benchmark.points_possible = test.question_set.filter(
            benchmarks=benchmark).aggregate(Sum("point_value"))[
            "point_value__sum"]
        # aggregate() returns None when no rows match; normalize to 0 so
        # later arithmetic cannot raise a TypeError.
        if benchmark.points_possible is None:
            benchmark.points_possible = 0
    for test_instance in test_instances:
        benchmark_instances = []
        for benchmark in benchmarks:
            benchmark_instance = Struct()
            benchmark_instance.benchmark = benchmark
            benchmark_instance.points_possible = benchmark.points_possible
            benchmark_instance.answers = test_instance.answerinstance_set.filter(
                question__benchmarks=benchmark)
            benchmark_instance.points_earned = benchmark_instance.answers.aggregate(
                Sum("points_earned"))["points_earned__sum"]
            # Same None-from-aggregate guard as above.
            if benchmark_instance.points_earned is None:
                benchmark_instance.points_earned = 0
            # Comma-separated question numbers; str.join replaces the old
            # quadratic `+=` build followed by a `[:-2]` trim.
            benchmark_instance.questions = ", ".join(
                "{}".format(answer.question.get_order_start_one)
                for answer in benchmark_instance.answers.all())
            benchmark_instances.append(benchmark_instance)
        test_instance.benchmarks = benchmark_instances
        test_instance.incorrects = test_instance.answerinstance_set.filter(
            points_earned__lt=F("points_possible"))
        for incorrect in test_instance.incorrects:
            incorrect.benchmarks = ", ".join(
                "{}".format(benchmark.number)
                for benchmark in incorrect.question.benchmarks.all())
            try:
                # Highest point value answer is treated as the right one.
                incorrect.right_answer = incorrect.question.answer_set.order_by(
                    "point_value").reverse()[0]
            except IndexError:
                # Narrowed from a bare except: only an empty answer_set
                # (empty queryset indexed at [0]) is expected here.
                incorrect.right_answer = "No correct answer"
    report.data["test"] = test
    report.data["tests"] = test_instances
    report.filename = "Student Results for " + unicode(test)
    return report.pod_save(template)