Example #1
 def generate_contract_file(self):
     report = TemplateReport()
     report.data['contract'] = self
     report.filename = unicode(self.company) + "_contract"
     # "m" means the site prefers Microsoft formats; otherwise emit OpenDocument
     if settings.PREFERED_FORMAT == "m":
         report.file_format = "doc"
     else:
         report.file_format = "odt"
     if self.company and self.company.alternative_contract_template:
         template = self.company.alternative_contract_template.file
     else:
         template = Template.get_or_make_blank(name="Work Study Contract").file.path
     if template:
         report_file = report.pod_save(template, get_tmp_file=True)
         # Close the temporary file handle after saving instead of leaking it
         with open(report_file) as saved_report:
             self.contract_file.save(unicode(self.company) + "." + unicode(report.file_format), File(saved_report))
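
A minimal usage sketch, assuming generate_contract_file() is defined on a Django
model whose contract_file attribute is a FileField; the Contract model name and
the company__name lookup below are illustrative assumptions, not from the snippet.

    # Hypothetical caller; Contract and the company__name lookup are assumed
    contract = Contract.objects.get(company__name="Acme Corp")
    contract.generate_contract_file()     # renders via appy/pod and saves the file
    print(contract.contract_file.url)     # the saved FileField exposes a URL
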
Example #2
    def download_student_results(self, test, format, template, cohorts=None):
        """ Make appy based report showing results for each student """
        report = TemplateReport()
        report.file_format = format
        if cohorts:
            test_instances = test.active_testinstance_set.filter(
                student__cohort__in=cohorts).distinct()
        else:
            test_instances = test.active_testinstance_set.all()
        benchmarks = Benchmark.objects.filter(question__test=test).distinct()

        for benchmark in benchmarks:
            benchmark.points_possible = test.question_set.filter(
                benchmarks=benchmark).aggregate(
                    Sum('point_value'))['point_value__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if benchmark.points_possible is None:
                benchmark.points_possible = 0

        for test_instance in test_instances:
            benchmark_instances = []
            for benchmark in benchmarks:
                benchmark_instance = Struct()
                benchmark_instance.benchmark = benchmark
                benchmark_instance.points_possible = benchmark.points_possible
                benchmark_instance.answers = test_instance.answerinstance_set.filter(
                    question__benchmarks=benchmark)
                benchmark_instance.points_earned = benchmark_instance.answers.aggregate(
                    Sum('points_earned'))['points_earned__sum']
                # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
                if benchmark_instance.points_earned is None:
                    benchmark_instance.points_earned = 0
                benchmark_instance.questions = ''
                for answer in benchmark_instance.answers.all():
                    benchmark_instance.questions += '{}, '.format(
                        answer.question.get_order_start_one)
                benchmark_instance.questions = benchmark_instance.questions[:-2]
                benchmark_instances.append(benchmark_instance)
            test_instance.benchmarks = benchmark_instances

            test_instance.incorrects = test_instance.answerinstance_set.filter(
                points_earned__lt=F('points_possible'))
            for incorrect in test_instance.incorrects:
                incorrect.benchmarks = ''
                for benchmark in incorrect.question.benchmarks.all():
                    incorrect.benchmarks += '{}, '.format(benchmark.number)
                incorrect.benchmarks = incorrect.benchmarks[:-2]

                try:
                    incorrect.right_answer = incorrect.question.answer_set.order_by(
                        'point_value').reverse()[0]
                except IndexError:
                    incorrect.right_answer = "No correct answer"

        report.data['test'] = test
        report.data['tests'] = test_instances

        report.filename = 'Student Results for ' + unicode(test)
        return report.pod_save(template)
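
The None-guards above exist because Django's aggregate() returns None, not 0, for
a Sum over an empty queryset. A standalone sketch of the pitfall, reusing the
Question model and point_value field from the code above:

    from django.db.models import Sum

    # Sum over an empty queryset yields {'point_value__sum': None}, not 0
    total = Question.objects.none().aggregate(Sum('point_value'))['point_value__sum']
    total = total or 0  # guard before arithmetic, as the comments above do
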
Example #3
 def download_student_results(self, test, format, template):
     """ Make appy based report showing results for each student """
     report = TemplateReport()
     report.file_format = format
     test_instances = test.testinstance_set.all()
     benchmarks = Benchmark.objects.filter(question__test=test)
     
     for benchmark in benchmarks:
         benchmark.points_possible = test.question_set.filter(benchmarks=benchmark).aggregate(Sum('point_value'))['point_value__sum']
     
     for test_instance in test_instances:
         benchmark_instances = []
         for benchmark in benchmarks:
             benchmark_instance = Struct()
             benchmark_instance.benchmark = benchmark
             benchmark_instance.points_possible = benchmark.points_possible
             benchmark_instance.points_earned = test_instance.answerinstance_set.filter(question__benchmarks=benchmark).aggregate(Sum('points_earned'))['points_earned__sum']
             benchmark_instances.append(benchmark_instance)
         test_instance.benchmarks = benchmark_instances
     
         test_instance.incorrects = test_instance.answerinstance_set.filter(points_earned__lt=F('points_possible'))
         for incorrect in test_instance.incorrects:
             try:
                 incorrect.right_answer = incorrect.question.answer_set.order_by('point_value').reverse()[0]
             except IndexError:
                 incorrect.right_answer = "No correct answer"
         
     report.data['test'] = test
     report.data['tests'] = test_instances
     
     report.filename = 'Student Results for ' + unicode(test)
     return report.pod_save(template)
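
The student-results variants hang computed numbers off Struct instances. If the
project's Struct is the usual empty attribute-bag class (an assumption; its import
is not shown in these snippets), a minimal equivalent looks like this:

    class Struct(object):
        """Empty container; attributes are created freely on assignment."""
        pass

    row = Struct()
    row.points_possible = 10  # no declaration needed, mirroring the usage above
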
Example #4
    def download_teacher_results(self, test, format, template):
        """ Make appy based report showing results for a whole class """
        report = TemplateReport()
        report.file_format = format
        test_instances = test.testinstance_set.annotate(Sum('answerinstance__points_earned'))
        test.benchmarks = Benchmark.objects.filter(question__test=test).distinct()
        
        points_possible = test.points_possible
        points_to_earn = 0.70 * test.points_possible
        number_above_70 = test_instances.filter(answerinstance__points_earned__sum__gte=points_to_earn).count()
        total_test_takers = test.testinstance_set.filter(answerinstance__points_earned__gt=0).distinct().count()
        if total_test_takers:
            test.percent_over_70 = float(number_above_70) / total_test_takers
        else:
            test.percent_over_70 = 0

        for benchmark in test.benchmarks:
            question_benchmarks = test.question_set.filter(benchmarks=benchmark)
            benchmark.points_possible = question_benchmarks.aggregate(Sum('point_value'))['point_value__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if benchmark.points_possible is None:
                benchmark.points_possible = 0
            benchmark.total_points_possible = benchmark.points_possible * test_instances.count()
            benchmark.total_points_earned = question_benchmarks.aggregate(Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
            if benchmark.total_points_earned is None:
                benchmark.total_points_earned = 0
            if benchmark.total_points_possible:
                benchmark.average = float(benchmark.total_points_earned) / benchmark.total_points_possible
            else:
                benchmark.average = 0

            # Percent of students scoring over 70%
            test_instances_over_70 = 0
            for test_instance in test_instances:
                answers = test_instance.answerinstance_set.filter(question__benchmarks=benchmark)
                answers_points = answers.aggregate(Sum('points_earned'), Sum('points_possible'))
                instance_points_earned = answers_points['points_earned__sum']
                instance_points_possible = answers_points['points_possible__sum']
                if instance_points_earned and instance_points_possible:
                    instance_average = float(instance_points_earned) / instance_points_possible
                    if instance_average >= 0.70:
                        test_instances_over_70 += 1
            if test_instances.count():
                benchmark.over_70 = float(test_instances_over_70) / test_instances.count()
            else:
                benchmark.over_70 = 0

            benchmark.assessed_on = ""
            for question_benchmark in question_benchmarks:
                benchmark.assessed_on += "{}, ".format(question_benchmark.get_order_start_one)
            benchmark.assessed_on = benchmark.assessed_on[:-2]
        
        test.questions = test.question_set.all()
        for question in test.questions:
            question.benchmark_text = ''
            for benchmark in question.benchmarks.all():
                question.benchmark_text += '{}, '.format(benchmark.number)
            question.benchmark_text = question.benchmark_text[:-2]
            question.num_correct = question.answerinstance_set.filter(points_earned__gte=F('points_possible')).count()
            question.num_total = question.answerinstance_set.count()
            if question.num_total:
                question.percent_correct = float(question.num_correct) / question.num_total
            else:
                question.percent_correct = 0

            
        report.data['test'] = test
        report.data['tests'] = test_instances
        
        report.filename = 'Teacher Results for ' + unicode(test)
        return report.pod_save(template)
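
The filter on answerinstance__points_earned__sum in this variant works because
annotate(Sum(...)) attaches an aggregate named after the lookup path plus __sum,
which then behaves like a field in later filters. A minimal sketch using the same
relations:

    from django.db.models import Sum

    # annotate() names the aggregate '<lookup>__sum' unless given an explicit alias
    instances = test.testinstance_set.annotate(Sum('answerinstance__points_earned'))
    passing = instances.filter(answerinstance__points_earned__sum__gte=points_to_earn)
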
Example #5
    def download_student_results(self, test, format, template, cohorts=None):
        """ Make appy based report showing results for each student """
        report = TemplateReport()
        report.file_format = format
        if cohorts:
            test_instances = test.active_testinstance_set.filter(student__cohort__in=cohorts).distinct()
        else:
            test_instances = test.active_testinstance_set.all()
        benchmarks = Benchmark.objects.filter(question__test=test).distinct()

        for benchmark in benchmarks:
            benchmark.points_possible = test.question_set.filter(benchmarks=benchmark).aggregate(Sum('point_value'))['point_value__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if benchmark.points_possible is None:
                benchmark.points_possible = 0

        for test_instance in test_instances:
            benchmark_instances = []
            for benchmark in benchmarks:
                benchmark_instance = Struct()
                benchmark_instance.benchmark = benchmark
                benchmark_instance.points_possible = benchmark.points_possible
                benchmark_instance.answers = test_instance.answerinstance_set.filter(question__benchmarks=benchmark)
                benchmark_instance.points_earned = benchmark_instance.answers.aggregate(Sum('points_earned'))['points_earned__sum']
                # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
                if benchmark_instance.points_earned is None:
                    benchmark_instance.points_earned = 0
                benchmark_instance.questions = ''
                for answer in benchmark_instance.answers.all():
                    benchmark_instance.questions += '{}, '.format(answer.question.get_order_start_one)
                benchmark_instance.questions = benchmark_instance.questions[:-2]
                benchmark_instances.append(benchmark_instance)
            test_instance.benchmarks = benchmark_instances

            test_instance.incorrects = test_instance.answerinstance_set.filter(points_earned__lt=F('points_possible'))
            for incorrect in test_instance.incorrects:
                incorrect.benchmarks = ''
                for benchmark in incorrect.question.benchmarks.all():
                    incorrect.benchmarks += '{}, '.format(benchmark.number)
                incorrect.benchmarks = incorrect.benchmarks[:-2]

                try:
                    incorrect.right_answer = incorrect.question.answer_set.order_by('point_value').reverse()[0]
                except IndexError:
                    incorrect.right_answer = "No correct answer"

        report.data['test'] = test
        report.data['tests'] = test_instances

        report.filename = 'Student Results for ' + unicode(test)
        return report.pod_save(template)
Example #6
    def download_student_results(self, test, format, template):
        """ Make appy based report showing results for each student """
        report = TemplateReport()
        report.file_format = format
        test_instances = test.active_testinstance_set.all()
        benchmarks = Benchmark.objects.filter(question__test=test).distinct()

        for benchmark in benchmarks:
            benchmark.points_possible = test.question_set.filter(benchmarks=benchmark).aggregate(Sum("point_value"))[
                "point_value__sum"
            ]

        for test_instance in test_instances:
            benchmark_instances = []
            for benchmark in benchmarks:
                benchmark_instance = Struct()
                benchmark_instance.benchmark = benchmark
                benchmark_instance.points_possible = benchmark.points_possible
                benchmark_instance.answers = test_instance.answerinstance_set.filter(question__benchmarks=benchmark)
                benchmark_instance.points_earned = benchmark_instance.answers.aggregate(Sum("points_earned"))[
                    "points_earned__sum"
                ]
                benchmark_instance.questions = ""
                for answer in benchmark_instance.answers.all():
                    benchmark_instance.questions += "{}, ".format(answer.question.get_order_start_one)
                benchmark_instance.questions = benchmark_instance.questions[:-2]
                benchmark_instances.append(benchmark_instance)
            test_instance.benchmarks = benchmark_instances

            test_instance.incorrects = test_instance.answerinstance_set.filter(points_earned__lt=F("points_possible"))
            for incorrect in test_instance.incorrects:
                incorrect.benchmarks = ""
                for benchmark in incorrect.question.benchmarks.all():
                    incorrect.benchmarks += "{}, ".format(benchmark.number)
                incorrect.benchmarks = incorrect.benchmarks[:-2]

                try:
                    incorrect.right_answer = incorrect.question.answer_set.order_by("point_value").reverse()[0]
                except IndexError:
                    incorrect.right_answer = "No correct answer"

        report.data["test"] = test
        report.data["tests"] = test_instances

        report.filename = "Student Results for " + unicode(test)
        return report.pod_save(template)
Example #7
    def download_student_results(self, test, format, template):
        """ Make appy based report showing results for each student """
        report = TemplateReport()
        report.file_format = format
        test_instances = test.testinstance_set.all()
        benchmarks = Benchmark.objects.filter(question__test=test)

        for benchmark in benchmarks:
            benchmark.points_possible = test.question_set.filter(
                benchmarks=benchmark).aggregate(
                    Sum('point_value'))['point_value__sum']

        for test_instance in test_instances:
            benchmark_instances = []
            for benchmark in benchmarks:
                benchmark_instance = Struct()
                benchmark_instance.benchmark = benchmark
                benchmark_instance.points_possible = benchmark.points_possible
                benchmark_instance.points_earned = test_instance.answerinstance_set.filter(
                    question__benchmarks=benchmark).aggregate(
                        Sum('points_earned'))['points_earned__sum']
                benchmark_instances.append(benchmark_instance)
            test_instance.benchmarks = benchmark_instances

            test_instance.incorrects = test_instance.answerinstance_set.filter(
                points_earned__lt=F('points_possible'))
            for incorrect in test_instance.incorrects:
                try:
                    incorrect.right_answer = incorrect.question.answer_set.order_by(
                        'point_value').reverse()[0]
                except IndexError:
                    incorrect.right_answer = "No correct answer"

        report.data['test'] = test
        report.data['tests'] = test_instances

        report.filename = 'Student Results for ' + unicode(test)
        return report.pod_save(template)
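
All of these variants rely on F() to compare two columns of the same row inside
the SQL query, which is how an answer is classified as incorrect without pulling
rows into Python. The pattern in isolation:

    from django.db.models import F

    # F('points_possible') refers to the row's own column within the query
    incorrects = test_instance.answerinstance_set.filter(points_earned__lt=F('points_possible'))
    corrects = test_instance.answerinstance_set.filter(points_earned__gte=F('points_possible'))
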
Example #8
    def download_teacher_results(self, test, format, template, cohorts=None):
        """ Make appy based report showing results for a whole class """
        if not cohorts:
            cohorts = Cohort.objects.all()

        # Workaround: restrict the instances to these cohorts via a pk__in subquery
        subquery = test.testinstance_set.filter(student__cohort__in=cohorts).distinct()

        report = TemplateReport()
        report.file_format = format
        test_instances = test.active_testinstance_set.filter(answerinstance__points_earned__gt=0).filter(pk__in=subquery).annotate(Sum('answerinstance__points_earned'))
        test.benchmarks = Benchmark.objects.filter(question__test=test).distinct()

        points_possible = test.points_possible
        points_to_earn = 0.70 * test.points_possible
        number_gte_70 = test_instances.filter(pk__in=subquery).filter(answerinstance__points_earned__sum__gte=points_to_earn).count()
        total_test_takers = test_instances.filter(pk__in=subquery).filter(answerinstance__points_earned__gt=0).distinct().count()
        if total_test_takers:
            test.percent_gte_70 = float(number_gte_70) / total_test_takers
        else:
            test.percent_gte_70 = 0
        test.report_average = test.get_average(cohorts=cohorts)

        for benchmark in test.benchmarks:
            # TODO: eliminate this subquery? is the idea to eliminate any questions that all students left unanswered?
            qb_subquery = test.question_set.filter(answerinstance__test_instance__student__cohort__in=cohorts).distinct()
            question_benchmarks = test.question_set.filter(pk__in=qb_subquery).filter(benchmarks=benchmark).distinct()
            benchmark.points_possible = question_benchmarks.aggregate(Sum('point_value'))['point_value__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if benchmark.points_possible is None:
                benchmark.points_possible = 0
            benchmark.total_points_possible = benchmark.points_possible * test_instances.count()
            # The one-shot aggregate over the reverse relation returns wrong totals here
            # (see the distinct()/Sum issue linked below), so sum the answers manually:
            # benchmark.total_points_earned = question_benchmarks.aggregate(Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
            earned_sum = 0
            for question_benchmark in question_benchmarks:
                for answer in question_benchmark.active_answerinstance_set.filter(test_instance__student__cohort__in=cohorts).distinct():
                    earned_sum += answer.points_earned
            benchmark.total_points_earned = earned_sum

            if benchmark.total_points_possible:
                benchmark.average = float(benchmark.total_points_earned) / benchmark.total_points_possible
            else:
                benchmark.average = 0

            # Percent of students scoring at or above 70%
            test_instances_gte_70 = 0
            for test_instance in test_instances:
                answers = test_instance.answerinstance_set.filter(question__benchmarks=benchmark)
                answers_points = answers.aggregate(Sum('points_earned'), Sum('points_possible'))
                instance_points_earned = answers_points['points_earned__sum']
                instance_points_possible = answers_points['points_possible__sum']
                # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
                if instance_points_earned is None:
                    instance_points_earned = 0
                if instance_points_possible is None:
                    instance_points_possible = 0
                if instance_points_earned and instance_points_possible:
                    instance_average = float(instance_points_earned) / instance_points_possible
                    if instance_average >= 0.70:
                        test_instances_gte_70 += 1
            if test_instances.count():
                benchmark.gte_70 = float(test_instances_gte_70) / test_instances.count()
            else:
                benchmark.gte_70 = 0

            benchmark.assessed_on = ""
            for question_benchmark in question_benchmarks:
                benchmark.assessed_on += "{}, ".format(question_benchmark.get_order_start_one)
            benchmark.assessed_on = benchmark.assessed_on[:-2]

        test.questions = test.question_set.all()
        for question in test.questions:
            question.benchmark_text = ''
            for benchmark in question.benchmarks.all():
                question.benchmark_text += '{}, '.format(benchmark.number)
            question.benchmark_text = question.benchmark_text[:-2]
            # grab all the AnswerInstances that we care about for this question
            answerinstances = question.answerinstance_set.filter(test_instance__student__cohort__in=cohorts).distinct()
            # nasty! http://stackoverflow.com/questions/4093910/django-aggregates-sums-in-postgresql-dont-use-distinct-is-this-a-bug/4917507#4917507
            answerinstances = question.active_answerinstance_set.filter(pk__in=answerinstances)
            # calculate the COUNT of correct student responses for this question
            question.num_correct = answerinstances.filter(points_earned__gte=F('points_possible')).count()
            # calculate the COUNT of all student responses for this question
            question.num_total = answerinstances.count()
            # "percent" here is stored as a 0-1 fraction, keeping the existing convention
            if question.num_total:
                question.percent_correct = float(question.num_correct) / question.num_total
            else:
                question.percent_correct = 0
            # calculate the sum of all points earned and the sum of all points possible for this question
            earned_possible = answerinstances.aggregate(Sum('points_earned'), Sum('points_possible'))
            question.points_earned = earned_possible['points_earned__sum']
            question.points_possible = earned_possible['points_possible__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if question.points_earned is None:
                question.points_earned = 0
            if question.points_possible is None:
                question.points_possible = 0
            if question.points_possible:
                question.percent_points_earned = float(question.points_earned) / question.points_possible
            else:
                question.percent_points_earned = 0

            # Most common wrong answer
            most_wrong = question.answerinstance_set.filter(
                points_earned=0).values('answer').annotate(
                    count=Count('id')).order_by('-count').first()
            if most_wrong and most_wrong['count'] > 1:
                question.most_wrong = Answer.objects.get(id=most_wrong['answer']).letter
                question.most_wrong_times = most_wrong['count']
                question.most_wrong_verbose = '{} ({})'.format(
                    question.most_wrong, question.most_wrong_times)
            else:
                question.most_wrong = ''
                question.most_wrong_times = ''
                question.most_wrong_verbose = ''

        report.data['test'] = test
        report.data['tests'] = test_instances

        report.filename = 'Teacher Results for ' + unicode(test)
        return report.pod_save(template)
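
The pk__in re-query in this variant is the documented workaround for Django
aggregates double-counting when Sum() meets distinct() across a join (the Stack
Overflow link in the code). Isolated, the pattern is:

    # 1) build a DISTINCT queryset applying the cohort restriction
    wanted = question.answerinstance_set.filter(
        test_instance__student__cohort__in=cohorts).distinct()
    # 2) aggregate over a fresh, join-free queryset keyed by those pks
    clean = question.active_answerinstance_set.filter(pk__in=wanted)
    totals = clean.aggregate(Sum('points_earned'), Sum('points_possible'))
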
Example #9
    def download_teacher_results(self, test, format, template, cohorts=None):
        """ Make appy based report showing results for a whole class """
        if not cohorts:
            cohorts = Cohort.objects.all()

        # Workaround: restrict the instances to these cohorts via a pk__in subquery
        subquery = test.testinstance_set.filter(
            student__cohort__in=cohorts).distinct()

        report = TemplateReport()
        report.file_format = format
        test_instances = test.active_testinstance_set.filter(
            answerinstance__points_earned__gt=0).filter(
                pk__in=subquery).annotate(Sum('answerinstance__points_earned'))
        test.benchmarks = Benchmark.objects.filter(
            question__test=test).distinct()

        points_possible = test.points_possible
        points_to_earn = 0.70 * test.points_possible
        number_gte_70 = test_instances.filter(pk__in=subquery).filter(
            answerinstance__points_earned__sum__gte=points_to_earn).count()
        total_test_takers = test_instances.filter(pk__in=subquery).filter(
            answerinstance__points_earned__gt=0).distinct().count()
        if total_test_takers:
            test.percent_gte_70 = float(number_gte_70) / total_test_takers
        else:
            test.percent_gte_70 = 0
        test.report_average = test.get_average(cohorts=cohorts)

        for benchmark in test.benchmarks:
            # TODO: eliminate this subquery? is the idea to eliminate any questions that all students left unanswered?
            qb_subquery = test.question_set.filter(
                answerinstance__test_instance__student__cohort__in=cohorts
            ).distinct()
            question_benchmarks = test.question_set.filter(
                pk__in=qb_subquery).filter(benchmarks=benchmark).distinct()
            benchmark.points_possible = question_benchmarks.aggregate(
                Sum('point_value'))['point_value__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if benchmark.points_possible is None:
                benchmark.points_possible = 0
            benchmark.total_points_possible = benchmark.points_possible * test_instances.count()
            # The one-shot aggregate over the reverse relation returns wrong totals here
            # (see the distinct()/Sum issue linked below), so sum the answers manually:
            # benchmark.total_points_earned = question_benchmarks.aggregate(Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
            earned_sum = 0
            for question_benchmark in question_benchmarks:
                for answer in question_benchmark.active_answerinstance_set.filter(
                        test_instance__student__cohort__in=cohorts).distinct():
                    earned_sum += answer.points_earned
            benchmark.total_points_earned = earned_sum

            if benchmark.total_points_possible:
                benchmark.average = float(benchmark.total_points_earned) / benchmark.total_points_possible
            else:
                benchmark.average = 0

            # Percent of students scoring at or above 70%
            test_instances_gte_70 = 0
            for test_instance in test_instances:
                answers = test_instance.answerinstance_set.filter(
                    question__benchmarks=benchmark)
                answers_points = answers.aggregate(Sum('points_earned'),
                                                   Sum('points_possible'))
                instance_points_earned = answers_points['points_earned__sum']
                instance_points_possible = answers_points['points_possible__sum']
                # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
                if instance_points_earned is None:
                    instance_points_earned = 0
                if instance_points_possible is None:
                    instance_points_possible = 0
                if instance_points_earned and instance_points_possible:
                    instance_average = float(
                        instance_points_earned) / instance_points_possible
                    if instance_average >= 0.70:
                        test_instances_gte_70 += 1
            if test_instances.count():
                benchmark.gte_70 = float(
                    test_instances_gte_70) / test_instances.count()
            else:
                benchmark.gte_70 = 0

            benchmark.assessed_on = ""
            for question_benchmark in question_benchmarks:
                benchmark.assessed_on += "{}, ".format(
                    question_benchmark.get_order_start_one)
            benchmark.assessed_on = benchmark.assessed_on[:-2]

        test.questions = test.question_set.all()
        for question in test.questions:
            question.benchmark_text = ''
            for benchmark in question.benchmarks.all():
                question.benchmark_text += '{}, '.format(benchmark.number)
            question.benchmark_text = question.benchmark_text[:-2]
            # grab all the AnswerInstances that we care about for this question
            answerinstances = question.answerinstance_set.filter(
                test_instance__student__cohort__in=cohorts).distinct()
            # nasty! http://stackoverflow.com/questions/4093910/django-aggregates-sums-in-postgresql-dont-use-distinct-is-this-a-bug/4917507#4917507
            answerinstances = question.active_answerinstance_set.filter(
                pk__in=answerinstances)
            # calculate the COUNT of correct student responses for this question
            question.num_correct = answerinstances.filter(
                points_earned__gte=F('points_possible')).count()
            # calculate the COUNT of all student responses for this question
            question.num_total = answerinstances.count()
            # "percent" here is stored as a 0-1 fraction, keeping the existing convention
            if question.num_total:
                question.percent_correct = float(
                    question.num_correct) / question.num_total
            else:
                question.percent_correct = 0
            # calculate the sum of all points earned and the sum of all points possible for this question
            earned_possible = answerinstances.aggregate(
                Sum('points_earned'), Sum('points_possible'))
            question.points_earned = earned_possible['points_earned__sum']
            question.points_possible = earned_possible['points_possible__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if question.points_earned is None:
                question.points_earned = 0
            if question.points_possible is None:
                question.points_possible = 0
            if question.points_possible:
                question.percent_points_earned = float(
                    question.points_earned) / question.points_possible
            else:
                question.percent_points_earned = 0

        report.data['test'] = test
        report.data['tests'] = test_instances

        report.filename = 'Teacher Results for ' + unicode(test)
        return report.pod_save(template)
Example #10
    def download_teacher_results(self, test, format, template, cohorts=None):
        """ Make appy based report showing results for a whole class """
        if not cohorts:
            cohorts = Cohort.objects.all()

        # Workaround: restrict the instances to these cohorts via a pk__in subquery
        subquery = test.testinstance_set.filter(student__cohort__in=cohorts).distinct()

        report = TemplateReport()
        report.file_format = format
        test_instances = (
            test.active_testinstance_set.filter(answerinstance__points_earned__gt=0)
            .filter(pk__in=subquery)
            .annotate(Sum("answerinstance__points_earned"))
        )
        test.benchmarks = Benchmark.objects.filter(question__test=test).distinct()

        points_possible = test.points_possible
        points_to_earn = 0.70 * test.points_possible
        number_gte_70 = (
            test_instances.filter(pk__in=subquery)
            .filter(answerinstance__points_earned__sum__gte=points_to_earn)
            .count()
        )
        total_test_takers = (
            test_instances.filter(pk__in=subquery).filter(answerinstance__points_earned__gt=0).distinct().count()
        )
        if total_test_takers:
            test.percent_gte_70 = float(number_gte_70) / total_test_takers
        else:
            test.percent_gte_70 = 0
        test.report_average = test.get_average(cohorts=cohorts)

        for benchmark in test.benchmarks:
            # TODO: eliminate this subquery? is the idea to eliminate any questions that all students left unanswered?
            qb_subquery = test.question_set.filter(
                answerinstance__test_instance__student__cohort__in=cohorts
            ).distinct()
            question_benchmarks = test.question_set.filter(pk__in=qb_subquery).filter(benchmarks=benchmark).distinct()
            benchmark.points_possible = question_benchmarks.aggregate(Sum("point_value"))["point_value__sum"]
            benchmark.total_points_possible = benchmark.points_possible * test_instances.count()
            # Really think this should work...but nope.
            # benchmark.total_points_earned = question_benchmarks.aggregate(Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
            earned_sum = 0
            for question_benchmark in question_benchmarks:
                for answer in question_benchmark.active_answerinstance_set.filter(
                    test_instance__student__cohort__in=cohorts
                ).distinct():
                    earned_sum += answer.points_earned
            benchmark.total_points_earned = earned_sum

            if benchmark.total_points_possible:
                benchmark.average = float(benchmark.total_points_earned) / benchmark.total_points_possible
            else:
                benchmark.average = 0

            # Percent of students scoring at or above 70%
            test_instances_gte_70 = 0
            for test_instance in test_instances:
                answers = test_instance.answerinstance_set.filter(question__benchmarks=benchmark)
                answers_points = answers.aggregate(Sum("points_earned"), Sum("points_possible"))
                instance_points_earned = answers_points["points_earned__sum"]
                instance_points_possible = answers_points["points_possible__sum"]
                if instance_points_earned and instance_points_possible:
                    instance_average = float(instance_points_earned) / instance_points_possible
                    if instance_average >= 0.70:
                        test_instances_gte_70 += 1
            if test_instances.count():
                benchmark.gte_70 = float(test_instances_gte_70) / test_instances.count()
            else:
                benchmark.gte_70 = 0

            benchmark.assessed_on = ""
            for question_benchmark in question_benchmarks:
                benchmark.assessed_on += "{}, ".format(question_benchmark.get_order_start_one)
            benchmark.assessed_on = benchmark.assessed_on[:-2]

        test.questions = test.question_set.all()
        for question in test.questions:
            question.benchmark_text = ""
            for benchmark in question.benchmarks.all():
                question.benchmark_text += "{}, ".format(benchmark.number)
            question.benchmark_text = question.benchmark_text[:-2]
            # grab all the AnswerInstances that we care about for this question
            answerinstances = question.answerinstance_set.filter(test_instance__student__cohort__in=cohorts).distinct()
            # nasty! http://stackoverflow.com/questions/4093910/django-aggregates-sums-in-postgresql-dont-use-distinct-is-this-a-bug/4917507#4917507
            answerinstances = question.active_answerinstance_set.filter(pk__in=answerinstances)
            # calculate the COUNT of correct student responses for this question
            question.num_correct = answerinstances.filter(points_earned__gte=F("points_possible")).count()
            # calculate the COUNT of all student responses for this question
            question.num_total = answerinstances.count()
            # "percent" here is stored as a 0-1 fraction, keeping the existing convention
            if question.num_total:
                question.percent_correct = float(question.num_correct) / question.num_total
            else:
                question.percent_correct = 0
            # calculate the sum of all points earned and the sum of all points possible for this question
            earned_possible = answerinstances.aggregate(Sum("points_earned"), Sum("points_possible"))
            question.points_earned = earned_possible["points_earned__sum"] or 0
            question.points_possible = earned_possible["points_possible__sum"] or 0
            if question.points_possible:
                question.percent_points_earned = float(question.points_earned) / question.points_possible
            else:
                question.percent_points_earned = 0

        report.data["test"] = test
        report.data["tests"] = test_instances

        report.filename = "Teacher Results for " + unicode(test)
        return report.pod_save(template)
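
A usage sketch for the teacher report, reusing the Template fallback from Example
#1; the cohort name and the "odt" format below are illustrative assumptions:

    # Hypothetical invocation; the cohort name and format are assumed values
    template = Template.get_or_make_blank(name="Teacher Results").file.path
    report_file = self.download_teacher_results(
        test, format="odt", template=template,
        cohorts=Cohort.objects.filter(name="Period 1"))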