def new_replace_spreadsheet(infile, outfile, data, type="ods", sheets=False):
    """ An appy pod implementation; appy is too buggy though """
    from ecwsp.sis.template_report import TemplateReport
    report = TemplateReport()
    report.data = data
    report.filename = outfile
    return report.pod_save(infile, ext=".ods")
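# Hypothetical usage sketch (not part of the original module): the keys of
# `data` become the variables available to the appy.pod .ods template named by
# `infile`, and `outfile` is only used as the report's filename. The template
# path and data keys below are illustrative assumptions, not real project names.
def example_roster_spreadsheet(students):
    import datetime
    data = {'students': students, 'date': unicode(datetime.date.today())}
    return new_replace_spreadsheet('templates/roster.ods', 'roster', data)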
def daily_attendance_report(adate, request, private_notes=False, type="odt"):
    from ecwsp.sis.models import GradeLevel
    template = Template.objects.get_or_create(name="Daily Attendance")[0]
    template = template.get_template_path(request)
    if not template:
        return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
    if request:
        report = TemplateReport(request.user)
    else:
        report = TemplateReport()
    report.data['selected_date'] = unicode(adate)
    report.data['school_day'] = get_school_day_number(adate)

    attendance = StudentAttendance.objects.filter(date=adate)
    students = Student.objects.filter(student_attn__in=attendance)
    active_year = SchoolYear.objects.get(active_year=True)
    active_year_dates = (active_year.start_date, active_year.end_date)

    for year in GradeLevel.objects.all():
        attns = attendance.filter(student__year__id=year.id)
        for attn in attns:
            attn.student.fname = attn.student.first_name
            attn.student.lname = attn.student.last_name
            if attn.status.absent:
                attn.total = StudentAttendance.objects.filter(
                    student=attn.student, status__absent=True, status__half=False,
                    date__range=active_year_dates).count()
                halfs = StudentAttendance.objects.filter(
                    student=attn.student, status__absent=True, status__half=True,
                    date__range=active_year_dates).count() / 2
                attn.total += (float(halfs) / 2)
            elif attn.status.tardy:
                attn.total = StudentAttendance.objects.filter(
                    student=attn.student, status__tardy=True,
                    date__range=active_year_dates).count()
            else:
                attn.total = StudentAttendance.objects.filter(
                    student=attn.student, status=attn.status,
                    date__range=active_year_dates).count()
        report.data['absences_' + str(year.id)] = attns

        attn_list = ""
        for status in AttendanceStatus.objects.exclude(name="Present"):
            attn = StudentAttendance.objects.filter(
                status=status, date=adate, student__year__id=year.id)
            if attn.count() > 0:
                attn_list += unicode(status.name) + " " + unicode(attn.count()) + ", "
        if len(attn_list) > 3:
            attn_list = attn_list[:-3]
        report.data['stat_' + str(year.id)] = attn_list

    report.data['comments'] = ""
    for attn in StudentAttendance.objects.filter(date=adate):
        if (attn.notes) or (attn.private_notes and private_notes):
            report.data['comments'] += unicode(attn.student) + ": "
            if attn.notes:
                report.data['comments'] += unicode(attn.notes) + " "
            if attn.private_notes and private_notes:
                report.data['comments'] += unicode(attn.private_notes)
            report.data['comments'] += ", "
    if len(report.data['comments']) > 3:
        report.data['comments'] = report.data['comments'][:-3]

    report.filename = "daily_attendance"
    return report.pod_save(template)
def attendance_student(request, id, all_years=False, order_by="Date", include_private_notes=False):
    """ Attendance report on particular student """
    from ecwsp.sis.template_report import TemplateReport
    report = TemplateReport(request.user)
    student = Student.objects.get(id=id)
    if all_years:
        attendances = StudentAttendance.objects.filter(student=student)
    else:
        active_year = SchoolYear.objects.get(active_year=True)
        active_year_dates = (active_year.start_date, active_year.end_date)
        attendances = StudentAttendance.objects.filter(student=student, date__range=active_year_dates)
    if order_by == "Status":
        attendances = attendances.order_by('status')
    report.data['attendances'] = []
    for attn in attendances:
        if include_private_notes:
            notes = unicode(attn.notes) + " " + unicode(attn.private_notes)
        else:
            notes = unicode(attn.notes)
        attendance = Struct()
        attendance.date = attn.date
        attendance.status = attn.status
        attendance.notes = notes
        report.data['attendances'].append(attendance)
    # data['attendances'] = attendances
    report.data['student'] = student
    report.data['students'] = [student]
    report.data['student_year'] = student.year
    template = Template.objects.get_or_create(name="Student Attendance Report")[0]
    template = template.get_template_path(request)
    report.filename = unicode(student) + "_Attendance"
    return report.pod_save(template)
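# Note: the report builders above and below attach arbitrary attributes to
# Struct() instances before handing them to the template context. The project
# defines its own Struct helper elsewhere; the minimal sketch below is only an
# assumption about its behavior, shown for context (the name is hypothetical).
class _ExampleStruct(object):
    """Bare attribute container; instances accept any attribute assignment."""
    pass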
def download_student_results(self, test, format, template, cohorts=None):
    """ Make appy based report showing results for each student """
    report = TemplateReport()
    report.file_format = format
    if cohorts:
        test_instances = test.active_testinstance_set.filter(
            student__cohort__in=cohorts).distinct()
    else:
        test_instances = test.active_testinstance_set.all()
    benchmarks = Benchmark.objects.filter(question__test=test).distinct()
    for benchmark in benchmarks:
        benchmark.points_possible = test.question_set.filter(
            benchmarks=benchmark).aggregate(Sum('point_value'))['point_value__sum']
        # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
        if benchmark.points_possible is None:
            benchmark.points_possible = 0
    for test_instance in test_instances:
        benchmark_instances = []
        for benchmark in benchmarks:
            benchmark_instance = Struct()
            benchmark_instance.benchmark = benchmark
            benchmark_instance.points_possible = benchmark.points_possible
            benchmark_instance.answers = test_instance.answerinstance_set.filter(
                question__benchmarks=benchmark)
            benchmark_instance.points_earned = benchmark_instance.answers.aggregate(
                Sum('points_earned'))['points_earned__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if benchmark_instance.points_earned is None:
                benchmark_instance.points_earned = 0
            benchmark_instance.questions = ''
            for answer in benchmark_instance.answers.all():
                benchmark_instance.questions += '{}, '.format(
                    answer.question.get_order_start_one)
            benchmark_instance.questions = benchmark_instance.questions[:-2]
            benchmark_instances.append(benchmark_instance)
        test_instance.benchmarks = benchmark_instances
        test_instance.incorrects = test_instance.answerinstance_set.filter(
            points_earned__lt=F('points_possible'))
        for incorrect in test_instance.incorrects:
            incorrect.benchmarks = ''
            for benchmark in incorrect.question.benchmarks.all():
                incorrect.benchmarks += '{}, '.format(benchmark.number)
            incorrect.benchmarks = incorrect.benchmarks[:-2]
            try:
                incorrect.right_answer = incorrect.question.answer_set.order_by(
                    'point_value').reverse()[0]
            except:
                incorrect.right_answer = "No correct answer"
    report.data['test'] = test
    report.data['tests'] = test_instances
    report.filename = 'Student Results for ' + unicode(test)
    return report.pod_save(template)
def download_student_results(self, test, format, template):
    """ Make appy based report showing results for each student """
    report = TemplateReport()
    report.file_format = format
    test_instances = test.testinstance_set.all()
    benchmarks = Benchmark.objects.filter(question__test=test)
    for benchmark in benchmarks:
        benchmark.points_possible = test.question_set.filter(
            benchmarks=benchmark).aggregate(Sum('point_value'))['point_value__sum']
    for test_instance in test_instances:
        benchmark_instances = []
        for benchmark in benchmarks:
            benchmark_instance = Struct()
            benchmark_instance.benchmark = benchmark
            benchmark_instance.points_possible = benchmark.points_possible
            benchmark_instance.points_earned = test_instance.answerinstance_set.filter(
                question__benchmarks=benchmark).aggregate(Sum('points_earned'))['points_earned__sum']
            benchmark_instances.append(benchmark_instance)
        test_instance.benchmarks = benchmark_instances
        test_instance.incorrects = test_instance.answerinstance_set.filter(
            points_earned__lt=F('points_possible'))
        for incorrect in test_instance.incorrects:
            try:
                incorrect.right_answer = incorrect.question.answer_set.order_by(
                    'point_value').reverse()[0]
            except:
                incorrect.right_answer = "No correct answer"
    report.data['test'] = test
    report.data['tests'] = test_instances
    report.filename = 'Student Results for ' + unicode(test)
    return report.pod_save(template)
def generate_contract_file(self):
    report = TemplateReport()
    report.data['contract'] = self
    report.filename = unicode(self.company) + "_contract"
    if settings.PREFERED_FORMAT == "m":
        report.file_format = "doc"
    else:
        report.file_format = "odt"
    if self.company and self.company.alternative_contract_template:
        template = self.company.alternative_contract_template.file
    else:
        template = Template.get_or_make_blank(name="Work Study Contract").file.path
    if template:
        report_file = report.pod_save(template, get_tmp_file=True)
        self.contract_file.save(
            unicode(self.company) + "." + unicode(report.file_format),
            File(open(report_file)))
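# Illustrative caller sketch (an assumption, not part of the original code):
# generate_contract_file is an instance method, so a view or admin action would
# typically call it on a saved record and then serve the stored file. The
# parameter name is hypothetical, and serving via .url assumes contract_file is
# a FileField with a reachable MEDIA_URL.
def regenerate_contract(contract_record):
    contract_record.generate_contract_file()
    return contract_record.contract_file.url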
def download_teacher_results(self, test, format, template):
    """ Make appy based report showing results for a whole class """
    report = TemplateReport()
    report.file_format = format
    test_instances = test.testinstance_set.annotate(Sum('answerinstance__points_earned'))
    test.benchmarks = Benchmark.objects.filter(question__test=test).distinct()
    points_possible = test.points_possible
    points_to_earn = 0.70 * test.points_possible
    number_above_70 = test_instances.filter(
        answerinstance__points_earned__sum__gte=points_to_earn).count()
    total_test_takers = test.testinstance_set.filter(
        answerinstance__points_earned__gt=0).distinct().count()
    test.percent_over_70 = float(number_above_70) / total_test_takers
    for benchmark in test.benchmarks:
        question_benchmarks = test.question_set.filter(benchmarks=benchmark)
        benchmark.points_possible = question_benchmarks.aggregate(
            Sum('point_value'))['point_value__sum']
        benchmark.total_points_possible = benchmark.points_possible * test_instances.count()
        benchmark.total_points_earned = question_benchmarks.aggregate(
            Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
        benchmark.average = float(benchmark.total_points_earned) / benchmark.total_points_possible
        # Percent students over 70%
        test_instances_over_70 = 0
        for test_instance in test_instances:
            answers = test_instance.answerinstance_set.filter(question__benchmarks=benchmark)
            answers_points = answers.aggregate(Sum('points_earned'), Sum('points_possible'))
            instance_points_earned = answers_points['points_earned__sum']
            instance_points_possible = answers_points['points_possible__sum']
            instance_average = float(instance_points_earned) / instance_points_possible
            if instance_average >= 0.70:
                test_instances_over_70 += 1
        benchmark.over_70 = float(test_instances_over_70) / test_instances.count()
        benchmark.assessed_on = ""
        for question_benchmark in question_benchmarks:
            benchmark.assessed_on += "{}, ".format(question_benchmark.get_order_start_one)
        benchmark.assessed_on = benchmark.assessed_on[:-2]
    test.questions = test.question_set.all()
    for question in test.questions:
        question.benchmark_text = ''
        for benchmark in question.benchmarks.all():
            question.benchmark_text += '{}, '.format(benchmark.number)
        question.benchmark_text = question.benchmark_text[:-2]
        question.num_correct = question.answerinstance_set.filter(
            points_earned__gte=F('points_possible')).count()
        question.num_total = question.answerinstance_set.count()
        question.percent_correct = float(question.num_correct) / question.num_total
    report.data['test'] = test
    report.data['tests'] = test_instances
    report.filename = 'Teacher Results for ' + unicode(test)
    return report.pod_save(template)
def discipline_report(request, student_id):
    """Generate a complete report of a student's discipline history """
    from ecwsp.sis.template_report import TemplateReport
    template, created = Template.objects.get_or_create(name="Discipline Report")
    template = template.get_template_path(request)
    report = TemplateReport(request.user)
    report.filename = 'disc_report'
    student = Student.objects.get(id=student_id)
    disc = StudentDiscipline.objects.filter(students=student)
    report.data['disciplines'] = disc
    report.data['school_year'] = SchoolYear.objects.get(active_year=True)
    report.data['student'] = student
    report.data['student_year'] = student.year
    return report.pod_save(template)
def download_student_results(self, test, format, template):
    """ Make appy based report showing results for each student """
    report = TemplateReport()
    report.file_format = format
    test_instances = test.active_testinstance_set.all()
    benchmarks = Benchmark.objects.filter(question__test=test).distinct()
    for benchmark in benchmarks:
        benchmark.points_possible = test.question_set.filter(
            benchmarks=benchmark).aggregate(Sum("point_value"))["point_value__sum"]
    for test_instance in test_instances:
        benchmark_instances = []
        for benchmark in benchmarks:
            benchmark_instance = Struct()
            benchmark_instance.benchmark = benchmark
            benchmark_instance.points_possible = benchmark.points_possible
            benchmark_instance.answers = test_instance.answerinstance_set.filter(
                question__benchmarks=benchmark)
            benchmark_instance.points_earned = benchmark_instance.answers.aggregate(
                Sum("points_earned"))["points_earned__sum"]
            benchmark_instance.questions = ""
            for answer in benchmark_instance.answers.all():
                benchmark_instance.questions += "{}, ".format(answer.question.get_order_start_one)
            benchmark_instance.questions = benchmark_instance.questions[:-2]
            benchmark_instances.append(benchmark_instance)
        test_instance.benchmarks = benchmark_instances
        test_instance.incorrects = test_instance.answerinstance_set.filter(
            points_earned__lt=F("points_possible"))
        for incorrect in test_instance.incorrects:
            incorrect.benchmarks = ""
            for benchmark in incorrect.question.benchmarks.all():
                incorrect.benchmarks += "{}, ".format(benchmark.number)
            incorrect.benchmarks = incorrect.benchmarks[:-2]
            try:
                incorrect.right_answer = incorrect.question.answer_set.order_by("point_value").reverse()[0]
            except:
                incorrect.right_answer = "No correct answer"
    report.data["test"] = test
    report.data["tests"] = test_instances
    report.filename = "Student Results for " + unicode(test)
    return report.pod_save(template)
def download_teacher_results(self, test, format, template, cohorts=None):
    """ Make appy based report showing results for a whole class """
    if not cohorts:
        cohorts = Cohort.objects.all()
    # Stupid f*****g hack
    subquery = test.testinstance_set.filter(student__cohort__in=cohorts).distinct()
    report = TemplateReport()
    report.file_format = format
    test_instances = test.active_testinstance_set.filter(
        answerinstance__points_earned__gt=0).filter(
        pk__in=subquery).annotate(Sum('answerinstance__points_earned'))
    test.benchmarks = Benchmark.objects.filter(question__test=test).distinct()
    points_possible = test.points_possible
    points_to_earn = 0.70 * test.points_possible
    number_gte_70 = test_instances.filter(pk__in=subquery).filter(
        answerinstance__points_earned__sum__gte=points_to_earn).count()
    total_test_takers = test_instances.filter(pk__in=subquery).filter(
        answerinstance__points_earned__gt=0).distinct().count()
    if total_test_takers:
        test.percent_gte_70 = float(number_gte_70) / total_test_takers
    else:
        test.percent_gte_70 = 0
    test.report_average = test.get_average(cohorts=cohorts)
    for benchmark in test.benchmarks:
        # TODO: eliminate this subquery? is the idea to eliminate any questions that all students left unanswered?
        qb_subquery = test.question_set.filter(
            answerinstance__test_instance__student__cohort__in=cohorts).distinct()
        question_benchmarks = test.question_set.filter(
            pk__in=qb_subquery).filter(benchmarks=benchmark).distinct()
        benchmark.points_possible = question_benchmarks.aggregate(
            Sum('point_value'))['point_value__sum']
        # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
        if benchmark.points_possible is None:
            benchmark.points_possible = 0
        benchmark.total_points_possible = benchmark.points_possible * test_instances.count()
        # Really think this should work...but nope.
        # benchmark.total_points_earned = question_benchmarks.aggregate(Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
        earned_sum = 0
        for question_benchmark in question_benchmarks:
            for answer in question_benchmark.active_answerinstance_set.filter(
                    test_instance__student__cohort__in=cohorts).distinct():
                earned_sum += answer.points_earned
        benchmark.total_points_earned = earned_sum
        if benchmark.total_points_possible:
            benchmark.average = float(benchmark.total_points_earned) / benchmark.total_points_possible
        else:
            benchmark.average = 0
        # Percent of students scoring at or above 70%
        test_instances_gte_70 = 0
        for test_instance in test_instances:
            answers = test_instance.answerinstance_set.filter(question__benchmarks=benchmark)
            answers_points = answers.aggregate(Sum('points_earned'), Sum('points_possible'))
            instance_points_earned = answers_points['points_earned__sum']
            instance_points_possible = answers_points['points_possible__sum']
            # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
            if instance_points_earned is None:
                instance_points_earned = 0
            if instance_points_possible is None:
                instance_points_possible = 0
            if instance_points_earned and instance_points_possible:
                instance_average = float(instance_points_earned) / instance_points_possible
                if instance_average >= 0.70:
                    test_instances_gte_70 += 1
        if test_instances.count():
            benchmark.gte_70 = float(test_instances_gte_70) / test_instances.count()
        else:
            benchmark.gte_70 = 0
        benchmark.assessed_on = ""
        for question_benchmark in question_benchmarks:
            benchmark.assessed_on += "{}, ".format(question_benchmark.get_order_start_one)
        benchmark.assessed_on = benchmark.assessed_on[:-2]
    test.questions = test.question_set.all()
    for question in test.questions:
        question.benchmark_text = ''
        for benchmark in question.benchmarks.all():
            question.benchmark_text += '{}, '.format(benchmark.number)
        question.benchmark_text = question.benchmark_text[:-2]
        # grab all the AnswerInstances that we care about for this question
        answerinstances = question.answerinstance_set.filter(
            test_instance__student__cohort__in=cohorts).distinct()
        # nasty! http://stackoverflow.com/questions/4093910/django-aggregates-sums-in-postgresql-dont-use-distinct-is-this-a-bug/4917507#4917507
        answerinstances = question.active_answerinstance_set.filter(pk__in=answerinstances)
        # calculate the COUNT of correct student responses for this question
        question.num_correct = answerinstances.filter(points_earned__gte=F('points_possible')).count()
        # calculate the COUNT of all student responses for this question
        question.num_total = answerinstances.count()
        # http://www.merriam-webster.com/dictionary/percent: "cent" means 100, but I'll stick with the existing convention
        if question.num_total:
            question.percent_correct = float(question.num_correct) / question.num_total
        else:
            question.percent_correct = 0
        # calculate the sum of all points earned and the sum of all points possible for this question
        earned_possible = answerinstances.aggregate(Sum('points_earned'), Sum('points_possible'))
        question.points_earned = earned_possible['points_earned__sum']
        question.points_possible = earned_possible['points_possible__sum']
        # aggregate() may return None, which will raise a TypeError upon attempting arithmetic
        if question.points_earned is None:
            question.points_earned = 0
        if question.points_possible is None:
            question.points_possible = 0
        if question.points_possible:
            question.percent_points_earned = float(question.points_earned) / question.points_possible
        else:
            question.percent_points_earned = 0
        # Most common wrong answer
        most_wrong = question.answerinstance_set.filter(
            points_earned=0).values('answer').annotate(
            count=Count('id')).order_by('-count').first()
        if most_wrong and most_wrong['count'] > 1:
            question.most_wrong = Answer.objects.get(id=most_wrong['answer']).letter
            question.most_wrong_times = most_wrong['count']
            question.most_wrong_verbose = '{} ({})'.format(
                question.most_wrong, question.most_wrong_times,)
        else:
            question.most_wrong = ''
            question.most_wrong_times = ''
            question.most_wrong_verbose = ''
    report.data['test'] = test
    report.data['tests'] = test_instances
    report.filename = 'Teacher Results for ' + unicode(test)
    return report.pod_save(template)
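# The repeated "aggregate() may return None" guards above exist because
# aggregate() over an empty queryset yields None rather than 0. A compact
# equivalent pattern (a sketch, not code used by the original functions) is:
def sum_points_earned(answer_qs):
    from django.db.models import Sum
    # Named aggregate; `or 0` normalizes the None returned for empty querysets.
    return answer_qs.aggregate(total=Sum('points_earned'))['total'] or 0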
def discipline_report_view(request):
    form = DisciplineStudentStatistics()
    merit_form = MeritForm()
    if request.method == 'POST':
        if 'merit' in request.POST:
            merit_form = MeritForm(request.POST)
            if merit_form.is_valid():
                from ecwsp.sis.template_report import TemplateReport
                report = TemplateReport(request.user)
                report.filename = 'Merit Handouts'
                l1 = merit_form.cleaned_data['level_one']
                l2 = merit_form.cleaned_data['level_two']
                l3 = merit_form.cleaned_data['level_three']
                l4 = merit_form.cleaned_data['level_four']
                start_date = merit_form.cleaned_data['start_date']
                end_date = merit_form.cleaned_data['end_date']
                students = Student.objects.filter(inactive=False)
                if merit_form.cleaned_data['sort_by'] == 'year':
                    students = students.order_by('year')
                elif merit_form.cleaned_data['sort_by'] == 'cohort':
                    students = students.order_by('cache_cohort')
                disciplines = StudentDiscipline.objects.filter(
                    date__range=(start_date, end_date)).values('students').annotate(Count('pk'))
                for student in students:
                    disc = 0
                    for discipline in disciplines:
                        if discipline['students'] == student.id:
                            disc = discipline['pk__count']
                            break
                    student.disc_count = disc
                    if student.disc_count <= l1:
                        student.merit_level = 1
                    elif student.disc_count <= l2:
                        student.merit_level = 2
                    elif student.disc_count <= l3:
                        student.merit_level = 3
                    elif student.disc_count <= l4:
                        student.merit_level = 4
                report.data['students'] = students
                template = Template.objects.get_or_create(name="Merit Level Handout")[0]
                template = template.get_template_path(request)
                if template:
                    return report.pod_save(template)
        else:
            form = DisciplineStudentStatistics(request.POST)
            if form.is_valid():
                data = []
                start, end = form.get_dates()
                if 'student' in request.POST:
                    students = Student.objects.all()
                    school_year = SchoolYear.objects.get(active_year=True)
                    if form.cleaned_data['date_begin']:
                        if SchoolYear.objects.filter(end_date__gte=form.cleaned_data['date_begin']):
                            school_year = SchoolYear.objects.filter(
                                end_date__gte=form.cleaned_data['date_begin']).order_by('start_date')[0]
                    # students belonging to this school year
                    students = students.filter(
                        course__marking_period__school_year__exact=school_year).distinct()
                    if not form.cleaned_data['include_deleted']:
                        students = students.exclude(inactive=True)
                    if form.cleaned_data['order_by'] == "Year":
                        students = students.order_by('year')
                    subtitles = ["Student", ]
                    titles = ["", "Infractions", ]
                    for infr in Infraction.objects.all():
                        titles.append("")
                    titles.pop()
                    titles.append("Actions")
                    for infr in Infraction.objects.all():
                        subtitles.append(unicode(infr))
                    for action in DisciplineAction.objects.all():
                        subtitles.append(unicode(action))
                        titles.append("")
                    titles.pop()
                    data.append(subtitles)
                    pref = UserPreference.objects.get_or_create(user=request.user)[0]
                    for student in students:
                        disciplines = student.studentdiscipline_set.all()
                        disciplines = disciplines.filter(date__range=(start, end))
                        stats = [unicode(student), ]
                        add = True
                        for infr in Infraction.objects.all():
                            number = disciplines.filter(infraction=infr, students=student).count()
                            stats.append(number)
                            # check for filter
                            if form.cleaned_data['infraction'] == infr:
                                infraction_discipline = disciplines.filter(
                                    infraction=form.cleaned_data['infraction'])
                                if number < form.cleaned_data['minimum_infraction']:
                                    add = False
                        for action in DisciplineAction.objects.all():
                            actions = disciplines.filter(
                                disciplineactioninstance__action=action, students=student).count()
                            stats.append(actions)
                            # check for filter
                            if form.cleaned_data['action'] == action:
                                if actions < form.cleaned_data['minimum_action']:
                                    add = False
                        pref.get_additional_student_fields(stats, student, students, titles)
                        if add:
                            data.append(stats)
                    report = XlReport(file_name="disc_stats")
                    report.add_sheet(data, header_row=titles, title="Discipline Stats", heading="Discipline Stats")
                    # By Teacher
                    data = []
                    titles = ['teacher']
                    for action in DisciplineAction.objects.all():
                        titles.append(action)
                    teachers = Faculty.objects.filter(studentdiscipline__isnull=False).distinct()
                    disciplines = StudentDiscipline.objects.filter(date__range=(start, end))
                    for teacher in teachers:
                        row = [teacher]
                        for action in DisciplineAction.objects.all():
                            row.append(disciplines.filter(teacher=teacher, action=action).count())
                        data.append(row)
                    report.add_sheet(data, header_row=titles, heading="By Teachers")
                    return report.as_download()
                elif 'aggr' in request.POST:
                    disciplines = StudentDiscipline.objects.filter(date__range=(start, end))
                    if form.cleaned_data['this_year']:
                        school_start = SchoolYear.objects.get(active_year=True).start_date
                        school_end = SchoolYear.objects.get(active_year=True).end_date
                        disciplines = disciplines.filter(date__range=(school_start, school_end))
                    elif not form.cleaned_data['this_year'] and not form.cleaned_data['all_years']:
                        disciplines = disciplines.filter(
                            date__range=(form.cleaned_data['date_begin'], form.cleaned_data['date_end']))
                    stats = []
                    titles = []
                    for infr in Infraction.objects.all():
                        titles.append(infr)
                        number = disciplines.filter(infraction=infr).count()
                        stats.append(number)
                    for action in DisciplineAction.objects.all():
                        titles.append(action)
                        number = 0
                        for a in DisciplineActionInstance.objects.filter(action=action):
                            number += a.quantity
                        stats.append(number)
                    data.append(stats)
                    report = XlReport(file_name="disc_stats")
                    report.add_sheet(data, header_row=titles, title="Discipline Stats", heading="Discipline Stats")
                    return report.as_download()
            else:
                return render_to_response(
                    'discipline/disc_report.html',
                    {'request': request, 'form': form},
                    RequestContext(request, {}),)
    return render_to_response(
        'discipline/disc_report.html',
        {'request': request, 'form': form, 'merit_form': merit_form, },
        RequestContext(request, {}),)
def discipline_report_view(request):
    form = DisciplineStudentStatistics()
    merit_form = MeritForm()
    if request.method == "POST":
        if "merit" in request.POST:
            merit_form = MeritForm(request.POST)
            if merit_form.is_valid():
                from ecwsp.sis.template_report import TemplateReport
                report = TemplateReport(request.user)
                report.filename = "Merit Handouts"
                l1 = merit_form.cleaned_data["level_one"]
                l2 = merit_form.cleaned_data["level_two"]
                l3 = merit_form.cleaned_data["level_three"]
                l4 = merit_form.cleaned_data["level_four"]
                start_date = merit_form.cleaned_data["start_date"]
                end_date = merit_form.cleaned_data["end_date"]
                students = Student.objects.filter(is_active=True)
                if merit_form.cleaned_data["sort_by"] == "year":
                    students = students.order_by("year")
                elif merit_form.cleaned_data["sort_by"] == "cohort":
                    students = students.order_by("cache_cohort")
                disciplines = (
                    StudentDiscipline.objects.filter(date__range=(start_date, end_date))
                    .values("students")
                    .annotate(Count("pk"))
                )
                for student in students:
                    disc = 0
                    for discipline in disciplines:
                        if discipline["students"] == student.id:
                            disc = discipline["pk__count"]
                            break
                    student.disc_count = disc
                    if student.disc_count <= l1:
                        student.merit_level = 1
                    elif student.disc_count <= l2:
                        student.merit_level = 2
                    elif student.disc_count <= l3:
                        student.merit_level = 3
                    elif student.disc_count <= l4:
                        student.merit_level = 4
                report.data["students"] = students
                template = Template.objects.get_or_create(name="Merit Level Handout")[0]
                template = template.get_template_path(request)
                if template:
                    return report.pod_save(template)
        else:
            form = DisciplineStudentStatistics(request.POST)
            if form.is_valid():
                data = []
                start, end = form.get_dates()
                if "student" in request.POST:
                    students = Student.objects.all()
                    school_year = SchoolYear.objects.get(active_year=True)
                    if form.cleaned_data["date_begin"]:
                        if SchoolYear.objects.filter(end_date__gte=form.cleaned_data["date_begin"]):
                            school_year = SchoolYear.objects.filter(
                                end_date__gte=form.cleaned_data["date_begin"]
                            ).order_by("start_date")[0]
                    # students belonging to this school year
                    students = students.filter(
                        course__marking_period__school_year__exact=school_year
                    ).distinct()
                    if not form.cleaned_data["include_deleted"]:
                        students = students.exclude(is_active=False)
                    if form.cleaned_data["order_by"] == "Year":
                        students = students.order_by("year")
                    subtitles = ["Student"]
                    titles = ["", "Infractions"]
                    for infr in Infraction.objects.all():
                        titles.append("")
                    titles.pop()
                    titles.append("Actions")
                    for infr in Infraction.objects.all():
                        subtitles.append(unicode(infr))
                    for action in DisciplineAction.objects.all():
                        subtitles.append(unicode(action))
                        titles.append("")
                    titles.pop()
                    data.append(subtitles)
                    pref = UserPreference.objects.get_or_create(user=request.user)[0]
                    for student in students:
                        disciplines = student.studentdiscipline_set.all()
                        disciplines = disciplines.filter(date__range=(start, end))
                        stats = [unicode(student)]
                        add = True
                        for infr in Infraction.objects.all():
                            number = disciplines.filter(infraction=infr, students=student).count()
                            stats.append(number)
                            # check for filter
                            if form.cleaned_data["infraction"] == infr:
                                infraction_discipline = disciplines.filter(
                                    infraction=form.cleaned_data["infraction"]
                                )
                                if number < form.cleaned_data["minimum_infraction"]:
                                    add = False
                        for action in DisciplineAction.objects.all():
                            actions = disciplines.filter(
                                disciplineactioninstance__action=action, students=student
                            ).count()
                            stats.append(actions)
                            # check for filter
                            if form.cleaned_data["action"] == action:
                                if actions < form.cleaned_data["minimum_action"]:
                                    add = False
                        if add:
                            data.append(stats)
                    report = XlReport(file_name="disc_stats")
                    report.add_sheet(data, header_row=titles, title="Discipline Stats", heading="Discipline Stats")
                    # By Teacher
                    data = []
                    titles = ["teacher"]
                    for action in DisciplineAction.objects.all():
                        titles.append(action)
                    teachers = Faculty.objects.filter(studentdiscipline__isnull=False).distinct()
                    disciplines = StudentDiscipline.objects.filter(date__range=(start, end))
                    for teacher in teachers:
                        row = [teacher]
                        for action in DisciplineAction.objects.all():
                            row.append(disciplines.filter(teacher=teacher, action=action).count())
                        data.append(row)
                    report.add_sheet(data, header_row=titles, heading="By Teachers")
                    return report.as_download()
                elif "aggr" in request.POST:
                    disciplines = StudentDiscipline.objects.filter(date__range=(start, end))
                    if form.cleaned_data["this_year"]:
                        school_start = SchoolYear.objects.get(active_year=True).start_date
                        school_end = SchoolYear.objects.get(active_year=True).end_date
                        disciplines = disciplines.filter(date__range=(school_start, school_end))
                    elif not form.cleaned_data["this_year"] and not form.cleaned_data["all_years"]:
                        disciplines = disciplines.filter(
                            date__range=(form.cleaned_data["date_begin"], form.cleaned_data["date_end"])
                        )
                    stats = []
                    titles = []
                    for infr in Infraction.objects.all():
                        titles.append(infr)
                        number = disciplines.filter(infraction=infr).count()
                        stats.append(number)
                    for action in DisciplineAction.objects.all():
                        titles.append(action)
                        number = 0
                        for a in DisciplineActionInstance.objects.filter(action=action):
                            number += a.quantity
                        stats.append(number)
                    data.append(stats)
                    report = XlReport(file_name="disc_stats")
                    report.add_sheet(data, header_row=titles, title="Discipline Stats", heading="Discipline Stats")
                    return report.as_download()
            else:
                return render_to_response(
                    "discipline/disc_report.html", {"request": request, "form": form}, RequestContext(request, {})
                )
    return render_to_response(
        "discipline/disc_report.html",
        {"request": request, "form": form, "merit_form": merit_form},
        RequestContext(request, {}),
    )
def download_teacher_results(self, test, format, template, cohorts=None):
    """ Make appy based report showing results for a whole class """
    if not cohorts:
        cohorts = Cohort.objects.all()
    # Stupid f*****g hack
    subquery = test.testinstance_set.filter(student__cohort__in=cohorts).distinct()
    report = TemplateReport()
    report.file_format = format
    test_instances = (
        test.active_testinstance_set.filter(answerinstance__points_earned__gt=0)
        .filter(pk__in=subquery)
        .annotate(Sum("answerinstance__points_earned"))
    )
    test.benchmarks = Benchmark.objects.filter(question__test=test).distinct()
    points_possible = test.points_possible
    points_to_earn = 0.70 * test.points_possible
    number_gte_70 = (
        test_instances.filter(pk__in=subquery)
        .filter(answerinstance__points_earned__sum__gte=points_to_earn)
        .count()
    )
    total_test_takers = (
        test_instances.filter(pk__in=subquery).filter(answerinstance__points_earned__gt=0).distinct().count()
    )
    test.percent_gte_70 = float(number_gte_70) / total_test_takers
    test.report_average = test.get_average(cohorts=cohorts)
    for benchmark in test.benchmarks:
        # TODO: eliminate this subquery? is the idea to eliminate any questions that all students left unanswered?
        qb_subquery = test.question_set.filter(
            answerinstance__test_instance__student__cohort__in=cohorts
        ).distinct()
        question_benchmarks = test.question_set.filter(pk__in=qb_subquery).filter(benchmarks=benchmark).distinct()
        benchmark.points_possible = question_benchmarks.aggregate(Sum("point_value"))["point_value__sum"]
        benchmark.total_points_possible = benchmark.points_possible * test_instances.count()
        # Really think this should work...but nope.
        # benchmark.total_points_earned = question_benchmarks.aggregate(Sum('answerinstance__points_earned'))['answerinstance__points_earned__sum']
        earned_sum = 0
        for question_benchmark in question_benchmarks:
            for answer in question_benchmark.active_answerinstance_set.filter(
                test_instance__student__cohort__in=cohorts
            ).distinct():
                earned_sum += answer.points_earned
        benchmark.total_points_earned = earned_sum
        benchmark.average = float(benchmark.total_points_earned) / benchmark.total_points_possible
        # Percent of students scoring at or above 70%
        test_instances_gte_70 = 0
        for test_instance in test_instances:
            answers = test_instance.answerinstance_set.filter(question__benchmarks=benchmark)
            answers_points = answers.aggregate(Sum("points_earned"), Sum("points_possible"))
            instance_points_earned = answers_points["points_earned__sum"]
            instance_points_possible = answers_points["points_possible__sum"]
            if instance_points_earned and instance_points_possible:
                instance_average = float(instance_points_earned) / instance_points_possible
                if instance_average >= 0.70:
                    test_instances_gte_70 += 1
        benchmark.gte_70 = float(test_instances_gte_70) / test_instances.count()
        benchmark.assessed_on = ""
        for question_benchmark in question_benchmarks:
            benchmark.assessed_on += "{}, ".format(question_benchmark.get_order_start_one)
        benchmark.assessed_on = benchmark.assessed_on[:-2]
    test.questions = test.question_set.all()
    for question in test.questions:
        question.benchmark_text = ""
        for benchmark in question.benchmarks.all():
            question.benchmark_text += "{}, ".format(benchmark.number)
        question.benchmark_text = question.benchmark_text[:-2]
        # grab all the AnswerInstances that we care about for this question
        answerinstances = question.answerinstance_set.filter(test_instance__student__cohort__in=cohorts).distinct()
        # nasty! http://stackoverflow.com/questions/4093910/django-aggregates-sums-in-postgresql-dont-use-distinct-is-this-a-bug/4917507#4917507
        answerinstances = question.active_answerinstance_set.filter(pk__in=answerinstances)
        # calculate the COUNT of correct student responses for this question
        question.num_correct = answerinstances.filter(points_earned__gte=F("points_possible")).count()
        # calculate the COUNT of all student responses for this question
        question.num_total = answerinstances.count()
        # http://www.merriam-webster.com/dictionary/percent: "cent" means 100, but I'll stick with the existing convention
        question.percent_correct = float(question.num_correct) / question.num_total
        # calculate the sum of all points earned and the sum of all points possible for this question
        earned_possible = answerinstances.aggregate(Sum("points_earned"), Sum("points_possible"))
        question.points_earned = earned_possible["points_earned__sum"]
        question.points_possible = earned_possible["points_possible__sum"]
        question.percent_points_earned = float(question.points_earned) / question.points_possible
    report.data["test"] = test
    report.data["tests"] = test_instances
    report.filename = "Teacher Results for " + unicode(test)
    return report.pod_save(template)