def test_caches_published_course(self):
    """Calculating results for a published course must populate the cache."""
    course = mommy.make(Course, state='published')
    cache_key = 'evap.staff.results.tools.calculate_results-{:d}'.format(course.id)

    # nothing cached before the calculation
    self.assertIsNone(cache.get(cache_key))

    calculate_results(course)

    # the calculation above must have written the cache entry
    self.assertIsNotNone(cache.get(cache_key))
def handle(self, *args, **options):
    """Clear the whole cache and recalculate the results of every course.

    Django management command entry point. Writes progress messages to
    ``self.stdout`` (Django convention — allows output capture in tests)
    instead of using ``print()``.
    """
    self.stdout.write("Clearing cache...")
    cache.clear()
    self.stdout.write("Calculating results for all courses...")
    for course in Course.objects.all():
        calculate_results(course)
    self.stdout.write("Done.")
def handle(self, *args, **options):
    """Management command: flush the cache, then warm it again by
    recalculating the results of all courses."""
    self.stdout.write("Clearing cache...")
    cache.clear()

    self.stdout.write("Calculating results for all courses...")
    all_courses = Course.objects.all()
    for single_course in all_courses:
        calculate_results(single_course)

    self.stdout.write("Done with updating cache.")
def handle(self, *args, **options):
    """Clear the cache, then recompute the results of every course while
    showing a progress bar on stdout."""
    self.stdout.write("Clearing cache...")
    cache.clear()

    course_count = Course.objects.count()
    self.stdout.write("Calculating results for all courses...")
    # ProgressBar emits partial lines; suppress automatic newlines
    self.stdout.ending = None
    progress_bar = ProgressBar(self.stdout, course_count)

    for position, course in enumerate(Course.objects.all(), start=1):
        progress_bar.update(position)
        calculate_results(course)

    self.stdout.write("Results cache has been refreshed.\n")
def semester_detail(request, semester_id):
    """Show the published courses of one semester, grouped by degree.

    Single-result courses are listed separately from regular courses.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    published = semester.course_set.filter(state="published").prefetch_related("degrees")
    courses = list(published)

    # Annotate each course object with its grades.
    for course in courses:
        course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)

    CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))

    # one (courses, single_results) bucket per degree, in Degree order
    courses_by_degree = OrderedDict(
        (degree, CourseTuple([], [])) for degree in Degree.objects.all())

    for course in courses:
        if not course.is_single_result:
            for degree in course.degrees.all():
                courses_by_degree[degree].courses.append(course)
        else:
            for degree in course.degrees.all():
                section = calculate_results(course)[0]
                result = section.results[0]
                courses_by_degree[degree].single_results.append((course, result))

    template_data = dict(
        semester=semester,
        courses_by_degree=courses_by_degree,
        staff=request.user.is_staff)
    return render(request, "results_semester_detail.html", template_data)
def course_detail(request, semester_id, course_id):
    """Show the detailed results of one published course.

    Non-staff users only see text results they are entitled to; staff
    members get a warning when the results are not publicly available.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id)
    sections = calculate_results(course, request.user.is_staff)

    if not request.user.is_staff:  # user is not a student representative
        # remove TextResults if user is neither the evaluated person (or a delegate)
        # nor responsible for the course (or a delegate)
        for section in sections:
            if not user_can_see_textresults(request.user, course, section):
                # iterate backwards so deletions do not shift pending indices
                for index in reversed(range(len(section.results))):
                    if isinstance(section.results[index], TextResult):
                        del section.results[index]

    # remove empty sections
    sections = [section for section in sections if section.results]

    # check whether results are published (enough voters absolutely and relatively)
    published = (course.num_voters >= settings.MIN_ANSWER_COUNT
                 and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE)

    # show a publishing warning to fsr members when the results are not publicly available
    warning = (not published) and request.user.is_staff

    return render_to_response(
        "results_course_detail.html",
        dict(course=course, sections=sections, warning=warning),
        context_instance=RequestContext(request))
def semester_detail(request, semester_id):
    """List the published courses of a semester grouped by their degrees."""
    semester = get_object_or_404(Semester, id=semester_id)
    courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))

    # annotate each course object with its grades
    for course in courses:
        course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)

    CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))

    courses_by_degree = OrderedDict()
    for degree in Degree.objects.all():
        courses_by_degree[degree] = CourseTuple([], [])

    for course in courses:
        is_single = course.is_single_result()
        for degree in course.degrees.all():
            if is_single:
                section = calculate_results(course)[0]
                result = section.results[0]
                courses_by_degree[degree].single_results.append((course, result))
            else:
                courses_by_degree[degree].courses.append(course)

    template_data = dict(semester=semester,
                         courses_by_degree=courses_by_degree,
                         staff=request.user.is_staff)
    return render(request, "results_semester_detail.html", template_data)
def course_detail(request, semester_id, course_id):
    """Show the detailed results of one published course.

    Non-staff users only see text results they are entitled to; staff
    members get a warning when the results are not publicly available.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id)
    sections = calculate_results(course, request.user.is_staff)

    if not request.user.is_staff:  # user is not a student representative
        # remove TextResults if user is neither the evaluated person (or a delegate)
        # nor responsible for the course (or a delegate)
        for section in sections:
            if not user_can_see_textresults(request.user, course, section):
                # iterate backwards so deletions do not shift pending indices
                for index in reversed(range(len(section.results))):
                    if isinstance(section.results[index], TextResult):
                        del section.results[index]

    # remove empty sections
    sections = [section for section in sections if section.results]

    # check whether results are published (enough voters absolutely and relatively)
    published = (course.num_voters >= settings.MIN_ANSWER_COUNT
                 and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE)

    # show a publishing warning to fsr members when the results are not publicly available
    warning = (not published) and request.user.is_staff

    return render_to_response("results_course_detail.html",
                              dict(course=course, sections=sections, warning=warning),
                              context_instance=RequestContext(request))
def course_detail(request, semester_id, course_id):
    """Show the detailed results of a single course.

    Access is governed by ``course.can_user_see_results``. Text answers are
    filtered per user; result sections are grouped into course-level and
    per-contributor sections.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set, id=course_id)
    if not course.can_user_see_results(request.user):
        raise PermissionDenied

    sections = calculate_results(course)

    # default: show own view; treat any unrecognized parameter value as "own view"
    # (previously .get() without a default yielded None for junk input)
    public_view = request.GET.get('public_view', 'false')
    public_view = {'true': True, 'false': False}.get(public_view.lower(), False)

    # keep only the text answers the requesting user may see
    for section in sections:
        results = []
        for result in section.results:
            if isinstance(result, TextResult):
                answers = [answer for answer in result.answers
                           if user_can_see_text_answer(request.user, answer, public_view)]
                if answers:
                    results.append(TextResult(question=result.question, answers=answers))
            else:
                results.append(result)
        section.results[:] = results

    # filter empty sections and group by contributor
    course_sections = []
    contributor_sections = OrderedDict()
    for section in sections:
        if not section.results:
            continue
        if section.contributor is None:
            course_sections.append(section)
        else:
            contributor_sections.setdefault(section.contributor, []).append(section)

    # show a warning if course is still in evaluation (for staff preview)
    evaluation_warning = course.state != 'published'

    # results for a course might not be visible because there are not enough answers
    # but it can still be "published" e.g. to show the comment results to contributors.
    # users who can open the results page see a warning message in this case
    sufficient_votes_warning = not course.can_publish_grades

    show_grades = request.user.is_staff or course.can_publish_grades

    course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)

    template_data = dict(
        course=course,
        course_sections=course_sections,
        contributor_sections=contributor_sections,
        evaluation_warning=evaluation_warning,
        sufficient_votes_warning=sufficient_votes_warning,
        show_grades=show_grades,
        staff=request.user.is_staff,
        contributor=course.is_user_contributor_or_delegate(request.user),
        can_download_grades=request.user.can_download_grades,
        public_view=public_view)
    return render(request, "results_course_detail.html", template_data)
def course_detail(request, semester_id, course_id):
    """Show the detailed results of one course.

    Staff may preview unpublished (but at least in-evaluation) courses;
    everyone else only sees published ones. Non-staff users only see text
    results they are entitled to.
    """
    semester = get_object_or_404(Semester, id=semester_id)

    # staff can see preview of results even when course is not published
    if request.user.is_staff:
        course = get_object_or_404(
            semester.course_set.filter(state__in=['inEvaluation', 'evaluated', 'reviewed', 'published']),
            id=course_id)
    # everyone else can only see published results
    else:
        course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id)

    sections = calculate_results(course, request.user.is_staff)

    if not request.user.is_staff:  # if user is not a student representative
        # remove TextResults if user is neither the evaluated person (or a delegate)
        # nor responsible for the course (or a delegate)
        for section in sections:
            if not user_can_see_textresults(request.user, course, section):
                # iterate backwards so deletions do not shift pending indices
                for index in reversed(range(len(section.results))):
                    if isinstance(section.results[index], TextResult):
                        del section.results[index]

    # remove empty sections and group by contributor
    course_sections = []
    contributor_sections = {}
    for section in sections:
        if not section.results:
            continue
        if section.contributor is None:
            course_sections.append(section)
        else:
            contributor_sections.setdefault(section.contributor, []).append(section)

    # show a warning if course is still in evaluation (for staff preview)
    evaluation_warning = course.state != 'published'

    # check whether course has a sufficient number of votes for publishing it
    sufficient_votes = (course.num_voters >= settings.MIN_ANSWER_COUNT
                        and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE)

    # results for a course might not be visible because there are not enough answers
    # but it can still be "published" e.g. to show the comment results to lecturers
    # the FSR can still see all results but gets a warning message
    sufficient_votes_warning = (not sufficient_votes) and request.user.is_staff

    course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course)

    return render_to_response(
        "results_course_detail.html",
        dict(
            course=course,
            course_sections=course_sections,
            contributor_sections=contributor_sections,
            evaluation_warning=evaluation_warning,
            sufficient_votes_warning=sufficient_votes_warning,
            staff=request.user.is_staff),
        context_instance=RequestContext(request))
def course_detail(request, semester_id, course_id):
    """Show the detailed results of one course the user may see.

    Non-staff users only see text results they are entitled to; sections
    are grouped into course-level and per-contributor sections.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set, id=course_id)
    if not course.can_user_see_results(request.user):
        raise PermissionDenied

    sections = calculate_results(course, request.user.is_staff)

    if not request.user.is_staff:
        # remove TextResults if user is neither the evaluated person (or a delegate)
        # nor responsible for the course (or a delegate)
        for section in sections:
            if not user_can_see_textresults(request.user, course, section):
                # iterate backwards so deletions do not shift pending indices
                for i in reversed(range(len(section.results))):
                    if isinstance(section.results[i], TextResult):
                        del section.results[i]

    # remove empty sections and group by contributor
    course_sections = []
    contributor_sections = OrderedDict()
    for section in sections:
        if not section.results:
            continue
        if section.contributor is None:
            course_sections.append(section)
        else:
            contributor_sections.setdefault(section.contributor, []).append(section)

    # show a warning if course is still in evaluation (for staff preview)
    evaluation_warning = course.state != 'published'

    # check whether course has a sufficient number of votes for publishing it
    sufficient_votes = (course.num_voters >= settings.MIN_ANSWER_COUNT
                        and float(course.num_voters) / course.num_participants >= settings.MIN_ANSWER_PERCENTAGE)

    # results for a course might not be visible because there are not enough answers
    # but it can still be "published" e.g. to show the comment results to lecturers.
    # users who can open the results page see a warning message in this case
    sufficient_votes_warning = not sufficient_votes

    course.avg_grade, course.med_grade = calculate_average_and_medium_grades(course)

    return render_to_response(
        "results_course_detail.html",
        dict(
            course=course,
            course_sections=course_sections,
            contributor_sections=contributor_sections,
            evaluation_warning=evaluation_warning,
            sufficient_votes_warning=sufficient_votes_warning,
            staff=request.user.is_staff),
        context_instance=RequestContext(request))
def course_detail(request, semester_id, course_id):
    """Show the results of a published course; non-staff users only see
    text results belonging to them (or to sections they lecture)."""
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id)
    sections = calculate_results(course)

    if not request.user.is_staff:
        # remove all TextResults of other users
        for section in sections:
            own_general_section = section.lecturer is None and course.is_user_lecturer_or_ta(request.user)
            if not (own_general_section or section.lecturer == request.user):
                # iterate backwards so deletions do not shift pending indices
                for index in reversed(range(len(section.results))):
                    if isinstance(section.results[index], TextResult):
                        del section.results[index]

    # remove empty sections
    sections = [section for section in sections if section.results]

    return render_to_response(
        "results_course_detail.html",
        dict(course=course, sections=sections),
        context_instance=RequestContext(request))
def course_detail(request, semester_id, course_id):
    """Show the results of a published course; non-staff users only see
    text results they are entitled to."""
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set.filter(state="published"), id=course_id)
    sections = calculate_results(course)

    if not request.user.is_staff:  # don't remove TextResults for FSR members
        # remove TextResults if user is neither the evaluated person (or a delegate)
        # nor responsible for the course (or a delegate)
        for section in sections:
            if not user_can_see_textresults(request.user, course, section):
                # iterate backwards so deletions do not shift pending indices
                for index in reversed(range(len(section.results))):
                    if isinstance(section.results[index], TextResult):
                        del section.results[index]

    # remove empty sections
    sections = [section for section in sections if section.results]

    return render_to_response(
        "results_course_detail.html",
        dict(course=course, sections=sections),
        context_instance=RequestContext(request))
def test_calculation_results(self):
    """calculate_results must aggregate rating answers into a single
    grade result with the correct total count, average and deviation."""
    contributor1 = mommy.make(UserProfile)
    student = mommy.make(UserProfile)
    course = mommy.make(Course, state='published', participants=[student, contributor1])
    questionnaire = mommy.make(Questionnaire)
    question = mommy.make(Question, questionnaire=questionnaire, type="G")
    contribution1 = mommy.make(Contribution, contributor=contributor1, course=course, questionnaires=[questionnaire])

    # answer value -> how often it was given (150 answers total)
    answer_counts = [(1, 5), (2, 15), (3, 40), (4, 60), (5, 30)]
    for answer, count in answer_counts:
        mommy.make(RatingAnswerCounter, question=question, contribution=contribution1,
                   answer=answer, count=count)

    results = calculate_results(course)

    self.assertEqual(len(results), 1)
    self.assertEqual(len(results[0].results), 1)
    result = results[0].results[0]

    self.assertEqual(result.total_count, 150)
    self.assertAlmostEqual(result.average, float(109) / 30)
    self.assertAlmostEqual(result.deviation, 1.015983376941878)
def export(self, response, all=False):
    """Write an Excel sheet with the results of every published course of
    self.semester into `response` (one column pair per course, one row per
    question).

    `all`: also write averages for courses without enough answers.
    NOTE(review): the parameter name shadows the builtin `all`; kept for
    interface compatibility.
    """
    # collect per-course results keyed by questionnaire id
    courses_with_results = list()
    for course in self.semester.course_set.filter(state="published").all():
        results = SortedDict()
        for questionnaire, contributor, data, grade in calculate_results(course):
            results.setdefault(questionnaire.id, []).append((contributor, data, grade))
        courses_with_results.append((course, results))
    courses_with_results.sort(key=lambda cr: cr[0].kind)

    # order questionnaires by how many courses used them, most frequent first
    qn_frequencies = defaultdict(int)
    for course, results in courses_with_results:
        # NOTE(review): the loop variable rebinds `results` here; harmless as
        # the outer `results` is not used again in this loop body
        for questionnaire, results in results.items():
            qn_frequencies[questionnaire] += 1
    qn_relevant = qn_frequencies.items()  # Python 2: list, sortable in place
    qn_relevant.sort(key=lambda t: -t[1])
    questionnaires = [Questionnaire.objects.get(id=t[0]) for t in qn_relevant]

    self.workbook = xlwt.Workbook()
    self.sheet = self.workbook.add_sheet(_(u"Results"))
    self.row = 0
    self.col = 0

    # formatting for average grades
    avg_style = xlwt.easyxf('alignment: horiz centre; font: bold on; borders: left medium, top medium, bottom medium')
    avg_style_good = xlwt.easyxf('pattern: pattern solid, fore_colour light_green; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
    avg_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour light_yellow; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
    # NOTE(review): "bad" uses the same light_yellow fill as "medium" — possibly
    # unintended, verify against the intended color scheme
    avg_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour light_yellow; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
    # formatting for variances
    var_style_good = xlwt.easyxf('alignment: horiz centre; borders: right medium', num_format_str="0.0")
    var_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour gray25; alignment: horiz centre; borders: right medium', num_format_str="0.0")
    var_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour gray40; alignment: horiz centre; borders: right medium', num_format_str="0.0")
    # formatting for overall grades
    over_style_good = xlwt.easyxf('pattern: pattern solid, fore_colour light_green; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
    over_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour light_yellow; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
    over_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour light_yellow; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
    # formatting for special fields
    headline_style = xlwt.easyxf('font: bold on, height 400; alignment: horiz centre, vert centre, wrap on', num_format_str="0.0")
    course_style = xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium')
    course_unfinished_style = xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium; font: italic on')
    total_answers_style = xlwt.easyxf('alignment: horiz centre; borders: left medium, bottom medium, right medium')
    # general formattings
    bold_style = xlwt.easyxf('font: bold on')
    border_left_style = xlwt.easyxf('borders: left medium')
    border_right_style = xlwt.easyxf('borders: right medium')
    border_top_bottom_right_style = xlwt.easyxf('borders: top medium, bottom medium, right medium')

    # headline and one rotated course-name header (2 columns wide) per course
    self.writec(_(u"Evaluation {0} - created on {1}").format(self.semester.name, datetime.date.today()), headline_style)
    for course, results in courses_with_results:
        if course.state == "published":
            self.writec(course.name, course_style, cols=2)
        else:
            self.writec(course.name, course_unfinished_style, cols=2)

    self.writen()
    for course, results in courses_with_results:
        self.writec("Average", avg_style)
        self.writec("Variance", border_top_bottom_right_style)

    # one questionnaire block: name row, then one row per non-text question
    for questionnaire in questionnaires:
        self.writen(questionnaire.name, bold_style)
        for course, results in courses_with_results:
            self.writec(None, border_left_style)
            self.writec(None, border_right_style)

        for question_index, question in enumerate(questionnaire.question_set.all()):
            if question.is_text_question():
                continue

            self.writen(question.text)
            for course, results in courses_with_results:
                qn_results = results.get(questionnaire.id, None)
                if qn_results:
                    values = []
                    variances = []
                    enough_answers = True
                    for contributor, data, grade in qn_results:
                        for grade_result in data:
                            if grade_result.question.id == question.id:
                                if grade_result.average:
                                    values.append(grade_result.average)
                                    variances.append(grade_result.variance)
                                if not grade_result.show:
                                    enough_answers = False
                                break
                    if values and (enough_answers or all):
                        # color-band the average (lower is better)
                        avg = sum(values) / len(values)
                        if avg < 2:
                            self.writec(avg, avg_style_good)
                        elif avg < 3:
                            self.writec(avg, avg_style_medium)
                        else:
                            self.writec(avg, avg_style_bad)
                        # color-band the variance
                        var = sum(variances) / len(variances)
                        if var < 0.5:
                            self.writec(var, var_style_good)
                        elif var < 1:
                            self.writec(var, var_style_medium)
                        else:
                            self.writec(var, var_style_bad)
                    else:
                        self.writec(None, border_left_style)
                        self.writec(None, border_right_style)
                else:
                    self.writec(None, border_left_style)
                    self.writec(None, border_right_style)

        # blank separator row after each questionnaire block
        self.writen(None)
        for course, results in courses_with_results:
            self.writec(None, border_left_style)
            self.writec(None, border_right_style)

    # footer rows: overall grade and total answers per course
    self.writen(_(u"Overall Grade"), bold_style)
    for course, results in courses_with_results:
        avg = calculate_average_grade(course)
        if avg:
            if avg < 2:
                self.writec(avg, over_style_good, cols=2)
            elif avg < 3:
                self.writec(avg, over_style_medium, cols=2)
            else:
                self.writec(avg, over_style_bad, cols=2)
        else:
            self.writec(None, border_left_style)
            self.writec(None, border_right_style)

    self.writen(_(u"Total Answers"), bold_style)
    for course, results in courses_with_results:
        self.writec(course.num_voters, total_answers_style, cols=2)

    self.workbook.save(response)
def export(self, response, ignore_not_enough_answers=False):
    """Write an Excel sheet with the results of every published course of
    self.semester into `response` (one column pair per course: average and
    deviation, one row per non-text question).

    `ignore_not_enough_answers`: also write averages for courses that
    cannot publish grades yet.
    """
    # collect per-course grade results keyed by questionnaire id
    courses_with_results = list()
    for course in self.semester.course_set.filter(state="published").all():
        results = OrderedDict()
        for questionnaire, contributor, label, data, section_warning in calculate_results(course):
            results.setdefault(questionnaire.id, []).extend(data)
        courses_with_results.append((course, results))
    courses_with_results.sort(key=lambda cr: cr[0].type)

    # order questionnaires by how many courses used them, most frequent first
    qn_frequencies = defaultdict(int)
    for course, results in courses_with_results:
        # NOTE(review): the loop variable rebinds `results`; harmless here
        for questionnaire, results in results.items():
            qn_frequencies[questionnaire] += 1
    qn_relevant = list(qn_frequencies.items())
    qn_relevant.sort(key=lambda t: -t[1])
    questionnaires = [Questionnaire.objects.get(id=t[0]) for t in qn_relevant]

    self.workbook = xlwt.Workbook()
    self.sheet = self.workbook.add_sheet(_("Results"))
    self.row = 0
    self.col = 0
    self.init_styles(self.workbook)

    # headline and one course-name header (2 columns wide) per course
    writec(self, _("Evaluation {0} - created on {1}").format(self.semester.name, datetime.date.today()), "headline")
    for course, results in courses_with_results:
        if course.state == "published":
            writec(self, course.name, "course", cols=2)
        else:
            writec(self, course.name, "course_unfinished", cols=2)

    writen(self)
    for course, results in courses_with_results:
        writec(self, "Average", "avg")
        writec(self, "Deviation", "border_top_bottom_right")

    # one questionnaire block: name row, then one row per non-text question
    for questionnaire in questionnaires:
        writen(self, questionnaire.name, "bold")
        for course, results in courses_with_results:
            self.write_two_empty_cells_with_borders()

        for question in questionnaire.question_set.all():
            if question.is_text_question:
                continue

            writen(self, question.text)
            for course, results in courses_with_results:
                qn_results = results.get(questionnaire.id, None)
                if qn_results:
                    values = []
                    deviations = []
                    # find this question's grade result for the course
                    for grade_result in qn_results:
                        if grade_result.question.id == question.id:
                            if grade_result.average:
                                values.append(grade_result.average)
                                deviations.append(grade_result.deviation)
                            break
                    enough_answers = course.can_publish_grades
                    if values and (enough_answers or ignore_not_enough_answers):
                        avg = sum(values) / len(values)
                        writec(self, avg, self.grade_to_style(avg))
                        dev = sum(deviations) / len(deviations)
                        writec(self, dev, self.deviation_to_style(dev))
                    else:
                        self.write_two_empty_cells_with_borders()
                else:
                    self.write_two_empty_cells_with_borders()

        # blank separator row after each questionnaire block
        writen(self, None)
        for course, results in courses_with_results:
            self.write_two_empty_cells_with_borders()

    # footer rows: overall average grade, deviation, and participation
    writen(self, _("Overall Average Grade"), "bold")
    for course, results in courses_with_results:
        avg, dev = calculate_average_grades_and_deviation(course)
        if avg:
            writec(self, avg, self.grade_to_style(avg), cols=2)
        else:
            self.write_two_empty_cells_with_borders()

    writen(self, _("Overall Average Standard Deviation"), "bold")
    for course, results in courses_with_results:
        avg, dev = calculate_average_grades_and_deviation(course)
        if dev is not None:
            writec(self, dev, self.deviation_to_style(dev), cols=2)
        else:
            self.write_two_empty_cells_with_borders()

    writen(self, _("Total Voters/Total Participants"), "bold")
    for course, results in courses_with_results:
        percent_participants = float(course.num_voters)/float(course.num_participants)
        writec(self, "{}/{} ({:.0%})".format(course.num_voters, course.num_participants, percent_participants), "total_voters", cols=2)

    self.workbook.save(response)
def export(self, response, all=False):
    """Write an Excel sheet with the results of every published course of
    self.semester into `response`, using a custom five-band color palette.

    `all`: also write averages for courses without enough answers.
    NOTE(review): the parameter name shadows the builtin `all`; kept for
    interface compatibility.
    """
    # collect per-course results keyed by questionnaire id
    courses_with_results = list()
    for course in self.semester.course_set.filter(state="published").all():
        results = SortedDict()
        for questionnaire, contributor, data, grade in calculate_results(course):
            results.setdefault(questionnaire.id, []).append((contributor, data, grade))
        courses_with_results.append((course, results))
    courses_with_results.sort(key=lambda cr: cr[0].kind)

    # order questionnaires by how many courses used them, most frequent first
    qn_frequencies = defaultdict(int)
    for course, results in courses_with_results:
        # NOTE(review): the loop variable rebinds `results`; harmless here
        for questionnaire, results in results.items():
            qn_frequencies[questionnaire] += 1
    qn_relevant = qn_frequencies.items()  # Python 2: list, sortable in place
    qn_relevant.sort(key=lambda t: -t[1])
    questionnaires = [Questionnaire.objects.get(id=t[0]) for t in qn_relevant]

    self.workbook = xlwt.Workbook()
    self.sheet = self.workbook.add_sheet(_(u"Results"))
    self.row = 0
    self.col = 0

    # Adding evaP colors to palette
    xlwt.add_palette_colour("custom_dark_green", 0x20)
    self.workbook.set_colour_RGB(0x20, 120, 241, 89)
    xlwt.add_palette_colour("custom_light_green", 0x21)
    self.workbook.set_colour_RGB(0x21, 188, 241, 89)
    xlwt.add_palette_colour("custom_yellow", 0x22)
    self.workbook.set_colour_RGB(0x22, 241, 226, 89)
    xlwt.add_palette_colour("custom_orange", 0x23)
    self.workbook.set_colour_RGB(0x23, 241, 158, 89)
    xlwt.add_palette_colour("custom_red", 0x24)
    self.workbook.set_colour_RGB(0x24, 241, 89, 89)

    # formatting for average grades (five bands, dark green = best)
    avg_style = xlwt.easyxf('alignment: horiz centre; font: bold on; borders: left medium, top medium, bottom medium')
    avg_style_very_good = xlwt.easyxf('pattern: pattern solid, fore_colour custom_dark_green; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
    avg_style_good = xlwt.easyxf('pattern: pattern solid, fore_colour custom_light_green; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
    avg_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour custom_yellow; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
    avg_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour custom_orange; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
    avg_style_very_bad = xlwt.easyxf('pattern: pattern solid, fore_colour custom_red; alignment: horiz centre; font: bold on; borders: left medium', num_format_str="0.0")
    # formatting for variances
    var_style_good = xlwt.easyxf('alignment: horiz centre; borders: right medium', num_format_str="0.0")
    var_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour gray25; alignment: horiz centre; borders: right medium', num_format_str="0.0")
    var_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour gray40; alignment: horiz centre; borders: right medium', num_format_str="0.0")
    # formatting for overall grades
    over_style_very_good = xlwt.easyxf('pattern: pattern solid, fore_colour custom_dark_green; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
    over_style_good = xlwt.easyxf('pattern: pattern solid, fore_colour custom_light_green; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
    over_style_medium = xlwt.easyxf('pattern: pattern solid, fore_colour custom_yellow; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
    over_style_bad = xlwt.easyxf('pattern: pattern solid, fore_colour custom_orange; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
    over_style_very_bad = xlwt.easyxf('pattern: pattern solid, fore_colour custom_red; alignment: horiz centre; font: bold on; borders: left medium, right medium', num_format_str="0.0")
    # formatting for special fields
    headline_style = xlwt.easyxf('font: bold on, height 400; alignment: horiz centre, vert centre, wrap on', num_format_str="0.0")
    course_style = xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium')
    course_unfinished_style = xlwt.easyxf('alignment: horiz centre, wrap on, rota 90; borders: left medium, top medium; font: italic on')
    total_answers_style = xlwt.easyxf('alignment: horiz centre; borders: left medium, bottom medium, right medium')
    # general formattings
    bold_style = xlwt.easyxf('font: bold on')
    border_left_style = xlwt.easyxf('borders: left medium')
    border_right_style = xlwt.easyxf('borders: right medium')
    border_top_bottom_right_style = xlwt.easyxf('borders: top medium, bottom medium, right medium')

    # headline and one rotated course-name header (2 columns wide) per course
    self.writec(_(u"Evaluation {0} - created on {1}").format(self.semester.name, datetime.date.today()), headline_style)
    for course, results in courses_with_results:
        if course.state == "published":
            self.writec(course.name, course_style, cols=2)
        else:
            self.writec(course.name, course_unfinished_style, cols=2)

    self.writen()
    for course, results in courses_with_results:
        self.writec("Average", avg_style)
        self.writec("Variance", border_top_bottom_right_style)

    # one questionnaire block: name row, then one row per non-text question
    for questionnaire in questionnaires:
        self.writen(questionnaire.name, bold_style)
        for course, results in courses_with_results:
            self.writec(None, border_left_style)
            self.writec(None, border_right_style)

        for question_index, question in enumerate(questionnaire.question_set.all()):
            if question.is_text_question():
                continue

            self.writen(question.text)
            for course, results in courses_with_results:
                qn_results = results.get(questionnaire.id, None)
                if qn_results:
                    values = []
                    variances = []
                    enough_answers = True
                    for contributor, data, grade in qn_results:
                        for grade_result in data:
                            if grade_result.question.id == question.id:
                                if grade_result.average:
                                    values.append(grade_result.average)
                                    variances.append(grade_result.variance)
                                if not grade_result.show:
                                    enough_answers = False
                                break
                    if values and (enough_answers or all):
                        # color-band the average in five bands (lower is better)
                        avg = sum(values) / len(values)
                        if avg < 1.5:
                            self.writec(avg, avg_style_very_good)
                        elif avg < 2.5:
                            self.writec(avg, avg_style_good)
                        elif avg < 3.5:
                            self.writec(avg, avg_style_medium)
                        elif avg < 4.5:
                            self.writec(avg, avg_style_bad)
                        else:
                            self.writec(avg, avg_style_very_bad)
                        # color-band the variance
                        var = sum(variances) / len(variances)
                        if var < 0.5:
                            self.writec(var, var_style_good)
                        elif var < 1:
                            self.writec(var, var_style_medium)
                        else:
                            self.writec(var, var_style_bad)
                    else:
                        self.writec(None, border_left_style)
                        self.writec(None, border_right_style)
                else:
                    self.writec(None, border_left_style)
                    self.writec(None, border_right_style)

        # blank separator row after each questionnaire block
        self.writen(None)
        for course, results in courses_with_results:
            self.writec(None, border_left_style)
            self.writec(None, border_right_style)

    # footer rows: overall grade (five bands) and total answers per course
    self.writen(_(u"Overall Grade"), bold_style)
    for course, results in courses_with_results:
        avg = calculate_average_grade(course)
        if avg:
            if avg < 1.5:
                self.writec(avg, over_style_very_good, cols=2)
            elif avg < 2.5:
                self.writec(avg, over_style_good, cols=2)
            elif avg < 3.5:
                self.writec(avg, over_style_medium, cols=2)
            elif avg < 4.5:
                self.writec(avg, over_style_bad, cols=2)
            else:
                self.writec(avg, over_style_very_bad, cols=2)
        else:
            self.writec(None, border_left_style)
            self.writec(None, border_right_style)

    self.writen(_(u"Total Answers"), bold_style)
    for course, results in courses_with_results:
        self.writec(course.num_voters, total_answers_style, cols=2)

    self.workbook.save(response)
def course_detail(request, semester_id, course_id):
    """Show the detailed results page for one course.

    Filters text answers down to those the requesting user may see, groups
    result sections by contributor, and renders the course detail template.

    Raises Http404 if semester or course do not exist, and PermissionDenied
    if the user may not see this course's results.
    """
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set, id=course_id, semester=semester)

    if not course.can_user_see_results(request.user):
        raise PermissionDenied

    sections = calculate_results(course)

    # ?public_view=true switches to the view shown to the general public.
    # Default to False for a missing OR unrecognized value (without the
    # explicit default, .get() would return None for unknown values).
    public_view = request.GET.get('public_view', 'false')
    public_view = {'true': True, 'false': False}.get(public_view.lower(), False)

    represented_users = list(request.user.represented_users.all())
    represented_users.append(request.user)

    # Remove text answers the user may not see; drop TextResults that end up
    # empty after filtering.
    for section in sections:
        results = []
        for result in section.results:
            if isinstance(result, TextResult):
                answers = [answer for answer in result.answers
                           if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
                if answers:
                    results.append(TextResult(question=result.question, answers=answers))
            else:
                results.append(result)
        section.results[:] = results

    # Filter empty sections and group by contributor.
    course_sections = []
    contributor_sections = OrderedDict()
    for section in sections:
        if not section.results:
            continue
        if section.contributor is None:
            course_sections.append(section)
        else:
            contributor_sections.setdefault(section.contributor,
                                            {'total_votes': 0, 'sections': []})['sections'].append(section)
            # Sum up all Sections for this contributor.
            # If section is not a RatingResult:
            #   Add 1 as we assume it is a TextResult or something similar that should be displayed.
            contributor_sections[section.contributor]['total_votes'] += \
                sum(s.total_count if isinstance(s, RatingResult) else 1 for s in section.results)

    # Show a warning if course is still in evaluation (for staff preview).
    evaluation_warning = course.state != 'published'

    # Results for a course might not be visible because there are not enough answers
    # but it can still be "published" e.g. to show the comment results to contributors.
    # Users who can open the results page see a warning message in this case.
    sufficient_votes_warning = not course.can_publish_grades

    show_grades = request.user.is_staff or course.can_publish_grades

    course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)

    template_data = dict(
        course=course,
        course_sections=course_sections,
        contributor_sections=contributor_sections,
        evaluation_warning=evaluation_warning,
        sufficient_votes_warning=sufficient_votes_warning,
        show_grades=show_grades,
        staff=request.user.is_staff,
        contributor=course.is_user_contributor_or_delegate(request.user),
        can_download_grades=request.user.can_download_grades,
        public_view=public_view)
    return render(request, "results_course_detail.html", template_data)
def course_detail(request, semester_id, course_id):
    """Render the detailed results page for a single course of a semester."""
    semester = get_object_or_404(Semester, id=semester_id)
    course = get_object_or_404(semester.course_set, id=course_id, semester=semester)

    if not course.can_user_see_results(request.user):
        raise PermissionDenied

    sections = calculate_results(course)

    # Convert the ?public_view= query parameter into a boolean; the default
    # is the user's own (non-public) view.
    public_view = {'true': True, 'false': False}.get(
        request.GET.get('public_view', 'false').lower())

    represented_users = list(request.user.represented_users.all()) + [request.user]

    # Keep only the text answers this user is allowed to see; TextResults
    # left without any visible answer are dropped entirely.
    for section in sections:
        kept = []
        for result in section.results:
            if not isinstance(result, TextResult):
                kept.append(result)
                continue
            visible_answers = [a for a in result.answers
                               if user_can_see_text_answer(request.user, represented_users, a, public_view)]
            if visible_answers:
                kept.append(TextResult(question=result.question, answers=visible_answers))
        section.results[:] = kept

    # Group the non-empty sections: course-level ones vs. per-contributor ones.
    course_sections = []
    contributor_sections = OrderedDict()
    for section in sections:
        if not section.results:
            continue
        if section.contributor is None:
            course_sections.append(section)
            continue
        entry = contributor_sections.setdefault(section.contributor,
                                                {'total_votes': 0, 'sections': []})
        entry['sections'].append(section)
        # Tally this contributor's votes; any non-RatingResult (e.g. a
        # TextResult) counts as one displayed item.
        entry['total_votes'] += sum(
            [s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])

    # Warn staff when previewing a course that is still being evaluated.
    evaluation_warning = course.state != 'published'
    # A "published" course may still lack enough answers to show grades
    # (e.g. published only so contributors can read comments) — warn then.
    sufficient_votes_warning = not course.can_publish_grades
    show_grades = request.user.is_staff or course.can_publish_grades

    course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)

    return render(request, "results_course_detail.html", dict(
        course=course,
        course_sections=course_sections,
        contributor_sections=contributor_sections,
        evaluation_warning=evaluation_warning,
        sufficient_votes_warning=sufficient_votes_warning,
        show_grades=show_grades,
        staff=request.user.is_staff,
        contributor=course.is_user_contributor_or_delegate(request.user),
        can_download_grades=request.user.can_download_grades,
        public_view=public_view))
def export(self, response, all=False):
    """Write an Excel workbook with the results of all published courses of
    ``self.semester`` into ``response``.

    If ``all`` is True, grades are written even for questions that did not
    get enough answers to normally be shown.
    NOTE: the parameter name ``all`` shadows the builtin; it is kept
    unchanged for backward compatibility with existing callers.
    """
    courses_with_results = []
    for course in self.semester.course_set.filter(state="published").all():
        results = SortedDict()
        for questionnaire, contributor, data, avg_likert, med_likert, avg_grade, med_grade, avg_total, med_total in calculate_results(course):
            results.setdefault(questionnaire.id, []).append((contributor, data, avg_total, med_total))
        courses_with_results.append((course, results))

    courses_with_results.sort(key=lambda cr: cr[0].kind)

    # Order questionnaires by how many courses used them, most used first.
    # sorted() works on dict views in both Python 2 and 3, unlike calling
    # .sort() on the return value of items(), which fails on Python 3.
    qn_frequencies = defaultdict(int)
    for course, results in courses_with_results:
        for questionnaire_id in results:
            qn_frequencies[questionnaire_id] += 1
    qn_relevant = sorted(qn_frequencies.items(), key=lambda t: -t[1])
    questionnaires = [Questionnaire.objects.get(id=t[0]) for t in qn_relevant]

    self.workbook = xlwt.Workbook()
    self.sheet = self.workbook.add_sheet(_(u"Results"))
    self.row = 0
    self.col = 0
    self.add_color_palette_to_workbook(self.workbook)

    # Header row: one merged cell per course.
    self.writec(_(u"Evaluation {0} - created on {1}").format(self.semester.name, datetime.date.today()), "headline")
    for course, results in courses_with_results:
        if course.state == "published":
            self.writec(course.name, "course", cols=2)
        else:
            self.writec(course.name, "course_unfinished", cols=2)

    self.writen()
    for course, results in courses_with_results:
        self.writec("Average", "avg")
        self.writec("Variance", "border_top_bottom_right")

    for questionnaire in questionnaires:
        self.writen(questionnaire.name, "bold")
        for course, results in courses_with_results:
            self.write_two_empty_cells_with_borders()

        for question_index, question in enumerate(questionnaire.question_set.all()):
            if question.is_text_question():
                continue

            self.writen(question.text)
            for course, results in courses_with_results:
                qn_results = results.get(questionnaire.id, None)
                if not qn_results:
                    self.write_two_empty_cells_with_borders()
                    continue
                values = []
                variances = []
                enough_answers = True
                for contributor, data, avg_grade, med_grade in qn_results:
                    for grade_result in data:
                        if grade_result.question.id == question.id:
                            if grade_result.average:
                                values.append(grade_result.average)
                                variances.append(grade_result.variance)
                            if not grade_result.show:
                                enough_answers = False
                            break
                if values and (enough_answers or all):
                    avg = sum(values) / len(values)
                    self.writec(avg, ExcelExporter.grade_to_style(avg))
                    var = sum(variances) / len(variances)
                    self.writec(var, ExcelExporter.variance_to_style(var))
                else:
                    self.write_two_empty_cells_with_borders()

    self.writen(None)
    for course, results in courses_with_results:
        self.write_two_empty_cells_with_borders()

    # Compute each course's overall grades once; both summary rows below
    # use them (previously calculate_average_and_medium_grades ran twice
    # per course).
    overall_grades = [calculate_average_and_medium_grades(course)
                      for course, results in courses_with_results]

    self.writen(_(u"Overall Average Grade"), "bold")
    for avg, med in overall_grades:
        if avg:
            self.writec(avg, ExcelExporter.grade_to_style(avg), cols=2)
        else:
            self.write_two_empty_cells_with_borders()

    self.writen(_(u"Overall Median Grade"), "bold")
    for avg, med in overall_grades:
        if med:
            self.writec(med, ExcelExporter.grade_to_style(med), cols=2)
        else:
            self.write_two_empty_cells_with_borders()

    self.writen(_(u"Total Answers"), "bold")
    for course, results in courses_with_results:
        self.writec(course.num_voters, "total_answers", cols=2)

    self.workbook.save(response)
def export(self, response, course_types_list, ignore_not_enough_answers=False, include_unpublished=False):
    """Write an Excel workbook into ``response`` with one sheet per entry of
    ``course_types_list``, each covering the matching courses of
    ``self.semester``.

    ``ignore_not_enough_answers`` writes grades even when a course could not
    normally publish them; ``include_unpublished`` also exports courses in
    the 'evaluated' and 'reviewed' states.
    """
    self.workbook = xlwt.Workbook()
    self.init_styles(self.workbook)
    counter = 1
    for course_types in course_types_list:
        self.sheet = self.workbook.add_sheet("Sheet " + str(counter))
        counter += 1
        self.row = 0
        self.col = 0

        course_states = ['published']
        if include_unpublished:
            course_states.extend(['evaluated', 'reviewed'])

        # Collect per-course results keyed by questionnaire id.
        courses_with_results = []
        used_questionnaires = set()
        for course in self.semester.course_set.filter(state__in=course_states, type__in=course_types).all():
            # NOTE(review): invoked as a method here, while other code in this
            # project reads is_single_result as a property — confirm which is
            # current before changing.
            if course.is_single_result():
                continue
            results = OrderedDict()
            for questionnaire, contributor, label, data, section_warning in calculate_results(course):
                if has_no_rating_answers(course, contributor, questionnaire):
                    continue
                results.setdefault(questionnaire.id, []).extend(data)
                used_questionnaires.add(questionnaire)
            courses_with_results.append((course, results))

        courses_with_results.sort(key=lambda cr: cr[0].type)
        used_questionnaires = sorted(used_questionnaires)

        # Header row: one merged cell per course.
        writec(self, _("Evaluation {0}\n\n{1}").format(self.semester.name, ", ".join(course_types)), "headline")
        for course, results in courses_with_results:
            writec(self, course.name, "course", cols=2)

        writen(self)
        for course, results in courses_with_results:
            writec(self, "Average", "avg")
            writec(self, "Deviation", "border_top_bottom_right")

        for questionnaire in used_questionnaires:
            writen(self, questionnaire.name, "bold")
            for course, results in courses_with_results:
                self.write_two_empty_cells_with_borders()

            for question in questionnaire.question_set.all():
                if question.is_text_question:
                    continue

                writen(self, question.text)
                for course, results in courses_with_results:
                    if questionnaire.id not in results:
                        self.write_two_empty_cells_with_borders()
                        continue
                    # Weight each result by its answer count so the cell shows
                    # the overall (not per-contributor) average/deviation.
                    values = []
                    deviations = []
                    total_count = 0
                    for grade_result in results[questionnaire.id]:
                        if grade_result.question.id == question.id and grade_result.average:
                            values.append(grade_result.average * grade_result.total_count)
                            deviations.append(grade_result.deviation * grade_result.total_count)
                            total_count += grade_result.total_count
                    enough_answers = course.can_publish_grades
                    if values and (enough_answers or ignore_not_enough_answers):
                        avg = sum(values) / total_count
                        writec(self, avg, self.grade_to_style(avg))
                        dev = sum(deviations) / total_count
                        writec(self, dev, self.deviation_to_style(dev))
                    else:
                        self.write_two_empty_cells_with_borders()

        writen(self, None)
        for course, results in courses_with_results:
            self.write_two_empty_cells_with_borders()

        # Compute each course's overall grade/deviation once; both of the
        # following summary rows use them (previously the function ran twice
        # per course).
        overall = [calculate_average_grades_and_deviation(course)
                   for course, results in courses_with_results]

        writen(self, _("Overall Average Grade"), "bold")
        for avg, dev in overall:
            if avg:
                writec(self, avg, self.grade_to_style(avg, total=True), cols=2)
            else:
                self.write_two_empty_cells_with_borders()

        writen(self, _("Overall Average Standard Deviation"), "bold")
        for avg, dev in overall:
            if dev is not None:
                writec(self, dev, self.deviation_to_style(dev, total=True), cols=2)
            else:
                self.write_two_empty_cells_with_borders()

        writen(self, _("Total Voters/Total Participants"), "bold")
        for course, results in courses_with_results:
            percent_participants = float(course.num_voters) / float(course.num_participants) if course.num_participants > 0 else 0
            writec(self, "{}/{} ({:.0%})".format(course.num_voters, course.num_participants, percent_participants), "total_voters", cols=2)

    self.workbook.save(response)