def evaluation(u=None, d=None):
    """Render the knowledge-area evaluation page for one department.

    With no arguments, render the evaluation landing page.  Otherwise look
    up the department identified by university abbreviation ``u`` and
    department abbreviation ``d``, compute the predicted and ground-truth
    knowledge-area labels for each of its courses, score the predictions,
    and render the department evaluation page.

    Args:
        u: University abbreviation (or None for the landing page).
        d: Department abbreviation (or None for the landing page).

    Returns:
        A rendered template response.

    Raises:
        Aborts with HTTP 404 when no matching department exists.
    """
    if u is None or d is None:
        return render_template("evaluate_landing.html")

    department = app.db.query(Department).join(University) \
        .filter(University.abbreviation == u) \
        .filter(Department.abbreviation == d) \
        .first()
    if department is None:
        abort(404)  # department not found

    # Retrieve the set of predicted and ground truth knowledge area labels
    # for each course.
    try:
        knowledge_areas = {
            'predicted': {
                course.id: predicted_knowledge_areas(
                    course, result_set=g.result_set_raw)
                for course in department.courses
            },
            'truth': {
                course.id: ground_truth_knowledge_areas(course)
                for course in department.courses
            },
        }
    except RuntimeError:
        # Return empty knowledge area lists if an error is encountered.
        knowledge_areas = {
            'predicted': {course.id: [] for course in department.courses},
            'truth': {course.id: [] for course in department.courses},
        }

    # Calculate the jaccard coefficient and percentage correct of the
    # prediction/truth sets, use these as 'correctness' metrics.  Courses
    # with no ground-truth labels are skipped (also avoids division by zero).
    knowledge_areas['jaccard'] = {
        course.id: float(jaccard(
            knowledge_areas['predicted'][course.id],
            knowledge_areas['truth'][course.id]))
        for course in department.courses
        if knowledge_areas['truth'][course.id]
    }
    knowledge_areas['percent'] = {
        # Cast the intersection size to float BEFORE dividing so the ratio
        # is a true division regardless of integer-division semantics.
        course.id: float(len(
            set(knowledge_areas['predicted'][course.id])
            .intersection(knowledge_areas['truth'][course.id])))
        / len(knowledge_areas['truth'][course.id])
        for course in department.courses
        if knowledge_areas['truth'][course.id]
    }

    return render_template(
        "evaluate_department.html",
        department=department,
        knowledge_areas=knowledge_areas,
    )
def evaluation(u=None, d=None):
    """Render the knowledge-area evaluation page for one department.

    Without ``u`` and ``d``, render the evaluation landing page.  Otherwise
    resolve the department by university/department abbreviation, gather
    predicted and ground-truth knowledge areas per course, compute the
    Jaccard and percent-correct scores, and render the department page.

    Args:
        u: University abbreviation (or None for the landing page).
        d: Department abbreviation (or None for the landing page).

    Returns:
        A rendered template response.

    Raises:
        Aborts with HTTP 404 when no matching department exists.
    """
    if u is None or d is None:
        return render_template("evaluate_landing.html")

    department = app.db.query(Department).join(University) \
        .filter(University.abbreviation == u) \
        .filter(Department.abbreviation == d) \
        .first()
    if department is None:
        abort(404)  # department not found

    # Retrieve the set of predicted and ground truth knowledge area labels
    # for each course.
    try:
        knowledge_areas = {
            'predicted': {
                course.id: predicted_knowledge_areas(
                    course, result_set=g.result_set_raw)
                for course in department.courses
            },
            'truth': {
                course.id: ground_truth_knowledge_areas(course)
                for course in department.courses
            },
        }
    except RuntimeError:
        # Return empty knowledge area lists if an error is encountered.
        knowledge_areas = {
            'predicted': {course.id: [] for course in department.courses},
            'truth': {course.id: [] for course in department.courses},
        }

    # Calculate the jaccard coefficient and percentage correct of the
    # prediction/truth sets, use these as 'correctness' metrics.  Courses
    # with no ground-truth labels are skipped (also avoids division by zero).
    knowledge_areas['jaccard'] = {
        course.id: float(jaccard(
            knowledge_areas['predicted'][course.id],
            knowledge_areas['truth'][course.id]
        ))
        for course in department.courses
        if knowledge_areas['truth'][course.id]
    }
    knowledge_areas['percent'] = {
        # Cast the intersection size to float BEFORE dividing so the ratio
        # is a true division regardless of integer-division semantics.
        course.id: float(len(
            set(knowledge_areas['predicted'][course.id])
            .intersection(knowledge_areas['truth'][course.id])
        )) / len(knowledge_areas['truth'][course.id])
        for course in department.courses
        if knowledge_areas['truth'][course.id]
    }

    return render_template("evaluate_department.html",
                           department=department,
                           knowledge_areas=knowledge_areas)
gmu_cs = session.query(Department).filter(Department.abbreviation=="CS")\ .join(University).filter(University.abbreviation=="GMU")\ .first() gmu = gmu_cs.university result_sets = session.query(ResultSet).all() print("Done.") print("Compute predicted and truth knowledge areas...") knowledge_areas = [ { 'predicted': { course.id: predicted_knowledge_areas(course, rs) for course in gmu_cs.courses }, 'truth': { course.id: ground_truth_knowledge_areas(course) for course in gmu_cs.courses }, } for rs in result_sets ] print("Done.") print("Calculate jaccard and percent metrics...") for ka_dict in knowledge_areas: ka_dict['jaccard'] = { course.id: jaccard( ka_dict['predicted'][course.id], ka_dict['truth'][course.id] ) for course in gmu_cs.courses if None not in [ ka_dict['predicted'][course.id],