def view_submissions_to_grade(request, course_prefix, course_suffix, exam_slug):
    """Export each student's latest complete, in-grace-period submission for
    ``exam_slug`` as a CSV and redirect to the generated file's URL.

    CSV rows are '"username","question-key","parsed-value"'.  Students whose
    stored JSON could not be parsed are listed in a header line instead of
    contributing rows.  Raises Http404 if no matching non-deleted exam exists.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404
    if exam.mode == "draft":
        exam = exam.image  # submissions are recorded against the live image

    submitters = ExamRecord.objects.filter(
        exam=exam, complete=True,
        time_created__lt=exam.grace_period).values('student').distinct()

    fname = course_prefix + "-" + course_suffix + "-" + exam_slug + "-" + \
        datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + ".csv"
    temp_file_path = FILE_DIR + "/" + fname
    outfile = open(temp_file_path, "w+")
    try:  # BUGFIX: guarantee the temp file handle is closed on any error path
        could_not_parse = ""
        for s in submitters:
            #yes, there is sql in a loop here. We'll optimize later
            latest_sub = ExamRecord.objects.values(
                'student__username', 'time_created', 'json_data').filter(
                    exam=exam,
                    time_created__lt=exam.grace_period,
                    student=s['student']).latest('time_created')
            try:
                # .items() instead of the Python-2-only .iteritems()
                sub_obj = json.loads(latest_sub['json_data']).items()
                for k, v in sub_obj:
                    outstring = '"%s","%s","%s"\n' % (
                        latest_sub['student__username'], k, parse_val(v))
                    outfile.write(outstring)
            except ValueError:
                #Don't output if the latest submission was erroneous
                could_not_parse += latest_sub['student__username'] + " "
            outfile.write("\n")

        #if there were items we could not parse
        if could_not_parse:
            #write the usernames at the beginning of the file
            outfile.seek(0)
            data = outfile.read()
            outfile.seek(0)
            outfile.truncate()
            outfile.write("Could not parse data from the following users: " +
                          could_not_parse + "\n")
            outfile.write(data)

        if is_storage_local():
            reports_dir = local_storage_root_dir() + "/" + course_prefix + \
                "/" + course_suffix + "/reports/"
            if not default_storage.exists(reports_dir):
                os.mkdir(reports_dir)
            outfile.close()  # flush contents before copying
            copyfile(temp_file_path, reports_dir + fname)
            file_url = local_file_server_root() + "/" + course_prefix + "/" + \
                course_suffix + "/reports/" + fname
            return HttpResponseRedirect(file_url)
        else:
            #write to S3
            secure_file_storage = S3BotoStorage(
                bucket=AWS_SECURE_STORAGE_BUCKET_NAME,
                access_key=AWS_ACCESS_KEY_ID,
                secret_key=AWS_SECRET_ACCESS_KEY)
            s3file = secure_file_storage.open(
                "/%s/%s/reports/exams/%s" %
                (course_prefix, course_suffix, fname), 'w')
            outfile.seek(0)
            s3file.write(outfile.read())
            s3file.close()
            return HttpResponseRedirect(
                secure_file_storage.url_monkeypatched(
                    "/%s/%s/reports/exams/%s" %
                    (course_prefix, course_suffix, fname),
                    response_headers={
                        'response-content-disposition': 'attachment'
                    }))
    finally:
        outfile.close()  # no-op when already closed above
def view_csv_grades(request, course_prefix, course_suffix, exam_slug):
    """Export every graded student's total score and per-field subscores for
    ``exam_slug`` as a CSV and redirect to the generated file's URL.

    Rows are '"username","total"' followed by one
    '"username","field","subscore"' line per ExamScoreField.
    Raises Http404 if no matching non-deleted exam exists.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404
    if course.mode == "draft":
        course = course.image
    if exam.mode == "draft":
        exam = exam.image

    graded_students = ExamScore.objects.filter(
        course=course,
        exam=exam).values('student', 'student__username').distinct()

    fname = course_prefix + "-" + course_suffix + "-" + exam_slug + \
        "-grades-" + datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + \
        ".csv"
    temp_file_path = FILE_DIR + "/" + fname
    outfile = open(temp_file_path, "w+")
    # BUGFIX: removed dead `could_not_parse` local (copied from the
    # submissions export, never used here) and added try/finally so the temp
    # file handle is closed even when an ORM/S3 call raises.
    try:
        for s in graded_students:
            #yes, there is sql in a loop here. We'll optimize later
            score_obj = ExamScore.objects.get(course=course, exam=exam,
                                              student=s['student'])
            outfile.write('"%s","%s"\n' % (s['student__username'],
                                           score_obj.score))
            for field in ExamScoreField.objects.filter(parent=score_obj):
                outfile.write('"%s","%s","%s"\n' %
                              (s['student__username'], field.field_name,
                               str(field.subscore)))
            outfile.write("\n")

        if is_storage_local():
            reports_dir = local_storage_root_dir() + "/" + course_prefix + \
                "/" + course_suffix + "/reports/"
            if not default_storage.exists(reports_dir):
                os.mkdir(reports_dir)
            outfile.close()  # flush contents before copying
            copyfile(temp_file_path, reports_dir + fname)
            file_url = local_file_server_root() + "/" + course_prefix + "/" + \
                course_suffix + "/reports/" + fname
            return HttpResponseRedirect(file_url)
        else:
            #write to S3
            secure_file_storage = S3BotoStorage(
                bucket=AWS_SECURE_STORAGE_BUCKET_NAME,
                access_key=AWS_ACCESS_KEY_ID,
                secret_key=AWS_SECRET_ACCESS_KEY)
            s3file = secure_file_storage.open(
                "/%s/%s/reports/exams/%s" %
                (course_prefix, course_suffix, fname), 'w')
            outfile.seek(0)
            s3file.write(outfile.read())
            s3file.close()
            # NOTE(review): the submissions export uses url_monkeypatched()
            # here while this one uses url() -- confirm which is intended.
            return HttpResponseRedirect(
                secure_file_storage.url(
                    "/%s/%s/reports/exams/%s" %
                    (course_prefix, course_suffix, fname),
                    response_headers={
                        'response-content-disposition': 'attachment'
                    }))
    finally:
        outfile.close()  # no-op when already closed above
def get_full_contentsection_list(course, filter_children=True):
    """Return a list of ContentSections with material and a list of all
    material for this course.

    When ``filter_children`` is True, items that appear as level-2 children
    in a ContentGroup are excluded from the per-section index lists.
    Sections with no remaining items are omitted from the first list.
    """
    def no_child_filter(t=None, i=None):
        # Accept everything -- used when child filtering is disabled.
        return True

    # tag -> list of ids that are level-2 children; filled lazily on first use
    level2_items = {}

    def working_child_filter(t, i):
        if not level2_items:
            for cg2 in ContentGroup.objects.filter(course=course).filter(level=2):
                cg2_t = cg2.get_content_type()
                level2_items.setdefault(cg2_t, []).append(getattr(cg2, cg2_t).id)
        # BUGFIX: dict.has_key() is deprecated and removed in Python 3;
        # the `in` operator is the equivalent membership test.
        if t in level2_items:
            return i not in level2_items[t]
        return True

    desired_item = working_child_filter
    if not filter_children:
        desired_item = no_child_filter

    tagged_object_lists = {}
    # .items() instead of the Python-2-only .iteritems()
    for tag, cls in ContentGroup.groupable_types.items():
        tagged_object_lists[tag] = cls.objects.getByCourse(course=course)

    full_index_list = []
    full_contentsection_list = []
    for contentsection in ContentSection.objects.getByCourse(course=course):
        index_list = []
        cs_id = contentsection.id
        for tag in ContentGroup.groupable_types.keys():
            for t in tagged_object_lists[tag].filter(section_id=cs_id):
                t_id = t.id
                if desired_item(tag, t_id):
                    if tag != 'file':
                        index_list.append(
                            (tag, t.index, t_id, cs_id, t.slug, t.title))
                    else:
                        # files carry a download URL and an icon type instead
                        # of a slug
                        icon_type = t.get_icon_type()
                        index_list.append(
                            (tag, t.index, t_id, cs_id,
                             local_file_server_root() + "/" + t.file.url,
                             t.title, icon_type))
        # NOTE(review): `index` is not defined in this function -- presumably
        # a module-level key function sorting by the item's index field;
        # confirm it exists at module scope.
        full_index_list.append(sorted(index_list, key=index))
        if index_list:  # don't show empty sections
            full_contentsection_list.append(contentsection)
    return full_contentsection_list, full_index_list
def view(request, course_prefix, course_suffix, slug):
    """Render the video page for ``slug`` via the exam template.

    Looks up the video, logs the visit for non-admin users, computes
    prev/next slugs within the course's video list, ensures a VideoActivity
    record exists for the user, and extracts the in-video question schedule
    from the exam's XML metadata (or builds a stub Exam when the video has
    none).  Raises Http404 if the video cannot be found.
    """
    common_page_data = request.common_page_data
    try:
        #getByCourse takes care of checking for draft vs live, is_deleted and live times
        video = Video.objects.getByCourse(
            course=common_page_data['course']).get(slug=slug)
    except Video.DoesNotExist:
        raise Http404

    if not common_page_data['is_course_admin']:
        visit_log = PageVisitLog(
            course=common_page_data['ready_course'],
            user=request.user,
            page_type='video',
            object_id=str(video.id),
        )
        visit_log.save()

    if 'video_quiz_mode' not in request.session:
        #Default to include quizzes in viewing videos
        request.session['video_quiz_mode'] = "quizzes included"

    videos = Video.objects.getByCourse(course=common_page_data['course'])

    #Get index of current video
    cur_index = None  #just code safety
    for index, item in enumerate(videos):
        if item == video:
            cur_index = index
            break

    #code safety
    next_slug = None
    prev_slug = None
    if cur_index is not None:
        if cur_index > 0:
            prev_slug = videos[cur_index - 1].slug
        if cur_index < videos.count() - 1:
            next_slug = videos[cur_index + 1].slug

    video_rec = request.user.videoactivity_set.filter(video=video)
    if video_rec:
        video_rec = video_rec[0]
    else:
        #note student field to be renamed to user, VideoActivity for all users now
        video_rec = VideoActivity(student=request.user,
                                  course=common_page_data['course'],
                                  video=video)
        video_rec.save()

    course = common_page_data['course']
    full_contentsection_list, full_index_list = get_full_contentsection_list(
        course, filter_children=True)
    is_logged_in = 1 if request.user.is_authenticated() else 0

    key = ('video', video.id)
    l1items, l2items = get_contentgroup_data(course=course)
    downloadable_content = get_children(key, l1items, l2items)

    if video.exam:
        try:
            exam = video.exam
            #The DOM corresponding to the XML metadata
            metadata_dom = parseString(exam.xml_metadata)
            video_questions = metadata_dom.getElementsByTagName('video')
            # maps "sec_<time>" -> list of question ids at that second
            question_times = {}
            for video_node in video_questions:
                video_slug = video_node.getAttribute("url-identifier")
                if video_slug == "":
                    # older metadata used an underscore in the attribute name
                    video_slug = video_node.getAttribute("url_identifier")
                if video_slug == video.slug:
                    for question in video_node.getElementsByTagName("question"):
                        time = "sec_%s" % question.getAttribute("time")
                        question_times.setdefault(time, []).append(
                            question.getAttribute("id"))
            # BUGFIX: removed leftover debug print of question_times and the
            # unused locals display_single/invideo/times.
        except Exam.DoesNotExist:
            raise Http404
    else:
        sections = ContentSection.objects.getByCourse(course)
        section = sections[0]
        # create fake exam as exam template (which is what displays all videos)
        # needs exam data to function correctly (TODO: Refactor this)
        exam = Exam(course=course, slug=slug, title=video.title,
                    description="Empty Exam", html_content="",
                    xml_metadata="", due_date='', assessment_type="survey",
                    mode="draft", total_score=0, grade_single=0,
                    grace_period='', partial_credit_deadline='',
                    late_penalty=0, submissions_permitted=0,
                    resubmission_penalty=0, exam_type="survey", autograde=0,
                    display_single=0, invideo=1, section=section)
        exam.live_datetime = video.live_datetime  # needed so video shows up
        question_times = ""

    videoURL = None
    thumbnailPath = None
    if is_storage_local():
        videoURL = local_file_server_root() + "/" + str(video.file)
        thumbnailPath = local_file_server_root() + "/" + course.prefix + \
            "/" + course.suffix + "/videos/" + \
            str(video.id if video.mode == 'draft' else video.image.id) + \
            "/jpegs/"
    elif video.url:
        videoURL = "http://www.youtube.com/embed/" + \
            (video.url if video.mode == 'draft' else video.image.url) + \
            "?autoplay=0&wmode=transparent&fs=0&rel=0&modestbranding=1&showinfo=0&start=0&enablejsapi=1&disablekb=1&"
        thumbnailPath = "http://" + settings.AWS_STORAGE_BUCKET_NAME + \
            ".s3-website-us-west-2.amazonaws.com/" + course.prefix + "/" + \
            course.suffix + "/videos/" + \
            str(video.id if video.mode == 'draft' else video.image.id) + \
            "/jpegs/"

    # change from 'videos/view.html' to 'exams/view_exam.html'
    return render_to_response(
        'exams/view_exam.html', {
            'common_page_data': common_page_data,
            'video': video,
            'video_rec': video_rec,
            'videoURL': videoURL,
            'thumbnailPath': thumbnailPath,
            'prev_slug': prev_slug,
            'next_slug': next_slug,
            'contentsection_list': full_contentsection_list,
            'full_index_list': full_index_list,
            'is_logged_in': is_logged_in,
            'downloadable_content': downloadable_content,
            'json_pre_pop': "{}",
            'scores': "{}",
            'editable': True,
            'single_question': exam.display_single,
            'videotest': exam.invideo,
            'question_times': json.dumps(question_times),
            'allow_submit': True,
            'children': downloadable_content,
            'exam': exam
        },
        context_instance=RequestContext(request))
def view(request, course_prefix, course_suffix, slug):
    """Display a single course video through the exam-view template.

    Resolves the video by slug, records a page visit for non-admin users,
    determines the previous/next video slugs, guarantees a per-user
    VideoActivity row, and gathers the in-video question schedule from the
    exam metadata (or fabricates a placeholder Exam when there is none).
    """
    common_page_data = request.common_page_data
    #getByCourse takes care of checking for draft vs live, is_deleted and live times
    try:
        video = Video.objects.getByCourse(
            course=common_page_data['course']).get(slug=slug)
    except Video.DoesNotExist:
        raise Http404

    if not common_page_data['is_course_admin']:
        visit_log = PageVisitLog(course=common_page_data['ready_course'],
                                 user=request.user,
                                 page_type='video',
                                 object_id=str(video.id))
        visit_log.save()

    if 'video_quiz_mode' not in request.session:
        #Default to include quizzes in viewing videos
        request.session['video_quiz_mode'] = "quizzes included"

    videos = Video.objects.getByCourse(course=common_page_data['course'])

    #Get index of current video
    cur_index = None  #just code safety
    for i, item in enumerate(videos):
        if item == video:
            cur_index = i
            break

    #code safety
    prev_slug = None
    next_slug = None
    if cur_index is not None:
        prev_slug = videos[cur_index - 1].slug if cur_index > 0 else None
        next_slug = (videos[cur_index + 1].slug
                     if cur_index < videos.count() - 1 else None)

    activity_qs = request.user.videoactivity_set.filter(video=video)
    if activity_qs:
        video_rec = activity_qs[0]
    else:
        #note student field to be renamed to user, VideoActivity for all users now
        video_rec = VideoActivity(student=request.user,
                                  course=common_page_data['course'],
                                  video=video)
        video_rec.save()

    course = common_page_data['course']
    full_contentsection_list, full_index_list = get_full_contentsection_list(
        course, filter_children=True)
    is_logged_in = 1 if request.user.is_authenticated() else 0

    key = ('video', video.id)
    l1items, l2items = get_contentgroup_data(course=course)
    downloadable_content = get_children(key, l1items, l2items)

    if video.exam:
        try:
            #exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
            exam = video.exam
            display_single = exam.display_single
            invideo = exam.invideo
            #The DOM corresponding to the XML metadata
            metadata_dom = parseString(exam.xml_metadata)
            question_times = {}
            for video_node in metadata_dom.getElementsByTagName('video'):
                node_slug = video_node.getAttribute("url-identifier")
                if node_slug == "":
                    node_slug = video_node.getAttribute("url_identifier")
                if node_slug == video.slug:
                    times = []
                    for question in video_node.getElementsByTagName("question"):
                        bucket = "sec_%s" % question.getAttribute("time")
                        if bucket not in question_times:
                            question_times[bucket] = []
                        question_times[bucket].append(
                            question.getAttribute("id"))
            print(json.dumps(question_times))
        except Exam.DoesNotExist:
            raise Http404
    else:
        sections = ContentSection.objects.getByCourse(course)
        section = sections[0]
        # create fake exam as exam template (which is what displays all videos)
        # needs exam data to function correctly (TODO: Refactor this)
        exam = Exam(course=course, slug=slug, title=video.title,
                    description="Empty Exam", html_content="",
                    xml_metadata="", due_date='', assessment_type="survey",
                    mode="draft", total_score=0, grade_single=0,
                    grace_period='', partial_credit_deadline='',
                    late_penalty=0, submissions_permitted=0,
                    resubmission_penalty=0, exam_type="survey", autograde=0,
                    display_single=0, invideo=1, section=section)
        exam.live_datetime = video.live_datetime  # needed so video shows up
        question_times = ""

    videoURL = None
    thumbnailPath = None
    if is_storage_local():
        shown_id = str(video.id if video.mode == 'draft' else video.image.id)
        videoURL = local_file_server_root() + "/" + str(video.file)
        thumbnailPath = (local_file_server_root() + "/" + course.prefix +
                         "/" + course.suffix + "/videos/" + shown_id +
                         "/jpegs/")
    elif video.url:
        shown_id = str(video.id if video.mode == 'draft' else video.image.id)
        videoURL = ("http://www.youtube.com/embed/" +
                    (video.url if video.mode == 'draft' else video.image.url) +
                    "?autoplay=0&wmode=transparent&fs=0&rel=0&modestbranding=1&showinfo=0&start=0&enablejsapi=1&disablekb=1&")
        thumbnailPath = ("http://" + settings.AWS_STORAGE_BUCKET_NAME +
                         ".s3-website-us-west-2.amazonaws.com/" +
                         course.prefix + "/" + course.suffix + "/videos/" +
                         shown_id + "/jpegs/")

    # change from 'videos/view.html' to 'exams/view_exam.html'
    context = {
        'common_page_data': common_page_data,
        'video': video,
        'video_rec': video_rec,
        'videoURL': videoURL,
        'thumbnailPath': thumbnailPath,
        'prev_slug': prev_slug,
        'next_slug': next_slug,
        'contentsection_list': full_contentsection_list,
        'full_index_list': full_index_list,
        'is_logged_in': is_logged_in,
        'downloadable_content': downloadable_content,
        'json_pre_pop': "{}",
        'scores': "{}",
        'editable': True,
        'single_question': exam.display_single,
        'videotest': exam.invideo,
        'question_times': json.dumps(question_times),
        'allow_submit': True,
        'children': downloadable_content,
        'exam': exam
    }
    return render_to_response('exams/view_exam.html', context,
                              context_instance=RequestContext(request))
def view_csv_grades(request, course_prefix, course_suffix, exam_slug):
    """Write a CSV of exam grades (total score plus per-field subscores)
    and redirect the client to wherever the finished file is served from.

    Raises Http404 when no matching non-deleted exam exists.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404
    if course.mode == "draft":
        course = course.image
    if exam.mode == "draft":
        exam = exam.image

    graded_students = ExamScore.objects.filter(
        course=course,
        exam=exam).values('student', 'student__username').distinct()

    stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    fname = "%s-%s-%s-grades-%s.csv" % (course_prefix, course_suffix,
                                        exam_slug, stamp)
    temp_file_path = FILE_DIR + "/" + fname
    outfile = open(temp_file_path, "w+")
    could_not_parse = ""
    for entry in graded_students:
        #yes, there is sql in a loop here. We'll optimize later
        score_obj = ExamScore.objects.get(course=course, exam=exam,
                                          student=entry['student'])
        outfile.write('"%s","%s"\n' % (entry['student__username'],
                                       score_obj.score))
        for sub in ExamScoreField.objects.filter(parent=score_obj):
            outfile.write('"%s","%s","%s"\n' % (entry['student__username'],
                                                sub.field_name,
                                                str(sub.subscore)))
        outfile.write("\n")

    if is_storage_local():
        outfile.close()
        reports_dir = "%s/%s/%s/reports/" % (local_storage_root_dir(),
                                             course_prefix, course_suffix)
        if not default_storage.exists(reports_dir):
            os.mkdir(reports_dir)
        copyfile(temp_file_path, reports_dir + fname)
        file_url = "%s/%s/%s/reports/%s" % (local_file_server_root(),
                                            course_prefix, course_suffix,
                                            fname)
        return HttpResponseRedirect(file_url)
    else:
        #write to S3
        secure_file_storage = S3BotoStorage(
            bucket=AWS_SECURE_STORAGE_BUCKET_NAME,
            access_key=AWS_ACCESS_KEY_ID,
            secret_key=AWS_SECRET_ACCESS_KEY)
        s3_key = "/%s/%s/reports/exams/%s" % (course_prefix, course_suffix,
                                              fname)
        s3file = secure_file_storage.open(s3_key, 'w')
        outfile.seek(0)
        s3file.write(outfile.read())
        s3file.close()
        outfile.close()
        return HttpResponseRedirect(
            secure_file_storage.url(
                s3_key,
                response_headers={
                    'response-content-disposition': 'attachment'
                }))
def view_submissions_to_grade(request, course_prefix, course_suffix, exam_slug):
    """Dump each submitter's latest complete, in-grace-period exam
    submission to a CSV and redirect to the generated file.

    Usernames whose stored JSON cannot be parsed are collected and written
    as a header line at the top of the file instead of contributing rows.
    Raises Http404 when no matching non-deleted exam exists.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404
    if exam.mode == "draft":
        exam = exam.image

    submitters = ExamRecord.objects.filter(
        exam=exam, complete=True,
        time_created__lt=exam.grace_period).values('student').distinct()

    stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    fname = "%s-%s-%s-%s.csv" % (course_prefix, course_suffix, exam_slug,
                                 stamp)
    temp_file_path = FILE_DIR + "/" + fname
    outfile = open(temp_file_path, "w+")
    could_not_parse = ""
    for submitter in submitters:
        #yes, there is sql in a loop here. We'll optimize later
        latest_sub = ExamRecord.objects.values(
            'student__username', 'time_created', 'json_data').filter(
                exam=exam,
                time_created__lt=exam.grace_period,
                student=submitter['student']).latest('time_created')
        try:
            for field, answer in json.loads(
                    latest_sub['json_data']).iteritems():
                outfile.write('"%s","%s","%s"\n' %
                              (latest_sub['student__username'], field,
                               parse_val(answer)))
        except ValueError:
            #Don't output if the latest submission was erroneous
            could_not_parse += latest_sub['student__username'] + " "
        outfile.write("\n")

    #if there were items we could not parse
    if could_not_parse:
        #write the usernames at the beginning of the file
        outfile.seek(0)
        body = outfile.read()
        outfile.seek(0)
        outfile.truncate()
        outfile.write("Could not parse data from the following users: " +
                      could_not_parse + "\n")
        outfile.write(body)

    if is_storage_local():
        outfile.close()
        reports_dir = local_storage_root_dir() + "/" + course_prefix + \
            "/" + course_suffix + "/reports/"
        if not default_storage.exists(reports_dir):
            os.mkdir(reports_dir)
        copyfile(temp_file_path, reports_dir + fname)
        file_url = local_file_server_root() + "/" + course_prefix + "/" + \
            course_suffix + "/reports/" + fname
        return HttpResponseRedirect(file_url)
    else:
        #write to S3
        secure_file_storage = S3BotoStorage(
            bucket=AWS_SECURE_STORAGE_BUCKET_NAME,
            access_key=AWS_ACCESS_KEY_ID,
            secret_key=AWS_SECRET_ACCESS_KEY)
        s3_key = "/%s/%s/reports/exams/%s" % (course_prefix, course_suffix,
                                              fname)
        s3file = secure_file_storage.open(s3_key, 'w')
        outfile.seek(0)
        s3file.write(outfile.read())
        s3file.close()
        outfile.close()
        return HttpResponseRedirect(
            secure_file_storage.url_monkeypatched(
                s3_key,
                response_headers={
                    'response-content-disposition': 'attachment'
                }))