def handle(self, *args, **options): if len(args) != 3: raise CommandError("Wrong number of arguments, %d instead of 3" % len(args)) if options['force_local'] and options['force_remote']: raise CommandError("Can't run both local and remote") if not PILImportTest: raise CommandError("Can't continue without having 'PIL' installed") if not numpyImportTest: raise CommandError("Can't continue without having 'numpy' installed") arg_prefix=args[0] arg_suffix=args[1] handle=arg_prefix+"--"+arg_suffix slug=args[2] try: video = Video.objects.get(course__handle=handle, slug=slug, mode='draft') except MultipleObjectsReturned: print "Found multiple videos named \"%s\"" % slug return except Video.DoesNotExist: print "Video \"%s\" not found for handle \"%s\"" % (slug, handle) return if video.file.name == "default": print "Video slug \"%s\" doesn't have a file listed in S3 (name=\"default\")" % slug return if (is_storage_local() or options['force_local']) and not options['force_remote']: media_root = getattr(settings, 'MEDIA_ROOT') local_path = media_root + "/" + video.file.name kelvinator.tasks.kelvinate(local_path, options['target_frames'], options['notify_addr']) print "Kelvination complete: %s" % video.file.name else: kelvinator.tasks.kelvinate.delay(video.file.name, options['target_frames'], options['notify_addr']) print "Kelvination queued (%s): %s" % (instance, video.file.name)
def view_submissions_to_grade(request, course_prefix, course_suffix, exam_slug):
    """Build a CSV of each student's latest in-grace-period exam submission.

    Writes the CSV to a local temp file, then serves it either from local
    storage or from the secure S3 bucket, redirecting the client to the
    resulting file URL. Raises Http404 for an unknown exam slug.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404
    # Draft exams are graded against their ready ("image") counterpart.
    if exam.mode=="draft":
        exam = exam.image

    # Distinct students with a complete submission before the grace period end.
    submitters = ExamRecord.objects.filter(exam=exam, complete=True, time_created__lt=exam.grace_period).values('student').distinct()

    # Timestamped filename keeps repeated exports from colliding.
    fname = course_prefix+"-"+course_suffix+"-"+exam_slug+"-"+datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")+".csv"
    temp_file_path = FILE_DIR+"/"+fname
    outfile = open(temp_file_path,"w+")

    could_not_parse = ""

    for s in submitters: #yes, there is sql in a loop here.  We'll optimize later
        # Most recent qualifying submission for this student.
        latest_sub = ExamRecord.objects.values('student__username', 'time_created', 'json_data').filter(exam=exam, time_created__lt=exam.grace_period, student=s['student']).latest('time_created')
        try:
            sub_obj = json.loads(latest_sub['json_data']).iteritems()
            # One CSV row per answered field: username, field key, parsed value.
            for k,v in sub_obj:
                outstring = '"%s","%s","%s"\n' % (latest_sub['student__username'], k, parse_val(v))
                outfile.write(outstring)
        except ValueError:
            could_not_parse += latest_sub['student__username']+ " " #Don't output if the latest submission was erroneous
        outfile.write("\n")

    #if there were items we could not parse
    if could_not_parse:
        #write the usernames at the beginning of the file
        outfile.seek(0)
        data=outfile.read()
        outfile.seek(0)
        outfile.truncate()
        outfile.write("Could not parse data from the following users: " + could_not_parse + "\n")
        outfile.write(data)

    if is_storage_local():
        outfile.close()
        reports_dir = local_storage_root_dir() + "/" + course_prefix + "/" + course_suffix + "/reports/"
        if not default_storage.exists(reports_dir):
            os.mkdir(reports_dir)
        copyfile(temp_file_path, reports_dir + fname)
        file_url = local_file_server_root() + "/" + course_prefix + "/" + course_suffix + "/reports/" + fname
        return HttpResponseRedirect(file_url)
    else:
        #write to S3
        secure_file_storage = S3BotoStorage(bucket=AWS_SECURE_STORAGE_BUCKET_NAME, access_key=AWS_ACCESS_KEY_ID, secret_key=AWS_SECRET_ACCESS_KEY)
        s3file = secure_file_storage.open("/%s/%s/reports/exams/%s" % (course_prefix, course_suffix, fname),'w')
        outfile.seek(0)
        s3file.write(outfile.read())
        s3file.close()
        outfile.close()
        # Redirect to a URL that forces a download (content-disposition: attachment).
        return HttpResponseRedirect(secure_file_storage.url_monkeypatched("/%s/%s/reports/exams/%s" % (course_prefix, course_suffix, fname), response_headers={'response-content-disposition': 'attachment'}))
def clean_url(self):
    """Validate the YouTube video id in ``url`` and record its duration.

    On local-storage installs, or when no id was supplied, the value is
    returned untouched. Otherwise an invalid id raises ValidationError.
    """
    url = self.cleaned_data["url"]
    # Nothing to look up for an empty id or a local-storage deployment.
    if not url or is_storage_local():
        return url
    yt_service = gdata.youtube.service.YouTubeService()
    try:
        video_entry = yt_service.GetYouTubeVideoEntry(video_id=url)
        self.instance.duration = video_entry.media.duration.seconds
    except gdata.service.RequestError:
        raise forms.ValidationError("Invalid YouTube video ID.")
    return url
def clean_url(self):
    """Check that ``url`` names a real YouTube video; store its duration.

    Non-local-storage installs hit the YouTube API; a failed lookup is
    surfaced to the form as a ValidationError.
    """
    url = self.cleaned_data['url']
    if url and not is_storage_local():
        service = gdata.youtube.service.YouTubeService()
        try:
            entry = service.GetYouTubeVideoEntry(video_id=url)
        except gdata.service.RequestError:
            raise forms.ValidationError('Invalid YouTube video ID.')
        else:
            # Lookup succeeded: remember the duration on the model instance.
            self.instance.duration = entry.media.duration.seconds
    return url
def handle(self, *args, **options): # Option processing if len(args) != 2: raise CommandError("Wrong number of arguments, %d instead of 2" % len(args)) if options['force_local'] and options['force_remote']: raise CommandError("Can't run both local and remote.") if not xhtml2pdfImportTest: raise CommandError("Can't issue certificates without python library xhtml2pdf being installed") course_handle = args[0].strip() username = args[1].strip() if len(username) == 0: return # Working object memoization if len(course_handle) == 0: raise CommandError("Bad course handle: '%s'" % course_handle) try: course = Course.objects.get(handle=course_handle, mode='ready') except: raise CommandError("Bad course handle or could not retrieve course '%s'" % course_handle) certificate_info = CourseCertificate.objects.get(course=course, type=options['cert_type']) user = User.objects.get(username=username) profile = UserProfile.objects.get(user=user) # Fire off worker task cert_prefix = '' if (is_storage_local() or options['force_local']) and not options['force_remote']: cert_prefix = getattr(settings, 'MEDIA_ROOT', '') cert_path = certificate_tasks.certify(cert_prefix, course, certificate_info, user) #print "Certification complete: %s" % cert_path print "Certification complete: %s" % certificate_info.dl_link(user) # Attach new certification to user's profile profile.certificates.add(certificate_info) profile.save() print "Certificate attached to profile of %s" % user.username # Send user notifications (if applicable) if options['email_test'] and options['email_user']: raise CommandError("Can't notify both admin and %s" % username) to_email = '(none specified)' if options['email_test']: admins = getattr(settings, 'ADMINS', False) if admins: to_email = admins[0][1] elif options['email_user']: to_email = user.email if notify(certificate_info.type, user.first_name, user.last_name, to_email, course.prefix, course.suffix): print "Notification for %s sent to %s." 
% (username, to_email) else: print "No notification sent to %s at address %s." % (username, to_email)
def handle(self, *args, **options): # Option processing if len(args) != 2: raise CommandError("Wrong number of arguments, %d instead of 2" % len(args)) if options['force_local'] and options['force_remote']: raise CommandError("Can't run both local and remote.") if not xhtml2pdfImportTest: raise CommandError("Can't issue certificates without python library xhtml2pdf being installed") course_handle = args[0].strip() username = args[1].strip() if len(username) == 0: return # Working object memoization if len(course_handle) == 0: raise CommandError("Bad course handle: '%s'" % course_handle) try: course = Course.objects.get(handle=course_handle, mode='ready') except: raise CommandError("Bad course handle or could not retrieve course '%s'" % course_handle) certificate_info = CourseCertificate.objects.get(course=course, type=options['cert_type']) user = User.objects.get(username=username) profile = UserProfile.objects.get(user=user) # Fire off worker task cert_prefix = '' if (is_storage_local() or options['force_local']) and not options['force_remote']: cert_prefix = getattr(settings, 'MEDIA_ROOT', '') cert_path = certificate_tasks.certify(cert_prefix, course, certificate_info, user) print "Certification complete: %s" % certificate_info.dl_link(user) # Attach new certification to user's profile profile.certificates.add(certificate_info) profile.save() print "Certificate attached to profile of %s" % user.username # Send user notifications (if applicable) if options['email_test'] and options['email_user']: raise CommandError("Can't notify both admin and %s" % username) to_email = '(none specified)' if options['email_test']: admins = getattr(settings, 'ADMINS', False) if admins: to_email = admins[0][1] elif options['email_user']: to_email = user.email if notify(certificate_info.type, user.first_name, user.last_name, to_email, course.prefix, course.suffix): print "Notification for %s sent to %s." % (username, to_email) else: print "No notification sent to %s at address %s." 
% (username, to_email)
def view_csv_grades(request, course_prefix, course_suffix, exam_slug):
    """Export all grades (total plus per-field subscores) for an exam as CSV.

    The CSV is written to a local temp file, then served from local storage
    or uploaded to the secure S3 bucket; the client is redirected to the
    file URL. Raises Http404 for an unknown exam slug.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404
    # Draft course/exam map to their ready ("image") counterparts for grading.
    if course.mode=="draft":
        course = course.image
    if exam.mode=="draft":
        exam = exam.image

    graded_students = ExamScore.objects.filter(course=course, exam=exam).values('student','student__username').distinct()

    # Timestamped filename avoids collisions between repeated exports.
    fname = course_prefix+"-"+course_suffix+"-"+exam_slug+"-grades-"+datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")+".csv"
    temp_file_path = FILE_DIR+"/"+fname
    outfile = open(temp_file_path,"w+")

    could_not_parse = ""

    for s in graded_students: #yes, there is sql in a loop here.  We'll optimize later
        # Total score row, then one row per subscore field.
        score_obj = ExamScore.objects.get(course=course, exam=exam, student=s['student'])
        outstring = '"%s","%s"\n' % (s['student__username'], score_obj.score)
        outfile.write(outstring)
        subscores = ExamScoreField.objects.filter(parent=score_obj)
        for field in subscores:
            outstring = '"%s","%s","%s"\n' % (s['student__username'], field.field_name, str(field.subscore))
            outfile.write(outstring)
        outfile.write("\n")

    if is_storage_local():
        outfile.close()
        reports_dir = local_storage_root_dir() + "/" + course_prefix + "/" + course_suffix + "/reports/"
        if not default_storage.exists(reports_dir):
            os.mkdir(reports_dir)
        copyfile(temp_file_path, reports_dir + fname)
        file_url = local_file_server_root() + "/" + course_prefix + "/" + course_suffix + "/reports/" + fname
        return HttpResponseRedirect(file_url)
    else:
        #write to S3
        secure_file_storage = S3BotoStorage(bucket=AWS_SECURE_STORAGE_BUCKET_NAME, access_key=AWS_ACCESS_KEY_ID, secret_key=AWS_SECRET_ACCESS_KEY)
        s3file = secure_file_storage.open("/%s/%s/reports/exams/%s" % (course_prefix, course_suffix, fname),'w')
        outfile.seek(0)
        s3file.write(outfile.read())
        s3file.close()
        outfile.close()
        # Redirect to a download URL (content-disposition: attachment).
        return HttpResponseRedirect(secure_file_storage.url("/%s/%s/reports/exams/%s" % (course_prefix, course_suffix, fname), response_headers={'response-content-disposition': 'attachment'}))
def handle(self, *args, **options): if len(args) != 3: raise CommandError("Wrong number of arguments, %d instead of 3" % len(args)) if options['force_local'] and options['force_remote']: raise CommandError("Can't run both local and remote") arg_prefix = args[0] arg_suffix = args[1] handle = arg_prefix + "--" + arg_suffix slug = args[2] try: video = Video.objects.get(course__handle=handle, slug=slug, mode='draft') except MultipleObjectsReturned: print "Found multiple videos named \"%s\"" % slug return except Video.DoesNotExist: print "Video \"%s\" not found for handle \"%s\"" % (slug, handle) return if video.file.name == "default": print "Video slug \"%s\" doesn't have a file listed in S3 (name=\"default\")" % slug return # FIXME: after confirming this works, clean these lines up. #where = getattr(settings, 'AWS_ACCESS_KEY_ID', 'local') #if options['force_local']: # where='local' #if options['force_remote']: # where='remote' #if where == 'local': if (is_storage_local() or options['force_local']) and not options['force_remote']: media_root = local_storage_root_dir() local_path = media_root + "/" + video.file.name kelvinator.tasks.kelvinate(local_path, options['target_frames'], options['notify_addr']) print "Kelvination complete: %s" % video.file.name else: kelvinator.tasks.kelvinate.delay(video.file.name, options['target_frames'], options['notify_addr']) print "Kelvination queued (%s): %s" % (instance, video.file.name)
def handle(self, *args, **options): if len(args) != 3: raise CommandError("Wrong number of arguments, %d instead of 3" % len(args)) if options['force_local'] and options['force_remote']: raise CommandError("Can't run both local and remote") if not PILImportTest: raise CommandError("Can't continue without having 'PIL' installed") if not numpyImportTest: raise CommandError( "Can't continue without having 'numpy' installed") arg_prefix = args[0] arg_suffix = args[1] handle = arg_prefix + "--" + arg_suffix slug = args[2] try: video = Video.objects.get(course__handle=handle, slug=slug, mode='draft') except MultipleObjectsReturned: print "Found multiple videos named \"%s\"" % slug return except Video.DoesNotExist: print "Video \"%s\" not found for handle \"%s\"" % (slug, handle) return if video.file.name == "default": print "Video slug \"%s\" doesn't have a file listed in S3 (name=\"default\")" % slug return if (is_storage_local() or options['force_local']) and not options['force_remote']: media_root = getattr(settings, 'MEDIA_ROOT') local_path = media_root + "/" + video.file.name kelvinator.tasks.kelvinate(local_path, options['target_frames'], options['notify_addr']) print "Kelvination complete: %s" % video.file.name else: kelvinator.tasks.kelvinate.delay(video.file.name, options['target_frames'], options['notify_addr']) print "Kelvination queued (%s): %s" % (instance, video.file.name)
def resize(store_path_raw, target_raw, notify_addr=None):
    """
    Given a video path, scale it down and save the result alongside the
    original video, so we can provide different download options.

    target_raw: preset name, "small" or "large" (case-insensitive).
    notify_addr: optional address for the completion/failure notification.
    """
    notify_buf = []
    infoLog(notify_buf,
            "Resize: converting %s version of %s" % (target_raw, store_path_raw))

    target = target_raw.lower()
    if target not in sizes.keys():
        # BUG FIX: the exception object was constructed but never raised, so an
        # unsupported target silently fell through to the conversion below.
        raise VideoError("Target size \"%s\" not supported" % target)

    (store_path, course_prefix, course_suffix, video_id, video_file) = splitpath(store_path_raw)

    store_loc = 'remote'
    if is_storage_local():
        store_loc = 'local'

    work_dir = None
    try:
        (work_dir, smaller_dir) = create_working_dirs("resize", notify_buf, target)
        get_video(notify_buf, work_dir, video_file, store_path)
        do_resize(notify_buf, work_dir, smaller_dir, video_file, target)
        upload(notify_buf, smaller_dir, target, course_prefix, course_suffix,
               video_id, video_file, store_loc)
    except:
        # Best-effort cleanup and failure notification, then re-raise the
        # original error (bare except is intentional: always re-raised).
        if work_dir:
            cleanup_working_dir(notify_buf, work_dir)
        notify("Resize (%s)" % target, notify_buf, notify_addr,
               course_prefix, course_suffix, video_file, store_path)
        raise

    cleanup_working_dir(notify_buf, work_dir)
    notify("Resize (%s)" % target, notify_buf, notify_addr,
           course_prefix, course_suffix, video_file, store_path)
def handle(self, *args, **options): if len(args) != 4: raise CommandError("Wrong number of arguments, %d instead of 4" % len(args)) if options['force_local'] and options['force_remote']: raise CommandError("Can't run both local and remote") target=args[0] arg_prefix=args[1] arg_suffix=args[2] handle=arg_prefix+"--"+arg_suffix slug=args[3] try: video = Video.objects.get(course__handle=handle, slug=slug, mode='draft') except MultipleObjectsReturned: print "Found multiple videos named \"%s\"" % slug return except Video.DoesNotExist: print "Video \"%s\" not found for handle \"%s\"" % (slug, handle) return if video.file.name == "default": print "Video slug \"%s\" doesn't have a file listed in S3 (name=\"default\")" % slug return # FIXME: after confirming this works, remove these lines #where = getattr(settings, 'AWS_ACCESS_KEY_ID', 'local') #if options['force_local']: # where='local' #if options['force_remote']: # where='remote' #if where == 'local': if (is_storage_local() or options['force_local']) and not options['force_remote']: media_root = getattr(settings, 'MEDIA_ROOT') local_path = media_root + "/" + video.file.name kelvinator.tasks.resize(local_path, target, options['notify_addr']) print "Resize complete: %s" % video.file.name else: kelvinator.tasks.resize.delay(video.file.name, target, options['notify_addr']) print "Resize queued (%s): %s" % (instance, video.file.name)
def resize(store_path_raw, target_raw, notify_addr=None):
    """
    Given a video path, scale it down and save the result alongside the
    original video, so we can provide different download options.

    target_raw: preset name, "small" or "large" (case-insensitive).
    notify_addr: optional address for the completion/failure notification.
    """
    notify_buf = []
    infoLog(notify_buf, "Resize: converting %s version of %s" % (target_raw, store_path_raw))

    target = target_raw.lower()
    if target not in sizes.keys():
        # BUG FIX: previously the exception was created without `raise`, so
        # unsupported targets were silently accepted and processing continued.
        raise VideoError("Target size \"%s\" not supported" % target)

    (store_path, course_prefix, course_suffix, video_id, video_file) = splitpath(store_path_raw)

    store_loc = 'remote'
    if is_storage_local():
        store_loc = 'local'

    work_dir = None
    try:
        (work_dir, smaller_dir) = create_working_dirs("resize", notify_buf, target)
        get_video(notify_buf, work_dir, video_file, store_path)
        do_resize(notify_buf, work_dir, smaller_dir, video_file, target)
        upload(notify_buf, smaller_dir, target, course_prefix, course_suffix,
               video_id, video_file, store_loc)
    except:
        # Clean up and notify on failure, then propagate the original error
        # (bare except is acceptable here because it always re-raises).
        if work_dir:
            cleanup_working_dir(notify_buf, work_dir)
        notify("Resize (%s)" % target, notify_buf, notify_addr,
               course_prefix, course_suffix, video_file, store_path)
        raise

    cleanup_working_dir(notify_buf, work_dir)
    notify("Resize (%s)" % target, notify_buf, notify_addr,
           course_prefix, course_suffix, video_file, store_path)
def upload(request):
    """Handle the S3 video-upload form: create the Video, link an optional
    exam, kick off async processing, then redirect (YouTube OAuth if needed).
    """
    course_prefix = request.POST.get("course_prefix")
    course_suffix = request.POST.get("course_suffix")
    exam_id = request.POST.get("exam_id",'')
    common_page_data = get_common_page_data(request, course_prefix, course_suffix)
    data = {'common_page_data': common_page_data}
    if request.method == 'POST':
        request.session['video_privacy'] = request.POST.get("video_privacy")
        # Need partial instance with course for form slug validation
        new_video = Video(course=common_page_data['course'])
        form = S3UploadForm(request.POST, request.FILES, course=common_page_data['course'], instance=new_video)
        if form.is_valid():
            new_video.index = new_video.section.getNextIndex()
            new_video.mode = 'draft'
            new_video.handle = course_prefix + "--" + course_suffix
            if exam_id:
                # Optionally link the new video to an existing exam and keep
                # the exam's live datetime (and its ready image's) in sync.
                try:
                    exam = Exam.objects.get(id=exam_id)
                except Exam.DoesNotExist:
                    return HttpResponseBadRequest("The exam you wanted to link to this video was not found!")
                new_video.exam = exam
                exam.live_datetime = new_video.live_datetime
                exam.save()
                if exam.image:
                    exam.image.live_datetime = new_video.live_datetime
                    exam.image.save()

            # Bit of jiggery pokery to so that the id is set when the upload_path function is called.
            # Now storing file with id appended to the file path so that thumbnail and associated manifest files
            # are easily associated with the video by putting them all in the same directory.
            new_video.file = None
            new_video.save()
            new_video.file = form.cleaned_data['file']
            new_video.save()

            kelvinator.tasks.duration.delay(new_video)

            new_video.create_ready_instance()

            # kick off remote jobs
            kelvinator.tasks.kelvinate.delay(new_video.file.name)
            # kelvinator.tasks.resize.delay(new_video.file.name, "large")
            # kelvinator.tasks.resize.delay(new_video.file.name, "small")

            if is_storage_local():
                return redirect('courses.videos.views.list', course_prefix, course_suffix)
            if new_video.url:
                return redirect('courses.videos.views.list', course_prefix, course_suffix)
            authUrl = GetOAuth2Url(request, new_video) #eventually should store an access token, so they don't have to give permission everytime
            return redirect(authUrl)
            # return redirect("http://" + request.META['HTTP_HOST'])
    else:
        form = S3UploadForm(course=common_page_data['course'])
    # Fall-through: GET request, or POST with an invalid form — re-render.
    data['form'] = form
    return render_to_response('videos/s3upload.html', data, context_instance=RequestContext(request))
def view_submissions_to_grade(request, course_prefix, course_suffix, exam_slug):
    """Export each student's latest in-grace-period exam submission as CSV.

    Writes to a local temp file, then serves from local storage or the
    secure S3 bucket and redirects to the file URL. Raises Http404 for an
    unknown exam slug.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404
    # Grade against the ready ("image") counterpart of a draft exam.
    if exam.mode == "draft":
        exam = exam.image

    # Distinct students with a complete submission before the grace period end.
    submitters = ExamRecord.objects.filter(
        exam=exam, complete=True,
        time_created__lt=exam.grace_period).values('student').distinct()

    # Timestamped filename avoids collisions between repeated exports.
    fname = course_prefix + "-" + course_suffix + "-" + exam_slug + "-" + datetime.datetime.now(
    ).strftime("%Y-%m-%d-%H:%M:%S") + ".csv"
    temp_file_path = FILE_DIR + "/" + fname
    outfile = open(temp_file_path, "w+")

    could_not_parse = ""

    for s in submitters:  #yes, there is sql in a loop here.  We'll optimize later
        # Most recent qualifying submission for this student.
        latest_sub = ExamRecord.objects.values(
            'student__username', 'time_created',
            'json_data').filter(exam=exam,
                                time_created__lt=exam.grace_period,
                                student=s['student']).latest('time_created')
        try:
            sub_obj = json.loads(latest_sub['json_data']).iteritems()
            # One CSV row per answered field: username, field key, parsed value.
            for k, v in sub_obj:
                outstring = '"%s","%s","%s"\n' % (
                    latest_sub['student__username'], k, parse_val(v))
                outfile.write(outstring)
        except ValueError:
            could_not_parse += latest_sub[
                'student__username'] + " "  #Don't output if the latest submission was erroneous
        outfile.write("\n")

    #if there were items we could not parse
    if could_not_parse:
        #write the usernames at the beginning of the file
        outfile.seek(0)
        data = outfile.read()
        outfile.seek(0)
        outfile.truncate()
        outfile.write("Could not parse data from the following users: " +
                      could_not_parse + "\n")
        outfile.write(data)

    if is_storage_local():
        outfile.close()
        reports_dir = local_storage_root_dir(
        ) + "/" + course_prefix + "/" + course_suffix + "/reports/"
        if not default_storage.exists(reports_dir):
            os.mkdir(reports_dir)
        copyfile(temp_file_path, reports_dir + fname)
        file_url = local_file_server_root(
        ) + "/" + course_prefix + "/" + course_suffix + "/reports/" + fname
        return HttpResponseRedirect(file_url)
    else:
        #write to S3
        secure_file_storage = S3BotoStorage(
            bucket=AWS_SECURE_STORAGE_BUCKET_NAME,
            access_key=AWS_ACCESS_KEY_ID,
            secret_key=AWS_SECRET_ACCESS_KEY)
        s3file = secure_file_storage.open(
            "/%s/%s/reports/exams/%s" % (course_prefix, course_suffix, fname),
            'w')
        outfile.seek(0)
        s3file.write(outfile.read())
        s3file.close()
        outfile.close()
        # Redirect to a download URL (content-disposition: attachment).
        return HttpResponseRedirect(
            secure_file_storage.url_monkeypatched(
                "/%s/%s/reports/exams/%s" %
                (course_prefix, course_suffix, fname),
                response_headers={
                    'response-content-disposition': 'attachment'
                }))
def view(request, course_prefix, course_suffix, slug):
    """Render the watch page for one video slug (via the exam template).

    Looks up the video, logs the visit for non-admins, computes prev/next
    navigation slugs, ensures a VideoActivity record exists, builds the
    in-video question time map from the linked exam's XML metadata (or a
    stub Exam when the video has none), and renders exams/view_exam.html.
    """
    common_page_data = request.common_page_data
    try:
        #getByCourse takes care of checking for draft vs live, is_deleted and live times
        video = Video.objects.getByCourse(course=common_page_data['course']).get(slug=slug)
    except Video.DoesNotExist:
        raise Http404

    # Only record page visits for regular students, not course staff.
    if not common_page_data['is_course_admin']:
        visit_log = PageVisitLog(
            course = common_page_data['ready_course'],
            user = request.user,
            page_type= 'video',
            object_id = str(video.id),
        )
        visit_log.save()

    if not 'video_quiz_mode' in request.session:
        #Default to include quizzes in viewing videos
        request.session['video_quiz_mode'] = "quizzes included"

    videos = Video.objects.getByCourse(course=common_page_data['course'])

    #Get index of current video
    cur_index = None  #just code safety
    for index, item in enumerate(videos):
        if item == video:
            cur_index = index
            break  #code safety

    # Neighbouring slugs drive the prev/next navigation links.
    next_slug = None
    prev_slug = None
    if cur_index is not None:
        if cur_index > 0:
            prev_slug = videos[cur_index-1].slug
        else:
            prev_slug = None
        if cur_index < videos.count() - 1:
            next_slug = videos[cur_index+1].slug
        else:
            next_slug = None

    # Ensure this user has a VideoActivity record for the video.
    video_rec = request.user.videoactivity_set.filter(video=video)
    if video_rec:
        video_rec = video_rec[0]
    else:
        #note student field to be renamed to user, VideoActivity for all users now
        video_rec = VideoActivity(student=request.user, course=common_page_data['course'], video=video)
        video_rec.save()

    course = common_page_data['course']
    full_contentsection_list, full_index_list = get_full_contentsection_list(course, filter_children=True)

    if request.user.is_authenticated():
        is_logged_in = 1
    else:
        is_logged_in = 0

    key = ('video', video.id)
    l1items, l2items = get_contentgroup_data(course=course)
    downloadable_content = get_children(key, l1items, l2items)

    if video.exam:
        try:
            #exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
            exam = video.exam
            display_single = exam.display_single
            invideo = exam.invideo
            metadata_dom = parseString(exam.xml_metadata)  #The DOM corresponding to the XML metadata
            video_questions = metadata_dom.getElementsByTagName('video')

            # Map "sec_<time>" -> list of question ids for this video's slug.
            # Metadata may use either "url-identifier" or "url_identifier".
            question_times = {}
            for video_node in video_questions:
                video_slug = video_node.getAttribute("url-identifier")
                if video_slug == "":
                    video_slug = video_node.getAttribute("url_identifier")
                if video_slug == video.slug:
                    question_children = video_node.getElementsByTagName("question")
                    times = []
                    for question in question_children:
                        time = "sec_%s" % question.getAttribute("time")
                        if time not in question_times:
                            question_times[time] = []
                        question_times[time].append(question.getAttribute("id"))
            print json.dumps(question_times)
        except Exam.DoesNotExist:
            raise Http404
    else:
        sections = ContentSection.objects.getByCourse(course)
        section = sections[0]
        # create fake exam as exam template (which is what displays all videos) needs exam data to function
        # correctly (TODO: Refactor this)
        exam = Exam(course=course, slug=slug, title=video.title, description="Empty Exam",
                    html_content="", xml_metadata="", due_date='', assessment_type="survey",
                    mode="draft", total_score=0, grade_single=0, grace_period='',
                    partial_credit_deadline='', late_penalty=0, submissions_permitted=0,
                    resubmission_penalty=0, exam_type="survey", autograde=0, display_single=0,
                    invideo=1, section=section,)
        exam.live_datetime = video.live_datetime  # needed so video shows up
        question_times = ""

    # Video and thumbnail URLs: local file server, or YouTube embed + S3 jpegs.
    videoURL = None
    thumbnailPath = None
    if is_storage_local():
        videoURL = local_file_server_root() + "/" + str(video.file)
        thumbnailPath = local_file_server_root() + "/" + course.prefix + "/" + course.suffix + "/videos/" + str(video.id if video.mode == 'draft' else video.image.id) + "/jpegs/"
    elif video.url:
        videoURL = "http://www.youtube.com/embed/" + (video.url if video.mode == 'draft' else video.image.url) + "?autoplay=0&wmode=transparent&fs=0&rel=0&modestbranding=1&showinfo=0&start=0&enablejsapi=1&disablekb=1&"
        thumbnailPath = "http://" + settings.AWS_STORAGE_BUCKET_NAME + ".s3-website-us-west-2.amazonaws.com/" + course.prefix + "/" + course.suffix + "/videos/" + str(video.id if video.mode == 'draft' else video.image.id) + "/jpegs/"

    # change from 'videos/view.html' to 'exams/view_exam.html'
    return render_to_response('exams/view_exam.html',
                              {'common_page_data': common_page_data,
                               'video': video,
                               'video_rec': video_rec,
                               'videoURL': videoURL,
                               'thumbnailPath': thumbnailPath,
                               'prev_slug': prev_slug,
                               'next_slug': next_slug,
                               'contentsection_list': full_contentsection_list,
                               'full_index_list': full_index_list,
                               'is_logged_in': is_logged_in,
                               'downloadable_content':downloadable_content,
                               'json_pre_pop':"{}",
                               'scores':"{}",
                               'editable':True,
                               'single_question':exam.display_single,
                               'videotest':exam.invideo,
                               'question_times':json.dumps(question_times),
                               'allow_submit':True,
                               'children': downloadable_content,
                               'exam':exam},
                              context_instance=RequestContext(request))
def view_csv_grades(request, course_prefix, course_suffix, exam_slug):
    """Export all grades (total plus per-field subscores) for an exam as CSV.

    Writes to a local temp file, then serves from local storage or uploads
    to the secure S3 bucket and redirects the client to the file URL.
    Raises Http404 for an unknown exam slug.
    """
    course = request.common_page_data['course']
    try:
        exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
    except Exam.DoesNotExist:
        raise Http404
    # Draft course/exam map to their ready ("image") counterparts for grading.
    if course.mode == "draft":
        course = course.image
    if exam.mode == "draft":
        exam = exam.image

    graded_students = ExamScore.objects.filter(
        course=course,
        exam=exam).values('student', 'student__username').distinct()

    # Timestamped filename avoids collisions between repeated exports.
    fname = course_prefix + "-" + course_suffix + "-" + exam_slug + "-grades-" + datetime.datetime.now(
    ).strftime("%Y-%m-%d-%H:%M:%S") + ".csv"
    temp_file_path = FILE_DIR + "/" + fname
    outfile = open(temp_file_path, "w+")

    could_not_parse = ""

    for s in graded_students:  #yes, there is sql in a loop here.  We'll optimize later
        # Total score row first, then one row per subscore field.
        score_obj = ExamScore.objects.get(course=course,
                                          exam=exam,
                                          student=s['student'])
        outstring = '"%s","%s"\n' % (s['student__username'], score_obj.score)
        outfile.write(outstring)
        subscores = ExamScoreField.objects.filter(parent=score_obj)
        for field in subscores:
            outstring = '"%s","%s","%s"\n' % (
                s['student__username'], field.field_name, str(field.subscore))
            outfile.write(outstring)
        outfile.write("\n")

    if is_storage_local():
        outfile.close()
        reports_dir = local_storage_root_dir(
        ) + "/" + course_prefix + "/" + course_suffix + "/reports/"
        if not default_storage.exists(reports_dir):
            os.mkdir(reports_dir)
        copyfile(temp_file_path, reports_dir + fname)
        file_url = local_file_server_root(
        ) + "/" + course_prefix + "/" + course_suffix + "/reports/" + fname
        return HttpResponseRedirect(file_url)
    else:
        #write to S3
        secure_file_storage = S3BotoStorage(
            bucket=AWS_SECURE_STORAGE_BUCKET_NAME,
            access_key=AWS_ACCESS_KEY_ID,
            secret_key=AWS_SECRET_ACCESS_KEY)
        s3file = secure_file_storage.open(
            "/%s/%s/reports/exams/%s" % (course_prefix, course_suffix, fname),
            'w')
        outfile.seek(0)
        s3file.write(outfile.read())
        s3file.close()
        outfile.close()
        # Redirect to a download URL (content-disposition: attachment).
        return HttpResponseRedirect(
            secure_file_storage.url("/%s/%s/reports/exams/%s" %
                                    (course_prefix, course_suffix, fname),
                                    response_headers={
                                        'response-content-disposition':
                                        'attachment'
                                    }))
def upload(request):
    """Handle the S3 video-upload form: create the Video, link an optional
    exam, kick off async processing, then redirect (YouTube OAuth if needed).
    """
    course_prefix = request.POST.get("course_prefix")
    course_suffix = request.POST.get("course_suffix")
    exam_id = request.POST.get("exam_id", "")
    common_page_data = get_common_page_data(request, course_prefix, course_suffix)
    data = {"common_page_data": common_page_data}
    if request.method == "POST":
        request.session["video_privacy"] = request.POST.get("video_privacy")
        # Need partial instance with course for form slug validation
        new_video = Video(course=common_page_data["course"])
        form = S3UploadForm(request.POST, request.FILES, course=common_page_data["course"], instance=new_video)
        if form.is_valid():
            new_video.index = new_video.section.getNextIndex()
            new_video.mode = "draft"
            new_video.handle = course_prefix + "--" + course_suffix
            if exam_id:
                # Optionally link the new video to an existing exam and keep
                # the exam's live datetime (and its ready image's) in sync.
                try:
                    exam = Exam.objects.get(id=exam_id)
                except Exam.DoesNotExist:
                    return HttpResponseBadRequest("The exam you wanted to link to this video was not found!")
                new_video.exam = exam
                exam.live_datetime = new_video.live_datetime
                exam.save()
                if exam.image:
                    exam.image.live_datetime = new_video.live_datetime
                    exam.image.save()

            # Bit of jiggery pokery to so that the id is set when the upload_path function is called.
            # Now storing file with id appended to the file path so that thumbnail and associated manifest files
            # are easily associated with the video by putting them all in the same directory.
            new_video.file = None
            new_video.save()
            new_video.file = form.cleaned_data["file"]
            new_video.save()

            kelvinator.tasks.duration.delay(new_video)

            new_video.create_ready_instance()

            # kick off remote jobs
            kelvinator.tasks.kelvinate.delay(new_video.file.name)
            # kelvinator.tasks.resize.delay(new_video.file.name, "large")
            # kelvinator.tasks.resize.delay(new_video.file.name, "small")

            if is_storage_local():
                return redirect("courses.videos.views.list", course_prefix, course_suffix)
            if new_video.url:
                return redirect("courses.videos.views.list", course_prefix, course_suffix)
            authUrl = GetOAuth2Url(request, new_video)  # eventually should store an access token, so they don't have to give permission everytime
            return redirect(authUrl)
            # return redirect("http://" + request.META['HTTP_HOST'])
    else:
        form = S3UploadForm(course=common_page_data["course"])
    # Fall-through: GET request, or POST with an invalid form — re-render.
    data["form"] = form
    return render_to_response("videos/s3upload.html", data, context_instance=RequestContext(request))
def get_common_page_data(request, prefix, suffix):
    """
    Build the per-request context dict shared by course pages.

    Looks up both the 'ready' and 'draft' Course rows for the handle
    "<prefix>--<suffix>", derives the requesting user's role flags from
    their auth groups, picks the effective course mode ('ready'/'draft')
    and view mode ('view'/'edit'), and collects the visible course-info
    pages.

    Returns a dict with the course objects, role flags, mode strings,
    timestamps and storage settings used by downstream templates/views.
    Raises Course.DoesNotExist if either mode of the course is missing.
    """
    ready_course = Course.objects.get(handle=prefix+"--"+suffix, mode='ready')
    draft_course = Course.objects.get(handle=prefix+"--"+suffix, mode='draft')
    course_mode = 'ready'
    course = ready_course
    # Normalize to the canonical prefix/suffix stored on the course row.
    prefix = course.prefix
    suffix = course.suffix

    can_switch_mode = False
    is_course_admin = False
    is_course_member = False
    # Role flags come from the FIRST matching course group: a user in the
    # student group stops there even if they also hold a staff group.
    for g in request.user.groups.all():
        if g.id == course.student_group_id:
            is_course_member = True
            break
        if g.id == course.instructor_group_id or g.id == course.tas_group_id:
            # Instructors and TAs get identical privileges.
            can_switch_mode = True
            is_course_admin = True
            is_course_member = True
            break
        if g.id == course.readonly_tas_group_id:
            # Read-only TAs may switch modes but are not admins.
            can_switch_mode = True
            is_course_member = True
            break

    # Staff may opt into draft mode via the session.
    if can_switch_mode and request.session.get('course_mode') == 'draft':
        course_mode = 'draft'
        course = draft_course

    # View mode: draft defaults to 'edit' unless explicitly previewing.
    if course_mode == 'draft':
        view_mode = 'view' if request.GET.get('view_mode') == 'preview' else 'edit'
    else:
        view_mode = 'view'

    # Course info pages: editors see every page, viewers only non-empty ones.
    course_info_pages = []
    for page in AdditionalPage.objects.getByCourseAndMenuSlug(course=course, menu_slug='course_info').all():
        if view_mode == 'edit' or page.description:
            course_info_pages.append(page)

    current_datetime = datetime.datetime.now()
    effective_current_datetime = current_datetime

    page_data = {
        'request': request,
        'course': course,
        'ready_course': ready_course,
        'draft_course': draft_course,
        'course_prefix': prefix,
        'course_suffix': suffix,
        'course_mode': course_mode,
        'can_switch_mode': can_switch_mode,
        'is_course_admin': is_course_admin,
        'is_course_member': is_course_member,
        # fixed: 'view_mode' appeared twice in the original dict literal
        'view_mode': view_mode,
        'course_info_pages': course_info_pages,
        'current_datetime': current_datetime,
        'effective_current_datetime': effective_current_datetime,
        'aws_storage_bucket_name': AWS_STORAGE_BUCKET_NAME,
        'is_local_storage': is_storage_local(),
    }
    return page_data
def view(request, course_prefix, course_suffix, slug):
    """
    Display a single course video (rendered via the exam template).

    Looks up the video by slug, records a page visit for non-admins,
    computes prev/next navigation slugs, ensures a VideoActivity record
    exists for the user, extracts in-video question times from the linked
    exam's XML metadata (or builds a throwaway survey Exam when there is
    none), resolves the video/thumbnail URLs per storage backend, and
    renders 'exams/view_exam.html'.

    Raises Http404 if the video is not visible for this course.
    """
    common_page_data = request.common_page_data
    try:
        #getByCourse takes care of checking for draft vs live, is_deleted and live times
        video = Video.objects.getByCourse(
            course=common_page_data['course']).get(slug=slug)
    except Video.DoesNotExist:
        raise Http404
    # Only log visits for ordinary users, not course staff.
    if not common_page_data['is_course_admin']:
        visit_log = PageVisitLog(
            course=common_page_data['ready_course'],
            user=request.user,
            page_type='video',
            object_id=str(video.id),
        )
        visit_log.save()
    if not 'video_quiz_mode' in request.session:
        #Default to include quizzes in viewing videos
        request.session['video_quiz_mode'] = "quizzes included"
    videos = Video.objects.getByCourse(course=common_page_data['course'])
    #Get index of current video
    cur_index = None  #just code safety
    for index, item in enumerate(videos):
        if item == video:
            cur_index = index
            break
    #code safety: prev/next stay None when the video wasn't found in the list
    next_slug = None
    prev_slug = None
    if cur_index is not None:
        if cur_index > 0:
            prev_slug = videos[cur_index - 1].slug
        else:
            prev_slug = None
        if cur_index < videos.count() - 1:
            next_slug = videos[cur_index + 1].slug
        else:
            next_slug = None
    # Fetch-or-create the per-user activity record for this video.
    video_rec = request.user.videoactivity_set.filter(video=video)
    if video_rec:
        video_rec = video_rec[0]
    else:
        #note student field to be renamed to user, VideoActivity for all users now
        video_rec = VideoActivity(student=request.user,
                                  course=common_page_data['course'],
                                  video=video)
        video_rec.save()
    course = common_page_data['course']
    full_contentsection_list, full_index_list = get_full_contentsection_list(
        course, filter_children=True)
    if request.user.is_authenticated():
        is_logged_in = 1
    else:
        is_logged_in = 0
    key = ('video', video.id)
    l1items, l2items = get_contentgroup_data(course=course)
    downloadable_content = get_children(key, l1items, l2items)
    if video.exam:
        try:
            #exam = Exam.objects.get(course=course, is_deleted=0, slug=exam_slug)
            exam = video.exam
            # NOTE(review): display_single/invideo are read but never used here.
            display_single = exam.display_single
            invideo = exam.invideo
            #The DOM corresponding to the XML metadata
            metadata_dom = parseString(exam.xml_metadata)
            video_questions = metadata_dom.getElementsByTagName('video')
            # Map "sec_<time>" -> list of question ids for this video's slug.
            question_times = {}
            for video_node in video_questions:
                # Accept both attribute spellings (hyphen and underscore).
                video_slug = video_node.getAttribute("url-identifier")
                if video_slug == "":
                    video_slug = video_node.getAttribute("url_identifier")
                if video_slug == video.slug:
                    question_children = video_node.getElementsByTagName(
                        "question")
                    times = []  # NOTE(review): unused local
                    for question in question_children:
                        time = "sec_%s" % question.getAttribute("time")
                        if time not in question_times:
                            question_times[time] = []
                        question_times[time].append(
                            question.getAttribute("id"))
            # NOTE(review): debug output left in place
            print json.dumps(question_times)
        except Exam.DoesNotExist:
            raise Http404
    else:
        sections = ContentSection.objects.getByCourse(course)
        section = sections[0]
        # create fake exam as exam template (which is what displays all videos) needs exam data to function
        # correctly (TODO: Refactor this)
        exam = Exam(
            course=course,
            slug=slug,
            title=video.title,
            description="Empty Exam",
            html_content="",
            xml_metadata="",
            due_date='',
            assessment_type="survey",
            mode="draft",
            total_score=0,
            grade_single=0,
            grace_period='',
            partial_credit_deadline='',
            late_penalty=0,
            submissions_permitted=0,
            resubmission_penalty=0,
            exam_type="survey",
            autograde=0,
            display_single=0,
            invideo=1,
            section=section,
        )
        exam.live_datetime = video.live_datetime  # needed so video shows up
        question_times = ""
    # Resolve media URLs by storage backend; draft videos use their own
    # id/url, ready-mode ones use their 'image' counterpart's.
    videoURL = None
    thumbnailPath = None
    if is_storage_local():
        videoURL = local_file_server_root() + "/" + str(video.file)
        thumbnailPath = local_file_server_root(
        ) + "/" + course.prefix + "/" + course.suffix + "/videos/" + str(
            video.id if video.mode == 'draft' else video.image.id) + "/jpegs/"
    elif video.url:
        videoURL = "http://www.youtube.com/embed/" + (
            video.url if video.mode == 'draft' else video.image.url
        ) + "?autoplay=0&wmode=transparent&fs=0&rel=0&modestbranding=1&showinfo=0&start=0&enablejsapi=1&disablekb=1&"
        thumbnailPath = "http://" + settings.AWS_STORAGE_BUCKET_NAME + ".s3-website-us-west-2.amazonaws.com/" + course.prefix + "/" + course.suffix + "/videos/" + str(
            video.id if video.mode == 'draft' else video.image.id) + "/jpegs/"
    # change from 'videos/view.html' to 'exams/view_exam.html'
    return render_to_response('exams/view_exam.html', {
        'common_page_data': common_page_data,
        'video': video,
        'video_rec': video_rec,
        'videoURL': videoURL,
        'thumbnailPath': thumbnailPath,
        'prev_slug': prev_slug,
        'next_slug': next_slug,
        'contentsection_list': full_contentsection_list,
        'full_index_list': full_index_list,
        'is_logged_in': is_logged_in,
        'downloadable_content': downloadable_content,
        'json_pre_pop': "{}",
        'scores': "{}",
        'editable': True,
        'single_question': exam.display_single,
        'videotest': exam.invideo,
        'question_times': json.dumps(question_times),
        'allow_submit': True,
        'children': downloadable_content,
        'exam': exam
    }, context_instance=RequestContext(request))