def retry_video_download(request): """Clear any video still accidentally marked as in-progress, and restart the download job. """ VideoFile.objects.filter(download_in_progress=True).update( download_in_progress=False, percent_complete=0) force_job("videodownload", "Download Videos") return JsonResponse({})
def start_subtitle_download(request):
    data = simplejson.loads(request.raw_post_data or "{}")
    update_set = data.get("update_set", "existing")
    language = data.get("language", "")

    # Set the subtitle language
    Settings.set("subtitle_language", language)

    # Get the json file listing all available srts for this language
    request_url = "http://%s/static/data/subtitles/languages/%s_available_srts.json" % (settings.CENTRAL_SERVER_HOST, language)
    try:
        r = requests.get(request_url)
        r.raise_for_status()  # returns None on success; raises HTTPError otherwise
        available_srts = set(r.json["srt_files"])  # .json is a property here (pre-1.0 requests), not a method
    except ConnectionError:
        return JsonResponse({"error": "The central server is currently offline."}, status=500)
    except HTTPError:
        return JsonResponse({"error": "No subtitles available on central server for language code: %s; aborting." % language}, status=500)

    if update_set == "existing":
        videofiles = VideoFile.objects.filter(subtitles_downloaded=False, subtitle_download_in_progress=False)
    else:
        videofiles = VideoFile.objects.filter(subtitle_download_in_progress=False)

    queue_count = 0
    for chunk in break_into_chunks(available_srts):
        queue_count += videofiles.filter(youtube_id__in=chunk).update(flagged_for_subtitle_download=True, subtitles_downloaded=False)

    if queue_count == 0:
        return JsonResponse({"info": "There aren't any subtitles available in this language for your currently downloaded videos."}, status=200)

    force_job("subtitledownload", "Download Subtitles")
    return JsonResponse({})

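# The chunked .update() loops in this module rely on a helper, break_into_chunks,
# imported from elsewhere in the project. Below is a minimal sketch of the idea,
# as an illustration only (assumed behavior, not the project's actual implementation,
# and named differently so it cannot shadow the real helper):
def _example_break_into_chunks(items, chunk_size=100):
    """Yield successive fixed-size slices of `items`, keeping each youtube_id__in clause small (illustrative only)."""
    items = list(items)
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]
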
def start_video_download(request): """ API endpoint for launching the videodownload job. """ youtube_ids = OrderedSet( simplejson.loads(request.raw_post_data or "{}").get("youtube_ids", [])) # One query per video (slow) video_files_to_create = [ id for id in youtube_ids if not get_object_or_None(VideoFile, youtube_id=id) ] video_files_to_update = youtube_ids - OrderedSet(video_files_to_create) # OK to do bulk_create; cache invalidation triggered via save download VideoFile.objects.bulk_create([ VideoFile(youtube_id=id, flagged_for_download=True) for id in video_files_to_create ]) # One query per chunk for chunk in break_into_chunks(youtube_ids): video_files_needing_model_update = VideoFile.objects.filter( download_in_progress=False, youtube_id__in=chunk).exclude(percent_complete=100) video_files_needing_model_update.update(percent_complete=0, cancel_download=False, flagged_for_download=True) force_job("videodownload", _("Download Videos")) return JsonResponse({})
def retry_video_download(request): """ Clear any video still accidentally marked as in-progress, and restart the download job. """ VideoFile.objects.filter(download_in_progress=True).update(download_in_progress=False, percent_complete=0) force_job("videodownload", _("Download Videos")) return JsonResponse({})
def cancel_video_download(request):
    # Clear all download_in_progress flags, to make sure new downloads will go through
    VideoFile.objects.all().update(download_in_progress=False)

    # Unflag all video downloads
    VideoFile.objects.filter(flagged_for_download=True).update(cancel_download=True, flagged_for_download=False, download_in_progress=False)

    force_job("videodownload", stop=True)
    return JsonResponse({})

def update_videos(request): call_command("videoscan") # Could potentially be very slow, blocking request. force_job("videodownload", _("Download Videos")) # async request hit_max = 5 installed_languages = get_installed_language_packs() languages_to_show = [l['name'] for l in installed_languages[:hit_max]] languages_other = installed_languages[hit_max:] context = update_context(request) context.update({ "video_count": VideoFile.objects.filter(percent_complete=100).count(), "languages": languages_to_show, "other_languages_count": len(languages_other) }) return context
def update(request): call_command("videoscan") # Could potentially be very slow, blocking request. force_job("videodownload", "Download Videos") force_job("subtitledownload", "Download Subtitles") default_language = Settings.get("subtitle_language") or "en" device = Device.get_own_device() zone = device.get_zone() context = { "default_language": default_language, "registered": Settings.get("registered"), "zone_id": zone.id if zone else None, "device_id": device.id, "video_count": VideoFile.objects.filter(percent_complete=100).count(), } return context
def start_video_download(request): """ API endpoint for launching the videodownload job. """ youtube_ids = OrderedSet(simplejson.loads(request.raw_post_data or "{}").get("youtube_ids", [])) # One query per video (slow) video_files_to_create = [id for id in youtube_ids if not get_object_or_None(VideoFile, youtube_id=id)] video_files_to_update = youtube_ids - OrderedSet(video_files_to_create) # OK to do bulk_create; cache invalidation triggered via save download VideoFile.objects.bulk_create([VideoFile(youtube_id=id, flagged_for_download=True) for id in video_files_to_create]) # One query per chunk for chunk in break_into_chunks(youtube_ids): video_files_needing_model_update = VideoFile.objects.filter(download_in_progress=False, youtube_id__in=chunk).exclude(percent_complete=100) video_files_needing_model_update.update(percent_complete=0, cancel_download=False, flagged_for_download=True) force_job("videodownload", _("Download Videos")) return JsonResponse({})
def set_as_registered():
    force_job("syncmodels", "Secure Sync", "HOURLY")  # now launches asynchronously
    Settings.set("registered", True)

def force_sync(request): """ """ force_job("syncmodels") # now launches asynchronously return JsonResponse({})