def update_json(youtube_id, lang_code, downloaded, api_response, time_of_attempt):
    """Update language_srt_map to reflect the status of one download attempt.

    Args:
        youtube_id: video whose map entry should be updated.
        lang_code: language whose srt map file is edited.
        downloaded: whether the subtitle was successfully downloaded.
        api_response: status string; "success" also stamps last_success.
        time_of_attempt: timestamp recorded for this attempt.

    Returns:
        True on success, False if the map file could not be read or has no
        entry for youtube_id.
    """
    # Open JSON file; use a context manager so the handle is always closed.
    filepath = get_lang_map_filepath(lang_code)
    try:
        with open(filepath) as fp:
            language_srt_map = json.load(fp)
    except Exception as e:
        logging.error("Something went wrong while trying to open the json file (%s): %s" % (filepath, e))
        return False

    if youtube_id not in language_srt_map:
        # Guard against a KeyError on videos missing from the map.
        logging.error("No entry for video %s in %s; not updating." % (youtube_id, filepath))
        return False

    # Update the entry in place; it is the same dict object stored in the map,
    #   so no separate merge step is needed.
    entry = language_srt_map[youtube_id]
    entry["downloaded"] = downloaded
    entry["api_response"] = api_response
    entry["last_attempt"] = time_of_attempt
    if api_response == "success":
        entry["last_success"] = time_of_attempt

    # Write the full map back to disk (text mode: json emits str).
    with open(filepath, "w") as json_file:
        json_file.write(json.dumps(language_srt_map))
    logging.info("File updated.")  # log only after the write actually happened
    return True
def save(self, update_userlog=True, *args, **kwargs):
    """Save this ExerciseLog, deriving learner-status fields first.

    Args:
        update_userlog: when True, also record "login" activity for the user.
        kwargs: forwarded to Model.save(); "imported" skips validation and
            status computation for records synced in from elsewhere.
    """
    if not kwargs.get("imported", False):
        self.full_clean()

        # Compute learner status
        if self.attempts > 20 and not self.complete:
            self.struggling = True
        already_complete = self.complete
        self.complete = (self.streak_progress >= 100)
        # Only stamp completion data on the transition into "complete".
        if not already_complete and self.complete:
            self.struggling = False
            self.completion_timestamp = datetime.now()
            self.attempts_before_completion = self.attempts

    # Tell logins that they are still active (ignoring validation failures).
    #   TODO(bcipolli): Could log exercise information in the future.
    if update_userlog:
        try:
            UserLog.update_user_activity(self.user, activity_type="login", update_datetime=(self.completion_timestamp or datetime.now()), language=self.language)
        except ValidationError as e:
            logging.error("Failed to update userlog during exercise: %s" % e)

    super(ExerciseLog, self).save(*args, **kwargs)
def save(self, update_userlog=True, *args, **kwargs):
    """Save this VideoLog, deriving learner-status fields first.

    Args:
        update_userlog: when True, also record "login" activity for the user.
        kwargs: forwarded to Model.save(); "imported" skips validation and
            status computation for records synced in from elsewhere.
    """
    # To deal with backwards compatibility,
    #   check video_id, whether imported or not.
    if not self.video_id:
        assert kwargs.get("imported", False), "video_id better be set by internal code."
        assert self.youtube_id, "If not video_id, you better have set youtube_id!"
        self.video_id = i18n.get_video_id(self.youtube_id) or self.youtube_id  # for unknown videos, default to the youtube_id

    if not kwargs.get("imported", False):
        self.full_clean()

        # Compute learner status
        already_complete = self.complete
        self.complete = (self.points >= VideoLog.POINTS_PER_VIDEO)
        # Only stamp completion data on the transition into "complete".
        if not already_complete and self.complete:
            self.completion_timestamp = datetime.now()

    # Tell logins that they are still active (ignoring validation failures).
    #   TODO(bcipolli): Could log video information in the future.
    if update_userlog:
        try:
            UserLog.update_user_activity(self.user, activity_type="login", update_datetime=(self.completion_timestamp or datetime.now()), language=self.language)
        except ValidationError as e:
            logging.error("Failed to update userlog during video: %s" % e)

    super(VideoLog, self).save(*args, **kwargs)
def set_cached_password(cls, user, raw_password):
    """Cache a (cheaper-to-check) hashed copy of raw_password for this user.

    If the caching feature is disabled, any existing cache entry is deleted
    instead, so the cache cannot drift out of sync with the real password.
    Failures are logged and swallowed: the cache is an optimization only.

    Args:
        user: a saved user object (must already have an id).
        raw_password: the plaintext password to hash and cache.
    """
    assert user.id, "Your user must have an ID before calling this function."

    if not cls.is_enabled():
        # Must delete, to make sure we don't get out of sync.
        cls.invalidate_cached_password(user=user)

    else:
        try:
            # Set the cached password, using an iteration count tuned to the
            #   user type (teachers/students may differ).
            n_cached_iters = cls.iters_for_user_type(user)

            # TODO(bcipolli) Migrate this to an extended django class
            #   that uses get_or_initialize
            cached_password = get_object_or_None(cls, user=user) or cls(user=user)
            cached_password.password = crypt(raw_password, iterations=n_cached_iters)
            cached_password.save()

            logging.debug("Set cached password for user=%s; iterations=%d" % (user.username, n_cached_iters))
        except Exception as e:
            # If we fail to create a cache item... just keep going--functionality
            #   can still move forward.
            logging.error(e)
def download_subtitle(youtube_id, lang_code, format="srt"):
    """Return subtitles for YouTube ID in the language specified.

    Returns the raw srt text on success, the error string produced by
    make_request on request failure, or "client-error" if decoding fails.

    Raises:
        AssertionError: if format is not "srt" or youtube_id is not in the
            local srt map.
    """
    assert format == "srt", "We only support srt download at the moment."

    # Use a context manager so the srt-map file handle is closed promptly.
    with open(settings.SUBTITLES_DATA_ROOT + SRTS_JSON_FILENAME) as fp:
        api_info_map = json.load(fp)

    # get amara id; fail with a clear message on unknown videos rather than
    #   an opaque AttributeError on None.
    entry = api_info_map.get(youtube_id)
    assert entry, "Unknown youtube_id in srt map: %s" % youtube_id
    amara_code = entry.get("amara_code")

    # make request
    # Please see http://amara.readthedocs.org/en/latest/api.html
    base_url = "https://amara.org/api2/partners/videos"
    r = make_request(headers, "%s/%s/languages/%s/subtitles/?format=srt" % (base_url, amara_code, lang_code))
    if isinstance(r, basestring):
        # make_request returned an error string; pass it through to the caller.
        return r
    else:
        # return the subtitle text, replacing empty subtitle lines with
        # spaces to make the FLV player happy
        try:
            r.encoding = "UTF-8"
            response = (r.text or u"").replace("\n\n\n", "\n \n\n").replace("\r\n\r\n\r\n", "\r\n \r\n\r\n")
        except Exception as e:
            logging.error(e)
            response = "client-error"
        return response
def zip_language_packs(lang_codes=None):
    """Zip up and expose all language packs.

    Args:
        lang_codes: list of language codes (any format; converted to IETF).
            Defaults to every directory under LOCALE_ROOT.
    """
    lang_codes = lang_codes or os.listdir(LOCALE_ROOT)
    lang_codes = [lcode_to_ietf(lc) for lc in lang_codes]
    logging.info("Zipping up %d language pack(s)" % len(lang_codes))

    for lang_code_ietf in lang_codes:
        lang_code_django = lcode_to_django_dir(lang_code_ietf)
        lang_locale_path = os.path.join(LOCALE_ROOT, lang_code_django)

        if not os.path.exists(lang_locale_path):
            logging.warn("Unexpectedly skipping missing directory: %s" % lang_code_django)
            continue  # BUGFIX: previously fell through and zipped the missing directory
        if not os.path.isdir(lang_locale_path):
            logging.error("Skipping language where a file exists where a directory was expected: %s" % lang_code_django)
            continue  # BUGFIX: previously fell through despite saying "skipping"

        # Create a zipfile for this language
        zip_filepath = get_language_pack_filepath(lang_code_ietf)
        ensure_dir(os.path.dirname(zip_filepath))
        logging.info("Creating zip file in %s" % zip_filepath)
        z = zipfile.ZipFile(zip_filepath, 'w', zipfile.ZIP_DEFLATED)

        # Get every single metadata file in the directory and zip it up
        for metadata_file in glob.glob('%s/*.json' % lang_locale_path):
            z.write(os.path.join(lang_locale_path, metadata_file), arcname=os.path.basename(metadata_file))

        # Bundle subtitles under a "subtitles/" prefix inside the archive.
        srt_dirpath = get_srt_path(lang_code_django)
        for srt_file in glob.glob(os.path.join(srt_dirpath, "*.srt")):
            z.write(srt_file, arcname=os.path.join("subtitles", os.path.basename(srt_file)))
        z.close()
    logging.info("Done.")
def handle(self, *args, **options):
    """Build the dubbed-video mapping and save it to DUBBED_VIDEOS_MAPPING_FILE.

    A local CSV cache is used when it is under 14 days old; otherwise the CSV
    is fetched remotely and the cache refreshed (best-effort).
    """
    # Get the CSV data, either from a recent cache_file
    #   or from the internet
    cache_dir = settings.MEDIA_ROOT
    cache_file = os.path.join(cache_dir, "dubbed_videos.csv")
    if os.path.exists(cache_file) and datediff(datetime.datetime.now(), datetime.datetime.fromtimestamp(os.path.getctime(cache_file)), units="days") <= 14.0:
        # Use cached data to generate the video map
        csv_data = open(cache_file, "r").read()
        (video_map, _) = generate_dubbed_video_mappings(csv_data=csv_data)

    else:
        # Fetch fresh data to generate the video map, then cache the CSV
        #   locally (failures to cache are logged but non-fatal).
        (video_map, csv_data) = generate_dubbed_video_mappings()
        try:
            ensure_dir(cache_dir)
            with open(cache_file, "w") as fp:
                fp.write(csv_data)
        except Exception as e:
            logging.error("Failed to make a local cache of the CSV data: %s" % e)

    # Now we've built the map.  Save it.
    out_file = DUBBED_VIDEOS_MAPPING_FILE
    ensure_dir(os.path.dirname(out_file))
    logging.info("Saving data to %s" % out_file)
    with open(out_file, "w") as fp:
        json.dump(video_map, fp)

    logging.info("Done.")
def download_srt_from_3rd_party(*args, **kwargs):
    """Download subtitles specified by command line args.

    When kwargs contains a "lang_code", only that language's srt map is
    processed; otherwise every download-status file is processed in turn.

    Raises:
        LanguageCodeDoesNotExist: if the requested language has no srt map.
        CommandError: if a status file is unreadable or a download fails.
    """
    lang_code = kwargs.get("lang_code", None)

    # if language specified, do those, if not do all
    if lang_code:
        srt_list_path = get_lang_map_filepath(lang_code)
        try:
            with open(srt_list_path) as fp:  # context manager closes the handle
                videos = json.load(fp)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            #   propagate instead of being rebranded as a missing language.
            raise LanguageCodeDoesNotExist(lang_code)
        download_if_criteria_met(videos, *args, **kwargs)

    else:
        for filename in get_all_download_status_files():
            try:
                with open(filename) as fp:
                    videos = json.load(fp)
            except Exception as e:
                logging.error(e)
                raise CommandError("Unable to open %s. The file might be corrupted. Please re-run the generate_subtitle_map command to regenerate it." % filename)

            try:
                # Status files are named "<lang_code>_...", so recover the code.
                kwargs["lang_code"] = os.path.basename(filename).split("_")[0]
                download_if_criteria_met(videos, *args, **kwargs)
            except Exception as e:
                logging.error(e)
                raise CommandError("Error while downloading language srts: %s" % e)
def update_json(youtube_id, lang_code, downloaded, api_response, time_of_attempt):
    """Update language_srt_map to reflect the status of one download attempt.

    Args:
        youtube_id: video whose map entry should be updated.
        lang_code: language whose srt map file is edited.
        downloaded: whether the subtitle was successfully downloaded.
        api_response: status string; "success" also stamps last_success.
        time_of_attempt: timestamp recorded for this attempt.

    Returns:
        True on success, False if the map could not be read or lacks the video.
    """
    # Open JSON file; context manager guarantees the handle is closed.
    filepath = get_lang_map_filepath(lang_code)
    try:
        with open(filepath) as fp:
            language_srt_map = json.load(fp)
    except Exception as e:
        logging.error("Something went wrong while trying to open the json file (%s): %s" % (filepath, e))
        return False

    if youtube_id not in language_srt_map:
        # Avoid a KeyError on videos that are not in the map.
        logging.error("No entry for video %s in %s; not updating." % (youtube_id, filepath))
        return False

    # Mutate the entry in place; the dict stored in the map is the same
    #   object, so no extra merge call is required.
    entry = language_srt_map[youtube_id]
    entry["downloaded"] = downloaded
    entry["api_response"] = api_response
    entry["last_attempt"] = time_of_attempt
    if api_response == "success":
        entry["last_success"] = time_of_attempt

    # Persist the updated map (text mode: json emits str).
    with open(filepath, "w") as json_file:
        json_file.write(json.dumps(language_srt_map))
    logging.info("File updated.")  # logged after the write, not before
    return True
def handle(self, *args, **options):
    """Build the dubbed-video mapping and save it to DUBBED_VIDEOS_MAPPING_FILE.

    A local CSV cache is used when it is under 14 days old; otherwise the CSV
    is fetched remotely and the cache refreshed (best-effort).
    """
    # Get the CSV data, either from a recent cache_file
    #   or from the internet
    cache_dir = settings.MEDIA_ROOT
    cache_file = os.path.join(cache_dir, "dubbed_videos.csv")
    if os.path.exists(cache_file) and datediff(datetime.datetime.now(), datetime.datetime.fromtimestamp(os.path.getctime(cache_file)), units="days") <= 14.0:
        # Use cached data to generate the video map
        csv_data = open(cache_file, "r").read()
        (video_map, _) = generate_dubbed_video_mappings(csv_data=csv_data)

    else:
        # Fetch fresh data to generate the video map, then cache the CSV
        #   locally (failures to cache are logged but non-fatal).
        (video_map, csv_data) = generate_dubbed_video_mappings()
        try:
            ensure_dir(cache_dir)
            with open(cache_file, "w") as fp:
                fp.write(csv_data)
        except Exception as e:
            logging.error("Failed to make a local cache of the CSV data: %s" % e)

    # Now we've built the map.  Save it.
    out_file = DUBBED_VIDEOS_MAPPING_FILE
    ensure_dir(os.path.dirname(out_file))
    logging.info("Saving data to %s" % out_file)
    with open(out_file, "w") as fp:
        json.dump(video_map, fp)

    logging.info("Done.")
def download_srt_from_3rd_party(*args, **kwargs):
    """Download subtitles specified by command line args.

    When kwargs contains a "lang_code", only that language's srt map is
    processed; otherwise every download-status file is processed in turn.

    Raises:
        LanguageCodeDoesNotExist: if the requested language has no srt map.
        CommandError: if a status file is unreadable or a download fails.
    """
    lang_code = kwargs.get("lang_code", None)

    # if language specified, do those, if not do all
    if lang_code:
        srt_list_path = get_lang_map_filepath(lang_code)
        try:
            with open(srt_list_path) as fp:  # context manager closes the handle
                videos = json.load(fp)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            #   propagate instead of being rebranded as a missing language.
            raise LanguageCodeDoesNotExist(lang_code)
        download_if_criteria_met(videos, *args, **kwargs)

    else:
        for filename in get_all_download_status_files():
            try:
                with open(filename) as fp:
                    videos = json.load(fp)
            except Exception as e:
                logging.error(e)
                raise CommandError("Unable to open %s. The file might be corrupted. Please re-run the generate_subtitle_map command to regenerate it." % filename)

            try:
                # Status files are named "<lang_code>_...", so recover the code.
                kwargs["lang_code"] = os.path.basename(filename).split("_")[0]
                download_if_criteria_met(videos, *args, **kwargs)
            except Exception as e:
                logging.error(e)
                raise CommandError("Error while downloading language srts: %s" % e)
def move_exercises(lang_code):
    """Move downloaded exercise html files from the language pack into the localized exercise dir.

    Args:
        lang_code: language whose downloaded exercises should be installed.
    """
    lang_pack_location = os.path.join(LOCALE_ROOT, lang_code)
    src_exercise_dir = os.path.join(lang_pack_location, "exercises")
    dest_exercise_dir = get_localized_exercise_dirpath(lang_code, is_central_server=False)

    if not os.path.exists(src_exercise_dir):
        logging.warn("Could not find downloaded exercises; skipping: %s" % src_exercise_dir)
    else:
        # Move over one at a time, to combine with any other resources that were there before.
        ensure_dir(dest_exercise_dir)
        all_exercise_files = glob.glob(os.path.join(src_exercise_dir, "*.html"))
        logging.info("Moving %d downloaded exercises to %s" % (len(all_exercise_files), dest_exercise_dir))

        for exercise_file in all_exercise_files:
            shutil.move(exercise_file, os.path.join(dest_exercise_dir, os.path.basename(exercise_file)))

        logging.debug("Removing empty directory")  # BUGFIX: fixed "emtpy" typo
        try:
            shutil.rmtree(src_exercise_dir)
        except Exception as e:
            # BUGFIX: message previously said "dubbed video directory",
            #   copy-pasted from another helper; this removes the exercise dir.
            logging.error("Error removing exercise directory (%s): %s" % (src_exercise_dir, e))
def download_subtitle(youtube_id, lang_code, format="srt"):
    """
    Return subtitles for YouTube ID in language specified. Return False if
    they do not exist. Update local JSON accordingly.

    Note: srt map deals with amara, so uses ietf codes (e.g. en-us)
    """
    assert format == "srt", "We only support srt download at the moment."

    # srt map deals with amara, so uses ietf codes (e.g. en-us)
    with open(SRTS_JSON_FILEPATH, "r") as fp:
        api_info_map = json.load(fp)

    # get amara id; guard against unknown youtube_ids so we raise a clear
    #   AssertionError instead of AttributeError on None.
    entry = api_info_map.get(youtube_id)
    assert entry, "Unknown youtube_id in srt map: %s" % youtube_id
    amara_code = entry.get("amara_code")

    # make request
    # Please see http://amara.readthedocs.org/en/latest/api.html
    base_url = "https://amara.org/api2/partners/videos"
    resp = make_request(AMARA_HEADERS, "%s/%s/languages/%s/subtitles/?format=srt" % (base_url, amara_code, lang_code.lower()))
    if isinstance(resp, basestring) or not resp:
        # Error string (or falsy response) from make_request; pass it through.
        return resp
    else:
        # return the subtitle text, replacing empty subtitle lines with
        # spaces to make the FLV player happy
        try:
            resp.encoding = "UTF-8"
            response = (resp.text or u"").replace("\n\n\n", "\n \n\n").replace("\r\n\r\n\r\n", "\r\n \r\n\r\n")
        except Exception as e:
            logging.error(e)
            response = "client-error"
        return response
def download_subtitle(youtube_id, lang_code, format="srt"):
    """Return subtitles for YouTube ID in the language specified.

    Returns the raw srt text on success, the error string produced by
    make_request on request failure, or "client-error" if decoding fails.

    Raises:
        AssertionError: if format is not "srt" or youtube_id is not in the
            local srt map.
    """
    assert format == "srt", "We only support srt download at the moment."

    # Context manager closes the srt-map file handle (the old one-liner leaked it).
    with open(settings.SUBTITLES_DATA_ROOT + SRTS_JSON_FILENAME) as fp:
        api_info_map = json.load(fp)

    # get amara id; fail with a clear message on unknown videos rather than
    #   an opaque AttributeError on None.
    entry = api_info_map.get(youtube_id)
    assert entry, "Unknown youtube_id in srt map: %s" % youtube_id
    amara_code = entry.get("amara_code")

    # make request
    # Please see http://amara.readthedocs.org/en/latest/api.html
    base_url = "https://amara.org/api2/partners/videos"
    r = make_request(headers, "%s/%s/languages/%s/subtitles/?format=srt" % (base_url, amara_code, lang_code))
    if isinstance(r, basestring):
        # make_request returned an error string; pass it through to the caller.
        return r
    else:
        # return the subtitle text, replacing empty subtitle lines with
        # spaces to make the FLV player happy
        try:
            r.encoding = "UTF-8"
            response = (r.text or u"").replace("\n\n\n", "\n \n\n").replace("\r\n\r\n\r\n", "\r\n \r\n\r\n")
        except Exception as e:
            logging.error(e)
            response = "client-error"
        return response
def login(request, facility):
    """Handle the login page: try Django auth first, then facility-user auth.

    Returns a redirect on successful POST login; otherwise a context dict
    with the (possibly bound) form and the facility list for rendering.
    """
    facility_id = facility and facility.id or None
    facilities = list(Facility.objects.all())

    # Fix for #1211: refresh cached facility info when it's free and relevant
    refresh_session_facility_info(request, facility_count=len(facilities))

    if request.method == 'POST':
        # log out any Django user or facility user
        logout(request)

        username = request.POST.get("username", "")
        password = request.POST.get("password", "")

        # first try logging in as a Django user
        user = authenticate(username=username, password=password)
        if user:
            auth_login(request, user)
            return HttpResponseRedirect(request.next or reverse("easy_admin"))

        # try logging in as a facility user
        form = LoginForm(data=request.POST, request=request, initial={"facility": facility_id})
        if form.is_valid():
            user = form.get_user()

            try:
                UserLog.begin_user_activity(user, activity_type="login", language=request.language)  # Success! Log the event (ignoring validation failures)
            except ValidationError as e:
                logging.error("Failed to begin_user_activity upon login: %s" % e)

            request.session["facility_user"] = user
            messages.success(request, _("You've been logged in! We hope you enjoy your time with KA Lite ") + _("-- be sure to log out when you finish."))

            # Send them back from whence they came
            landing_page = form.cleaned_data["callback_url"]
            if not landing_page:
                # Just going back to the homepage?  We can do better than that:
                #   teachers get coach reports; others get account management
                #   (or the homepage on the RPi package).
                landing_page = reverse("coach_reports") if form.get_user().is_teacher else None
                landing_page = landing_page or (reverse("account_management") if not settings.package_selected("RPi") else reverse("homepage"))

            return HttpResponseRedirect(form.non_field_errors() or request.next or landing_page)

        else:
            messages.error(
                request,
                _("There was an error logging you in. Please correct any errors listed below, and try again."),
            )

    else:  # render the unbound login form
        referer = urlparse.urlparse(request.META["HTTP_REFERER"]).path if request.META.get("HTTP_REFERER") else None

        # never use the homepage as the referer
        if referer in [reverse("homepage"), reverse("add_facility_student")]:
            referer = None

        form = LoginForm(initial={"facility": facility_id, "callback_url": referer})

    return {
        "form": form,
        "facilities": facilities,
    }
def generate_test_files():
    """Insert asterisks as translations in po files.

    For each English .po file, every empty msgstr is replaced with
    "***<msgid>***" so machine-visible test strings appear in the UI, then
    the po files are recompiled with compilemessages.
    """
    # Open them up and insert asterisks for all empty msgstrs
    logging.info("Generating test po files")
    en_po_dir = os.path.join(settings.LOCALE_PATHS[0], "en/LC_MESSAGES/")

    # Hoisted out of the loop: the pattern never changes per file.
    msgid_pattern = re.compile(r'msgid \"(.*)\"\nmsgstr', re.S | re.M)

    for po_file in glob.glob(os.path.join(en_po_dir, "*.po")):
        # glob already yields full paths, so open po_file directly
        #   (joining with en_po_dir again was a no-op on absolute paths).
        with open(po_file, 'r') as po_fp:  # close handles promptly
            content = po_fp.read()
        results = content.split("\n\n")

        with open(os.path.join(en_po_dir, "tmp.po"), 'w') as temp_file:
            # We know the first block is static, so just dump that.
            temp_file.write(results[0])

            # Now work through actual translations
            for result in results[1:]:
                try:
                    msgid = re.findall(msgid_pattern, result)[0]
                    temp_file.write("\n\n")
                    temp_file.write(result.replace("msgstr \"\"", "msgstr \"***%s***\"" % msgid))
                except Exception as e:
                    logging.error("Failed to insert test string: %s\n\n%s\n\n" % (e, result))

        # Once done replacing, rename temp file to overwrite original
        os.rename(os.path.join(en_po_dir, "tmp.po"), po_file)

    (out, err, rc) = compile_po_files("en")
    if err:
        logging.debug("Error executing compilemessages: %s" % err)
def sync_device_records(self):
    """Exchange device records and sync counters with the central server.

    Compares per-device counters on both sides, marks devices the server
    lacks for upload and devices the client lacks for download, downloads
    the latter, and forces counter metadata for devices that arrived with
    no models.

    Returns:
        (counters_to_download, counters_to_upload): dicts keyed by device_id.
    """
    server_counters = self.get_server_device_counters()
    client_counters = self.get_client_device_counters()

    devices_to_download = []
    devices_to_upload = []

    counters_to_download = {}
    counters_to_upload = {}

    # Devices the server is missing (or is behind on) must be uploaded.
    for device_id in client_counters:
        if device_id not in server_counters:
            devices_to_upload.append(device_id)
            counters_to_upload[device_id] = 0
        elif client_counters[device_id] > server_counters[device_id]:
            counters_to_upload[device_id] = server_counters[device_id]

    # Devices the client is missing (or is behind on) must be downloaded.
    for device_id in server_counters:
        if device_id not in client_counters:
            devices_to_download.append(device_id)
            counters_to_download[device_id] = 0
        elif server_counters[device_id] > client_counters[device_id]:
            counters_to_download[device_id] = client_counters[device_id]

    response = json.loads(self.post("device/download", {"devices": devices_to_download}).content)
    # As usual, we're deserializing from the central server, so we assume that what we're getting
    #   is "smartly" dumbed down for us.  We don't need to specify the src_version, as it's
    #   pre-cleaned for us.
    download_results = save_serialized_models(response.get("devices", "[]"), increment_counters=False)

    # BUGFIX(bcipolli) metadata only gets created if models are
    #   streamed; if a device is downloaded but no models are downloaded,
    #   metadata does not exist.  Let's just force it here.
    for device_id in devices_to_download:  # force
        try:
            d = Device.objects.get(id=device_id)
        except Exception as e:
            logging.error("Exception locating device %s for metadata creation: %s" % (device_id, e))
            continue

        if not d.get_counter_position():  # this would be nonzero if the device sync'd models
            d.set_counter_position(counters_to_download[device_id])

    self.session.models_downloaded += download_results["saved_model_count"]
    # FIX: dict.has_key() was removed in Python 3; "in" is the equivalent.
    self.session.errors += "error" in download_results
    self.session.save()

    # TODO(jamalex): upload local devices as well? only needed once we have P2P syncing

    return (counters_to_download, counters_to_upload)
def setUp(self):
    """Create a browser to use for test cases.  Try a bunch of different browsers; hopefully one of them works!"""
    super(BrowserTestCase, self).setUp()

    # Clear the session cache after every test case, to keep things clean.
    Session.objects.all().delete()

    # Can use already launched browser.
    if self.persistent_browser:
        (self.browser, self.admin_user, self.admin_pass) = setup_test_env(persistent_browser=self.persistent_browser)

    # Must create a new browser to use
    else:
        for browser_type in ["Firefox", "Chrome", "Ie", "Opera"]:
            try:
                (self.browser, self.admin_user, self.admin_pass) = setup_test_env(browser_type=browser_type)
                break
            except Exception as e:
                # NOTE(review): if every browser type fails, self.browser is
                #   never set and later accesses raise AttributeError —
                #   confirm this fall-through is intended.
                logging.error("Could not create browser %s through selenium: %s" % (browser_type, e))
def zip_language_packs(lang_codes=None):
    """Zip up and expose all language packs.

    Args:
        lang_codes: list of language directory names under LOCALE_ROOT;
            defaults to every entry in LOCALE_ROOT.
    """
    lang_codes = lang_codes or listdir(LOCALE_ROOT)
    logging.info("Zipping up %d language pack(s)" % len(lang_codes))
    ensure_dir(settings.LANGUAGE_PACK_ROOT)

    for lang in lang_codes:
        lang_locale_path = os.path.join(LOCALE_ROOT, lang)

        if not os.path.exists(lang_locale_path):
            logging.warn("Unexpectedly skipping missing directory: %s" % lang)
            continue  # BUGFIX: previously fell through and tried to zip the missing directory
        if not os.path.isdir(lang_locale_path):
            logging.error("Skipping language where a file exists: %s" % lang)
            continue  # BUGFIX: previously fell through despite saying "skipping"

        # Create a zipfile for this language
        zip_path = os.path.join(settings.LANGUAGE_PACK_ROOT, version.VERSION)
        ensure_dir(zip_path)
        z = zipfile.ZipFile(os.path.join(zip_path, "%s.zip" % convert_language_code_format(lang)), 'w')

        # Get every single file in the directory and zip it up:
        #   metadata at the archive root, .mo files under LC_MESSAGES/,
        #   and subtitles under subtitles/.
        for metadata_file in glob.glob('%s/*.json' % lang_locale_path):
            z.write(os.path.join(lang_locale_path, metadata_file), arcname=os.path.basename(metadata_file))
        for mo_file in glob.glob('%s/LC_MESSAGES/*.mo' % lang_locale_path):
            z.write(os.path.join(lang_locale_path, mo_file), arcname=os.path.join("LC_MESSAGES", os.path.basename(mo_file)))
        for srt_file in glob.glob('%s/subtitles/*.srt' % lang_locale_path):
            z.write(os.path.join(lang_locale_path, srt_file), arcname=os.path.join("subtitles", os.path.basename(srt_file)))
        z.close()
    logging.info("Done.")
def __init__(self, comment=None, fixture=None, **kwargs):
    """Record run metadata (comment, class, platform, git branch/head) and run setup.

    Args:
        comment: free-text note stored in the results dict.
        fixture: fixture name stored in the results dict.
        **kwargs: forwarded to self._setup(); "verbosity" is consumed here
            and must be convertible to int.
    """
    self.return_dict = {}
    self.return_dict['comment'] = comment
    self.return_dict['class'] = type(self).__name__
    self.return_dict['uname'] = platform.uname()
    self.return_dict['fixture'] = fixture
    self.verbosity = int(kwargs.get("verbosity"))

    try:
        branch = subprocess.Popen(["git", "describe", "--contains", "--all", "HEAD"], stdout=subprocess.PIPE).communicate()[0]
        self.return_dict['branch'] = branch[:-1]  # strip trailing newline
        head = subprocess.Popen(["git", "log", "--pretty=oneline", "--abbrev-commit", "--max-count=1"], stdout=subprocess.PIPE).communicate()[0]
        self.return_dict['head'] = head[:-1]  # strip trailing newline
    except Exception:
        # Not a git checkout (or git not installed): record unknowns.
        #   Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        #   still propagate.
        self.return_dict['branch'] = None
        self.return_dict['head'] = None

    # if setup fails, what could we do?
    try:
        self._setup(**kwargs)
    except Exception as e:
        logging.error(e)
def save(self, update_userlog=True, *args, **kwargs):
    """Save this VideoLog, deriving learner-status fields first.

    Args:
        update_userlog: when True, also record "login" activity for the user.
        kwargs: forwarded to Model.save(); "imported" skips validation and
            status computation for records synced in from elsewhere.
    """
    # To deal with backwards compatibility,
    #   check video_id, whether imported or not.
    if not self.video_id:
        assert kwargs.get("imported", False), "video_id better be set by internal code."
        assert self.youtube_id, "If not video_id, you better have set youtube_id!"
        self.video_id = i18n.get_video_id(self.youtube_id) or self.youtube_id  # for unknown videos, default to the youtube_id

    if not kwargs.get("imported", False):
        self.full_clean()

        # Compute learner status
        already_complete = self.complete
        self.complete = (self.points >= VideoLog.POINTS_PER_VIDEO)
        # Only stamp completion data on the transition into "complete".
        if not already_complete and self.complete:
            self.completion_timestamp = datetime.now()

    # Tell logins that they are still active (ignoring validation failures).
    #   TODO(bcipolli): Could log video information in the future.
    if update_userlog:
        try:
            UserLog.update_user_activity(self.user, activity_type="login", update_datetime=(self.completion_timestamp or datetime.now()), language=self.language)
        except ValidationError as e:
            logging.error("Failed to update userlog during video: %s" % e)

    super(VideoLog, self).save(*args, **kwargs)
def update_all_distributed_callback(request):
    """Receive and save serialized video and exercise logs uploaded from a distributed server.

    Expects a POST with "video_logs", "exercise_logs" (JSON lists) and
    "user_id"; unknown videos/exercises are skipped with a warning.

    Returns:
        JsonResponse summarizing upload counts, or JsonResponseMessageError
        on unexpected failures.
    """
    if request.method != "POST":
        raise PermissionDenied("Only POST allowed to this URL endpoint.")

    videos = json.loads(request.POST["video_logs"])
    exercises = json.loads(request.POST["exercise_logs"])
    user = FacilityUser.objects.get(id=request.POST["user_id"])
    node_cache = get_node_cache()

    # Save videos
    n_videos_uploaded = 0
    for video in videos:
        video_id = video['video_id']
        youtube_id = video['youtube_id']

        # Only save video logs for videos that we recognize.
        if video_id not in node_cache["Video"]:
            logging.warn("Skipping unknown video %s" % video_id)
            continue

        try:
            (vl, _) = VideoLog.get_or_initialize(user=user, video_id=video_id, youtube_id=youtube_id)
            for key, val in video.iteritems():
                setattr(vl, key, val)
            logging.debug("Saving video log for %s: %s" % (video_id, vl))
            vl.save()
            n_videos_uploaded += 1
        except KeyError:
            # BUGFIX: this handler's body was commented out (leaving an empty
            #   except clause); restored to mirror the exercise branch below.
            logging.error("Could not save video log for data with missing values: %s" % video)
        except Exception as e:
            error_message = "Unexpected error importing videos: %s" % e
            return JsonResponseMessageError(error_message)

    # Save exercises
    n_exercises_uploaded = 0
    for exercise in exercises:
        # Only save exercise logs for exercises that we recognize.
        if exercise['exercise_id'] not in node_cache['Exercise']:
            # BUGFIX: message previously said "unknown video".
            logging.warn("Skipping unknown exercise %s" % exercise['exercise_id'])
            continue

        try:
            (el, _) = ExerciseLog.get_or_initialize(user=user, exercise_id=exercise["exercise_id"])
            for key, val in exercise.iteritems():
                setattr(el, key, val)
            logging.debug("Saving exercise log for %s: %s" % (exercise['exercise_id'], el))
            el.save()
            n_exercises_uploaded += 1
        except KeyError:
            logging.error("Could not save exercise log for data with missing values: %s" % exercise)
        except Exception as e:
            error_message = "Unexpected error importing exercises: %s" % e
            return JsonResponseMessageError(error_message)

    return JsonResponse({"success": "Uploaded %d exercises and %d videos" % (n_exercises_uploaded, n_videos_uploaded)})
def test_get_exercise_load_status(self):
    """Load every exercise page in the browser and assert that no JS errors were recorded."""
    for exercise_path in get_exercise_paths():
        logging.debug("Testing path : " + exercise_path)
        self.browser.get(self.live_server_url + exercise_path)

        # window.js_errors is populated by an on-page error hook.
        js_errors = self.browser.execute_script("return window.js_errors;")
        if js_errors:
            logging.error("Found JS error(s) while loading path: " + exercise_path)
            for js_error in js_errors:
                logging.error(js_error)
        self.assertFalse(js_errors)
def build_translations(project_id=settings.CROWDIN_PROJECT_ID, project_key=settings.CROWDIN_PROJECT_KEY):
    """Ask CrowdIn to build a fresh zip archive of our latest translations.

    Failures are logged rather than raised; the export is best-effort.
    """
    logging.info("Requesting that CrowdIn build a fresh zip of our translations")
    request_url = "http://api.crowdin.net/api/project/%s/export?key=%s" % (project_id, project_key)
    export_response = requests.get(request_url)
    try:
        export_response.raise_for_status()
    except Exception as e:
        logging.error(e)
def build_translations(project_id=settings.CROWDIN_PROJECT_ID, project_key=settings.CROWDIN_PROJECT_KEY):
    """Trigger a CrowdIn export so the translation zip archive is rebuilt.

    Any HTTP error from the export endpoint is logged and swallowed.
    """
    logging.info("Requesting that CrowdIn build a fresh zip of our translations")
    request_url = "http://api.crowdin.net/api/project/%s/export?key=%s" % (project_id, project_key)
    crowdin_response = requests.get(request_url)
    try:
        crowdin_response.raise_for_status()
    except Exception as e:
        logging.error(e)
def login(request, facility):
    """Handle the login page: try Django auth first, then facility-user auth.

    Returns a redirect on successful POST login; otherwise a context dict
    with the (possibly bound) form and the facility list for rendering.
    """
    facilities = Facility.objects.all()
    facility_id = facility and facility.id or None

    if request.method == 'POST':
        # log out any Django user or facility user
        logout(request)

        username = request.POST.get("username", "")
        password = request.POST.get("password", "")

        # first try logging in as a Django user
        user = authenticate(username=username, password=password)
        if user:
            auth_login(request, user)
            return HttpResponseRedirect(request.next or reverse("easy_admin"))

        # try logging in as a facility user
        form = LoginForm(data=request.POST, request=request, initial={"facility": facility_id})
        if form.is_valid():
            user = form.get_user()

            try:
                UserLog.begin_user_activity(user, activity_type="login")  # Success! Log the event (ignoring validation failures)
            except ValidationError as e:
                logging.error("Failed to begin_user_activity upon login: %s" % e)

            request.session["facility_user"] = user
            messages.success(request, _("You've been logged in! We hope you enjoy your time with KA Lite ") + _("-- be sure to log out when you finish."))

            # Teachers land on coach reports; everyone else goes to account
            #   management (or the homepage on the RPi package).
            landing_page = reverse("coach_reports") if form.get_user().is_teacher else None
            landing_page = landing_page or (reverse("account_management") if settings.CONFIG_PACKAGE != "RPi" else reverse("homepage"))

            return HttpResponseRedirect(form.non_field_errors() or request.next or landing_page)

        else:
            messages.error(request, strip_tags(form.non_field_errors()) or _("There was an error logging you in. Please correct any errors listed below, and try again."))

    else:  # render the unbound login form
        form = LoginForm(initial={"facility": facility_id})

    return {"form": form, "facilities": facilities}
def move_video_sizes_file(lang_code):
    """Install the video-sizes json shipped inside a downloaded language pack.

    Replaces the old remote_video_size json with the copy found in the
    language pack directory, logging an error if the pack has none.
    """
    lang_pack_location = os.path.join(LOCALE_ROOT, lang_code)
    filename = os.path.basename(REMOTE_VIDEO_SIZE_FILEPATH)
    src_path = os.path.join(lang_pack_location, filename)
    dest_path = REMOTE_VIDEO_SIZE_FILEPATH  # replace the old remote_video_size json

    if os.path.exists(src_path):
        logging.debug('Moving %s to %s' % (src_path, dest_path))
        shutil.move(src_path, dest_path)
    else:
        logging.error("Could not find videos sizes file (%s)" % src_path)
def sync_device_records(self):
    """Exchange device records and sync counters with the central server.

    Compares per-device counters on both sides, marks devices the server
    lacks for upload and devices the client lacks for download, downloads
    the latter, and forces counter metadata for devices that arrived with
    no models.

    Returns:
        (counters_to_download, counters_to_upload): dicts keyed by device_id.
    """
    server_counters = self.get_server_device_counters()
    client_counters = self.get_client_device_counters()

    devices_to_download = []
    devices_to_upload = []

    counters_to_download = {}
    counters_to_upload = {}

    # Devices the server is missing (or is behind on) must be uploaded.
    for device_id in client_counters:
        if device_id not in server_counters:
            devices_to_upload.append(device_id)
            counters_to_upload[device_id] = 0
        elif client_counters[device_id] > server_counters[device_id]:
            counters_to_upload[device_id] = server_counters[device_id]

    # Devices the client is missing (or is behind on) must be downloaded.
    for device_id in server_counters:
        if device_id not in client_counters:
            devices_to_download.append(device_id)
            counters_to_download[device_id] = 0
        elif server_counters[device_id] > client_counters[device_id]:
            counters_to_download[device_id] = client_counters[device_id]

    response = json.loads(self.post("device/download", {"devices": devices_to_download}).content)
    # As usual, we're deserializing from the central server, so we assume that what we're getting
    #   is "smartly" dumbed down for us.  We don't need to specify the src_version, as it's
    #   pre-cleaned for us.
    download_results = save_serialized_models(response.get("devices", "[]"), increment_counters=False)

    # BUGFIX(bcipolli) metadata only gets created if models are
    #   streamed; if a device is downloaded but no models are downloaded,
    #   metadata does not exist.  Let's just force it here.
    for device_id in devices_to_download:  # force
        try:
            d = Device.objects.get(id=device_id)
        except Exception as e:
            logging.error("Exception locating device %s for metadata creation: %s" % (device_id, e))
            continue

        if not d.get_counter_position():  # this would be nonzero if the device sync'd models
            d.set_counter_position(counters_to_download[device_id])

    self.session.models_downloaded += download_results["saved_model_count"]
    # FIX: dict.has_key() was removed in Python 3; "in" is the equivalent.
    self.session.errors += "error" in download_results
    self.session.save()

    # TODO(jamalex): upload local devices as well? only needed once we have P2P syncing

    return (counters_to_download, counters_to_upload)
def write_new_json(subtitle_counts, data_path):
    """Merge subtitle_counts into the subtitle_counts.json file under data_path.

    Args:
        subtitle_counts: dict of counts to merge over any existing data.
        data_path: directory path (with trailing separator) for the output file.
    """
    filename = "subtitle_counts.json"
    filepath = data_path + filename
    try:
        with open(filepath) as fp:  # context manager closes the handle
            current_counts = json.load(fp)
    except Exception as e:
        logging.error("Subtitle counts file appears to be corrupted (%s). Starting from scratch." % e)
        current_counts = {}

    current_counts.update(subtitle_counts)

    logging.info("Writing fresh srt counts to %s" % filepath)
    # Text mode: json.dump writes str, which a binary-mode ('wb') handle
    #   rejects on Python 3.
    with open(filepath, 'w') as fp:
        json.dump(current_counts, fp)
def logout(request):
    """End any facility-user session, log out the Django user, and redirect.

    The redirect target comes from the "next" query parameter, falling back
    to the homepage; non-local targets are replaced with "/".
    """
    if "facility_user" in request.session:
        # Close out the user's activity record; logout succeeds regardless.
        try:
            UserLog.end_user_activity(request.session["facility_user"], activity_type="login")
        except ValidationError as e:
            logging.error("Failed to end_user_activity upon logout: %s" % e)
        del request.session["facility_user"]

    auth_logout(request)

    redirect_to = request.GET.get("next", reverse("homepage"))
    if redirect_to[0] != "/":
        # Refuse anything that is not a site-local path.
        redirect_to = "/"
    return HttpResponseRedirect(redirect_to)
def _execute(self):
    """Run the simulated-user activity loop until self.duration minutes elapse.

    Each iteration performs the current activity, appends
    (name, start clock time, elapsed seconds) to self.return_list, optionally
    sleeps, then draws the next activity from the current activity's
    "nextstep" probability table.
    """
    current_activity = "begin"
    endtime = time.time() + (self.duration * 60.)  # self.duration is in minutes

    while True:
        # Once time is up, force the terminal "end" activity.
        if time.time() >= endtime:
            current_activity = "end"

        # Prep and do the current activity
        try:
            start_clock_time = datetime.datetime.today()
            start_time = time.time()
            result = self.activity[current_activity]["method"](self.activity[current_activity]["args"])
            self.return_list.append((
                current_activity,
                '%02d:%02d:%02d' % (start_clock_time.hour, start_clock_time.minute, start_clock_time.second),
                round((time.time() - start_time), 2),
            ))
        except Exception as e:
            # Errors during "end" are logged; errors anywhere else abort the run.
            if current_activity != "end":
                raise
            else:
                logging.error("Error on end: %s" % e)

        if current_activity == "end":
            break

        # Wait before the next activity
        if "duration" in self.activity[current_activity]:
            if self.verbosity >= 2:
                print "(" + str(self.behavior_profile - 24601) + ")" + "sleeping for ", self.activity[current_activity]["duration"]
            time.sleep(self.activity[current_activity]["duration"])

        # Choose the next activity: the first nextstep entry whose threshold
        #   covers the random draw wins.
        next_activity_random = round(self.random.random(), 2)
        for threshold, next_activity in self.activity[current_activity]["nextstep"]:
            if threshold >= next_activity_random:
                if self.verbosity >= 2:
                    print "(" + str(self.behavior_profile - 24601) + ")" + str(next_activity_random), "next_activity =", next_activity
                current_activity = next_activity
                break
def handle(self, *args, **options):
    """Update the KA Lite install, choosing a source in priority order:

    1. "test" as the sole positional arg: weak sanity check that the
       Django project compiles, then exit.
    2. --repo given: update via git from that repo.
    3. --zip-file given: update from that zip file.
    4. A git checkout detected next to the project: update via git.
    5. A zip file given as the single positional argument.
    6. Otherwise: download a snapshot zip (GitHub first, then the
       central server) and update from it.
    """
    if len(args)==1 and args[0]== "test":
        # Callback for "weak" test--checks at least that the django project compiles (local_settings is OK)
        sys.stdout.write("Success!\n")
        exit(0)

    if options.get("repo", None):
        # Specified a repo
        self.update_via_git(**options)

    elif options.get("zip_file", None):
        # Specified a file
        if not os.path.exists(options.get("zip_file")):
            raise CommandError("Specified zip file does not exist: %s" % options.get("zip_file"))
        self.update_via_zip(**options)

    elif os.path.exists(settings.PROJECT_PATH + "/../.git"):
        # Without params, if we detect a git repo, try git
        # NOTE(review): this branch is checked BEFORE positional args, so a
        # positional zip path is ignored inside a git checkout -- confirm intended.
        self.update_via_git(**options)

    elif len(args) > 1:
        raise CommandError("Too many command-line arguments.")

    elif len(args) == 1:
        # Specify zip via first command-line arg
        if options['zip_file'] is not None:
            raise CommandError("Cannot specify a zipfile as unnamed and named command-line arguments at the same time.")
        options['zip_file'] = args[0]
        self.update_via_zip(**options)

    else:
        # No params, no git repo: try to get a file online.
        zip_file = tempfile.mkstemp()[1]
        for url in ["https://github.com/learningequality/ka-lite/archive/master.zip", "http://%s/download/kalite/%s/%s/" % (settings.CENTRAL_SERVER_HOST, platform.system().lower(), "all")]:
            logging.info("Downloading repo snapshot from %s to %s" % (url, zip_file))
            try:
                urllib.urlretrieve(url, zip_file)
                sys.stdout.write("success @ %s\n" % url)
                break;
            except Exception as e:
                logging.error("Failed to get zipfile from %s: %s" % (url, e))
                continue
        # NOTE(review): if every download fails, update_via_zip still runs on
        # the empty temp file -- TODO confirm this is handled downstream.
        self.update_via_zip(zip_file=zip_file, **options)

    self.stdout.write("Update is complete!\n")
def write_new_json(subtitle_counts, data_path):
    """Update static/data/subtitles/subtitle_counts.json with new counts.

    Existing counts are loaded first (falling back to an empty dict when
    the file is absent or corrupted), ``subtitle_counts`` is merged on
    top, and the combined mapping is written back.
    """
    # Build the path portably: plain concatenation produced a broken path
    # whenever data_path lacked a trailing separator.
    filepath = os.path.join(data_path, "subtitle_counts.json")

    current_counts = {}
    try:
        with open(filepath) as fp:  # close deterministically
            current_counts = json.load(fp)
    except Exception as e:
        logging.error(
            "Subtitle counts file appears to be corrupted (%s). Starting from scratch." % e)

    current_counts.update(subtitle_counts)

    logging.info("Writing fresh srt counts to %s" % filepath)
    # Text mode: json.dump writes str, so "wb" was the wrong mode.
    with open(filepath, "w") as fp:
        json.dump(current_counts, fp)
def get_file2lang_map(force=False):
    """Return (building and caching on first use) the reverse dubbed-video
    map: youtube_id -> language code.  Pass force=True to rebuild."""
    global YT2LANG_MAP
    if YT2LANG_MAP is not None and not force:
        return YT2LANG_MAP

    YT2LANG_MAP = mapping = {}
    for lang_code, videos in get_dubbed_video_map().iteritems():
        for dubbed_youtube_id in videos.values():
            # Sanity check, but must be failsafe, since we don't control these data
            if dubbed_youtube_id in mapping:
                if mapping[dubbed_youtube_id] == lang_code:
                    logging.warn("Duplicate entry found in %s language map for dubbed video %s" % (lang_code, dubbed_youtube_id))
                else:
                    logging.error("Conflicting entry found in language map for video %s; overwriting previous entry of %s to %s." % (dubbed_youtube_id, mapping[dubbed_youtube_id], lang_code))
            mapping[dubbed_youtube_id] = lang_code

    return YT2LANG_MAP
def write_count_to_json(subtitle_counts, data_path):
    """Merge ``subtitle_counts`` into SUBTITLE_COUNTS_FILEPATH and rewrite it.

    Note: ``data_path`` is unused -- the destination is always the
    module-level SUBTITLE_COUNTS_FILEPATH; the parameter is kept for
    interface compatibility with callers.
    """
    try:
        with open(SUBTITLE_COUNTS_FILEPATH, "r") as fp:
            current_counts = json.load(fp)
    except Exception as e:
        logging.error("Subtitle counts file appears to be corrupted (%s). Starting from scratch." % e)
        current_counts = {}

    current_counts.update(subtitle_counts)

    logging.debug("Writing fresh srt counts to %s" % SUBTITLE_COUNTS_FILEPATH)
    # json.dump writes text, so open in text mode (was "wb", which breaks
    # under Python 3 and gains nothing on Python 2).
    with open(SUBTITLE_COUNTS_FILEPATH, "w") as fp:
        # sort here, so we don't have to sort later when serving to clients
        json.dump(current_counts, fp, sort_keys=True)
def account_management(request, org_id=None):
    """Show the student account-management page.

    Side effect: records a 'coachreport' activity event -- but only for
    students (otherwise it's hard to compare teachers), and only when the
    user did not just arrive from the login page.
    """
    if "facility_user" in request.session:
        user = request.session["facility_user"]
        if not user.is_teacher and reverse("login") not in request.META.get("HTTP_REFERER", ""):
            try:
                # Log a "begin" and end here
                UserLog.begin_user_activity(user, activity_type="coachreport")
                UserLog.update_user_activity(user, activity_type="login")  # to track active login time for teachers
                UserLog.end_user_activity(user, activity_type="coachreport")
            except ValidationError as e:
                # Never report this error; don't want this logging to block other functionality.
                logging.error("Failed to update student userlog activity: %s" % e)

    return student_view_context(request)
def move_dubbed_video_map(lang_code):
    """Move the downloaded dubbed-video map for ``lang_code`` into its
    canonical location (DUBBED_VIDEOS_MAPPING_FILEPATH), then delete the
    now-empty per-language download directory."""
    lang_pack_location = os.path.join(LOCALE_ROOT, lang_code)
    dubbed_video_dir = os.path.join(lang_pack_location, "dubbed_videos")
    # The downloaded file shares its basename with the canonical destination.
    dvm_filepath = os.path.join(dubbed_video_dir, os.path.basename(DUBBED_VIDEOS_MAPPING_FILEPATH))
    if not os.path.exists(dvm_filepath):
        logging.error("Could not find downloaded dubbed video filepath: %s" % dvm_filepath)
    else:
        logging.debug("Moving dubbed video map to %s" % DUBBED_VIDEOS_MAPPING_FILEPATH)
        ensure_dir(os.path.dirname(DUBBED_VIDEOS_MAPPING_FILEPATH))
        shutil.move(dvm_filepath, DUBBED_VIDEOS_MAPPING_FILEPATH)

        logging.debug("Removing empty directory")  # typo fix: was "emtpy"
        try:
            shutil.rmtree(dubbed_video_dir)
        except Exception as e:
            # Best-effort cleanup; never fail the move because of it.
            logging.error("Error removing dubbed video directory (%s): %s" % (dubbed_video_dir, e))
def load_data_for_offline_install(in_file):
    """
    Receives a serialized file for import.
    Import the file--nothing more!

    File should contain:
    * Central server object
    and, optionally
    * Zone object
    * Device and DeviceZone / ZoneInvitation objects (chain of trust)

    Essentially duplicates code from securesync.device.api_client:RegistrationClient

    Returns the ZoneInvitation found in the stream (claimed for this device
    if it was unused), or None.
    """
    assert os.path.exists(in_file), "in_file must exist."

    with open(in_file, "r") as fp:
        models = engine.deserialize(
            fp.read())  # all must be in a consistent version

    # First object should be the central server.
    try:
        # NOTE: .next() is Python 2-only generator syntax.
        central_server = models.next().object
    except Exception as e:
        # Missing/unreadable first object: give up quietly (debug only).
        logging.debug("Exception loading central server object: %s" % e)
        return

    logging.debug("Saving object %s" % central_server)
    assert isinstance(central_server, Device)
    central_server.save(imported=True, is_trusted=True)

    # Everything else, import as is.
    invitation = None
    for model in models:
        try:
            logging.debug("Saving object %s" % model.object)
            model.object.save(imported=True)

            if isinstance(model.object, ZoneInvitation):
                # Zone info existed in the data blob we received.  Use it to join the zone!
                invitation = model.object
                if invitation.used_by is None:
                    invitation.claim(used_by=Device.get_own_device())
        except ValidationError as e:
            # Happens when there's duplication of data, sometimes.
            # Shouldn't happen, but keeping this here to make things
            # a bit more robust.
            logging.error("Failed to import model %s" % model)

    return invitation
def login(request, facility):
    """Handle the login page for both GET and POST.

    POST: logs out any current user, tries Django-admin credentials first,
    then falls back to a facility-user login; on success redirects to a
    role-appropriate landing page.  GET (or a failed POST) renders the
    login form.
    """
    facilities = Facility.objects.all()
    facility_id = facility and facility.id or None

    if request.method == 'POST':
        # log out any Django user or facility user
        logout(request)

        username = request.POST.get("username", "")
        password = request.POST.get("password", "")

        # first try logging in as a Django user
        user = authenticate(username=username, password=password)
        if user:
            auth_login(request, user)
            return HttpResponseRedirect(request.next or reverse("easy_admin"))

        # try logging in as a facility user
        form = LoginForm(data=request.POST, request=request, initial={"facility": facility_id})
        if form.is_valid():
            user = form.get_user()

            try:
                UserLog.begin_user_activity(user, activity_type="login")  # Success! Log the event (ignoring validation failures)
            except ValidationError as e:
                logging.error("Failed to begin_user_activity upon login: %s" % e)

            request.session["facility_user"] = user
            messages.success(request, _("You've been logged in! We hope you enjoy your time with KA Lite ") + _("-- be sure to log out when you finish."))

            # Teachers land on coach reports; students on account management
            # (or the homepage on RPi builds).
            landing_page = reverse("coach_reports") if form.get_user().is_teacher else None
            landing_page = landing_page or (reverse("account_management") if settings.CONFIG_PACKAGE != "RPi" else reverse("homepage"))

            return HttpResponseRedirect(form.non_field_errors() or request.next or landing_page)

        else:
            messages.error(
                request,
                strip_tags(form.non_field_errors()) or _("There was an error logging you in. Please correct any errors listed below, and try again.")
            )

    else:  # render the unbound login form
        form = LoginForm(initial={"facility": facility_id})

    return {
        "form": form,
        "facilities": facilities
    }
def download_srt_from_3rd_party(*args, **kwargs):
    """Download subtitles specified by command line args.

    With lang_code == "all", iterates every download-status file, downloads
    per language, and collects per-language failures (reported at the end
    via CommandError).  Otherwise downloads only the given language.

    Raises CommandError when no language is given, when a status file is
    unreadable, or (for "all") when any language failed.
    """
    lang_code = kwargs.get("lang_code", None)

    # if language specified, do those, if not do all
    if not lang_code:
        raise CommandError("You must specify a language code or 'all' with -l")

    elif lang_code == "all":
        bad_languages = {}
        for filename in get_all_download_status_files():
            try:
                # with-statement: the old bare open() leaked the handle.
                with open(filename) as f:
                    videos = json.loads(f.read())
            except Exception as e:
                logging.error(e)
                raise CommandError("Unable to open %s. The file might be corrupted. Please re-run the generate_subtitle_map command to regenerate it." % filename)

            try:
                lang_code = os.path.basename(filename).split("_")[0]
                kwargs["lang_code"] = lang_code
                download_if_criteria_met(videos, *args, **kwargs)
            except Exception as e:
                logging.error("Error downloading subtitles for %s: %s" % (lang_code, e))
                bad_languages[lang_code] = e
                continue

        # now report final results
        if bad_languages:
            raise CommandError("Failed to download subtitles for the following languages: %s" % bad_languages.keys())

    else:
        srt_list_path = get_lang_map_filepath(convert_language_code_format(lang_code))
        try:
            with open(srt_list_path) as f:
                videos = json.loads(f.read())
        except Exception:
            # Bug fix: was a bare "except:", which also swallowed SystemExit
            # and KeyboardInterrupt; Exception still covers the expected
            # IOError / ValueError here.
            logging.warning("No subtitles available for download for language code %s. Skipping." % lang_code)
        else:
            download_if_criteria_met(videos, *args, **kwargs)
def load_data_for_offline_install(in_file):
    """
    Receives a serialized file for import.
    Import the file--nothing more!

    File should contain:
    * Central server object
    and, optionally
    * Zone object
    * Device and DeviceZone / ZoneInvitation objects (chain of trust)

    Essentially duplicates code from securesync.device.api_client:RegistrationClient
    """
    assert os.path.exists(in_file), "in_file must exist."

    with open(in_file, "r") as fp:
        serialized_data = fp.read()
    models = engine.deserialize(serialized_data)  # all must be in a consistent version

    # First object should be the central server.
    try:
        central_server = next(models).object
    except Exception as e:
        logging.debug("Exception loading central server object: %s" % e)
        return

    logging.debug("Saving object %s" % central_server)
    assert isinstance(central_server, Device)
    central_server.save(imported=True, is_trusted=True)

    # Everything else, import as is.
    invitation = None
    for model in models:
        try:
            logging.debug("Saving object %s" % model.object)
            model.object.save(imported=True)

            if not isinstance(model.object, ZoneInvitation):
                continue
            # Zone info existed in the data blob we received. Use it to join the zone!
            invitation = model.object
            if invitation.used_by is None:
                invitation.claim(used_by=Device.get_own_device())
        except ValidationError as e:
            # Happens when there's duplication of data, sometimes.
            # Shouldn't happen, but keeping this here to make things
            # a bit more robust.
            logging.error("Failed to import model %s" % model)

    return invitation
def setUp(self):
    """Create a browser to use for test cases.
    Try a bunch of different browsers; hopefully one of them works!"""
    super(BrowserTestCase, self).setUp()

    if self.persistent_browser:
        # Can reuse an already-launched browser.
        env = setup_test_env(persistent_browser=self.persistent_browser)
        (self.browser, self.admin_user, self.admin_pass) = env
        return

    # Must create a new browser to use; stop at the first one that launches.
    for browser_type in ("Firefox", "Chrome", "Ie", "Opera"):
        try:
            env = setup_test_env(browser_type=browser_type)
        except Exception as e:
            logging.error("Could not create browser %s through selenium: %s" % (browser_type, e))
        else:
            (self.browser, self.admin_user, self.admin_pass) = env
            break
def download_crowdin_metadata(project_id=None, project_key=None):
    """Fetch CrowdIn project status metadata.

    Returns the parsed JSON *dict* from CrowdIn's status API (the old
    one-line summary claiming a tuple was inaccurate), or {} on any
    request/parse error.  Falls back to settings.CROWDIN_PROJECT_ID /
    CROWDIN_PROJECT_KEY when arguments are omitted.
    """
    if not project_id:
        project_id = settings.CROWDIN_PROJECT_ID
    if not project_key:
        project_key = settings.CROWDIN_PROJECT_KEY

    request_url = "http://api.crowdin.net/api/project/%s/status?key=%s&json=True" % (project_id, project_key)
    try:
        # NOTE(review): no timeout is passed, so a hung CrowdIn API call
        # blocks indefinitely -- consider requests.get(..., timeout=...).
        resp = requests.get(request_url)
        resp.raise_for_status()
        crowdin_meta_dict = json.loads(resp.content)
    except Exception as e:
        logging.error("Error getting crowdin metadata: %s" % e)
        crowdin_meta_dict = {}
    return crowdin_meta_dict
def record_ping(cls, id, ip):
    """ We received a failed request to create a session; record that 'ping' in our DB """
    # Note: `id` shadows the builtin, but it is part of the public interface.
    try:
        # Create the log (if necessary), update, and save
        # TODO: make a base class (in django_utils) that has get_or_initialize, and use that
        #   to shorten things here
        (cur_device, _) = UnregisteredDevice.objects.get_or_create(id=id)
        (cur_log, _) = cls.get_or_initialize(device=cur_device)  # get is safe, because device is unique
        # Count this ping and remember the most recent source address.
        cur_log.npings += 1
        cur_log.last_ip = ip
        cur_log.save()
    except Exception as e:  # Never block functionality
        logging.error("Error recording unregistered device ping: %s" % e)
def download_crowdin_metadata(project_id=None, project_key=None):
    """Query the CrowdIn status API and return the parsed JSON metadata dict.

    Falls back to settings.CROWDIN_PROJECT_ID / CROWDIN_PROJECT_KEY for
    omitted arguments; returns {} when the request or parsing fails.
    """
    project_id = project_id or settings.CROWDIN_PROJECT_ID
    project_key = project_key or settings.CROWDIN_PROJECT_KEY

    request_url = "http://api.crowdin.net/api/project/%s/status?key=%s&json=True" % (project_id, project_key)
    try:
        resp = requests.get(request_url)
        resp.raise_for_status()
        return json.loads(resp.content)
    except Exception as e:
        logging.error("Error getting crowdin metadata: %s" % e)
        return {}
def execute(self, iterations=1):
    """Run self._execute() ``iterations`` times (clamped to at least 1 and
    at most self.max_iterations when defined), recording per-run elapsed
    time, post-run info, and exceptions into self.return_dict keyed by
    1-based run number; finally tear down and return self.return_dict."""
    # Clamp the requested iteration count.
    iterations = max(iterations, 1)
    if hasattr(self, 'max_iterations'):
        iterations = min(iterations, self.max_iterations)

    results = self.return_dict
    results['iterations'] = iterations
    results['individual_elapsed'] = {}
    results['post_execute_info'] = {}
    results['exceptions'] = {}

    for run in range(1, iterations + 1):
        results['exceptions'][run] = []

        started = time.time()
        try:
            self._execute()
        except Exception as e:
            # Failed runs contribute no elapsed time (excluded from the average).
            results['individual_elapsed'][run] = None
            results['exceptions'][run].append(e)
        else:
            results['individual_elapsed'][run] = time.time() - started

        try:
            results['post_execute_info'][run] = self._get_post_execute_info()
        except Exception as e:
            results['post_execute_info'][run] = None
            results['exceptions'][run].append(e)

    successful_times = [v for v in results['individual_elapsed'].values() if v is not None]
    results['average_elapsed'] = (sum(successful_times) / float(len(successful_times))) if successful_times else None

    try:
        self._teardown()
    except Exception as e:
        logging.error(e)

    return self.return_dict
def record_ping(cls, id, ip):
    """ We received a failed request to create a session; record that 'ping' in our DB """
    try:
        # Create the log row if needed, bump the counter, remember the source
        # address, and persist.
        # TODO: make a base class (in django_utils) that has get_or_initialize,
        #   and use that to shorten things here
        device, _ = UnregisteredDevice.objects.get_or_create(id=id)
        ping_log, _ = cls.get_or_initialize(device=device)  # get is safe: device is unique
        ping_log.npings += 1
        ping_log.last_ip = ip
        ping_log.save()
    except Exception as e:
        # Never block functionality over bookkeeping.
        logging.error("Error recording unregistered device ping: %s" % e)
def save(self, *args, **kwargs):
    """Recompute completion state and touch the user's login activity
    before saving.  Both steps are skipped when kwargs["imported"] is set
    (i.e. for records arriving via sync)."""
    if not kwargs.get("imported", False):
        self.full_clean()

        # Compute learner status
        already_complete = self.complete
        self.complete = (self.points >= VideoLog.POINTS_PER_VIDEO)
        if not already_complete and self.complete:
            # First time crossing the threshold: stamp when and at what
            # device counter completion happened.
            self.completion_timestamp = datetime.now()
            self.completion_counter = Device.get_own_device().get_counter()

        # Tell logins that they are still active (ignoring validation failures).
        # TODO(bcipolli): Could log video information in the future.
        try:
            UserLog.update_user_activity(self.user, activity_type="login", update_datetime=(self.completion_timestamp or datetime.now()))
        except ValidationError as e:
            logging.error("Failed to update userlog during video: %s" % e)

    super(VideoLog, self).save(*args, **kwargs)
def _get_installed_language_packs():
    """
    On-disk method to show currently installed languages and meta data.

    Scans each directory in settings.LOCALE_PATHS for a
    <code>_metadata.json file and returns an OrderedDict mapping IETF
    language code -> metadata dict, sorted by language name.  English is
    always included.
    """
    # There's always English...
    installed_language_packs = [{
        'code': 'en',
        'software_version': VERSION,
        'language_pack_version': 0,
        'percent_translated': 100,
        'subtitle_count': 0,
        'name': 'English',
        'native_name': 'English',
    }]

    # Loop through locale folders
    for locale_dir in settings.LOCALE_PATHS:
        if not os.path.exists(locale_dir):
            continue

        # Loop through folders in each locale dir
        for django_disk_code in os.listdir(locale_dir):
            # Inside each folder, read from the JSON file - language name, % UI trans, version number
            # Bug fix: compute the path BEFORE the try block.  Previously, if
            # lcode_to_ietf/os.path.join raised, the except clause referenced
            # an unbound metadata_filepath and died with a NameError that
            # masked the real error.
            metadata_filepath = os.path.join(locale_dir, django_disk_code, "%s_metadata.json" % lcode_to_ietf(django_disk_code))
            try:
                lang_meta = softload_json(metadata_filepath, raises=True)
                logging.debug("Found language pack %s" % (django_disk_code))
            except Exception as e:
                if isinstance(e, IOError) and e.errno == 2:  # errno 2 == ENOENT: no metadata file => not a language pack
                    logging.info("Ignoring non-language pack %s in %s" % (django_disk_code, locale_dir))
                else:
                    logging.error("Error reading %s metadata (%s): %s" % (django_disk_code, metadata_filepath, e))
                continue

            installed_language_packs.append(lang_meta)

    sorted_list = sorted(installed_language_packs, key=lambda m: m['name'].lower())
    return OrderedDict([(lcode_to_ietf(val["code"]), val) for val in sorted_list])
def move_dubbed_video_map(lang_code):
    """Relocate the downloaded dubbed-video map for ``lang_code`` to its
    canonical path (DUBBED_VIDEOS_MAPPING_FILEPATH), then clean up the
    per-language download directory."""
    dubbed_video_dir = os.path.join(LOCALE_ROOT, lang_code, "dubbed_videos")
    dvm_filepath = os.path.join(dubbed_video_dir, os.path.basename(DUBBED_VIDEOS_MAPPING_FILEPATH))

    if not os.path.exists(dvm_filepath):
        logging.error("Could not find downloaded dubbed video filepath: %s" % dvm_filepath)
        return

    logging.debug("Moving dubbed video map to %s" % DUBBED_VIDEOS_MAPPING_FILEPATH)
    ensure_dir(os.path.dirname(DUBBED_VIDEOS_MAPPING_FILEPATH))
    shutil.move(dvm_filepath, DUBBED_VIDEOS_MAPPING_FILEPATH)

    logging.debug("Removing emtpy directory")
    try:
        shutil.rmtree(dubbed_video_dir)
    except Exception as e:
        # Cleanup is best-effort; the map itself has already been moved.
        logging.error("Error removing dubbed video directory (%s): %s" % (dubbed_video_dir, e))
def _execute(self):
    """Drive this simulated client's activity loop until "end" completes.

    Starting at "begin", performs each activity, appends
    (name, HH:MM:SS start time, elapsed seconds) to self.return_list,
    optionally sleeps for the activity's "duration", then draws the next
    activity from the weighted "nextstep" table.  After self.duration
    minutes the next activity is forced to "end".
    """
    current_activity = "begin"
    # Deadline in wall-clock seconds; self.duration is in minutes.
    endtime = time.time() + (self.duration * 60.)
    while True:
        if time.time() >= endtime:
            current_activity = "end"

        # Prep and do the current activity
        try:
            start_clock_time = datetime.datetime.today()
            start_time = time.time()
            # NOTE(review): `result` is never used after this call.
            result=self.activity[current_activity]["method"](self.activity[current_activity]["args"])
            self.return_list.append((
                current_activity,
                '%02d:%02d:%02d' % (start_clock_time.hour,start_clock_time.minute,start_clock_time.second),
                round((time.time() - start_time),2),
            ))
        except Exception as e:
            if current_activity != "end":
                # Mid-run failures abort the simulation.
                raise
            else:
                # A failing "end" activity must not mask a completed run.
                logging.error("Error on end: %s" % e)

        if current_activity == "end":
            break

        # Wait before the next activity
        if "duration" in self.activity[current_activity]:
            if self.verbosity >= 2:
                # NOTE(review): 24601 appears to be a display offset for
                # behavior_profile (Les Miserables easter egg?) -- confirm.
                print "(" + str(self.behavior_profile-24601) + ")" + "sleeping for ", self.activity[current_activity]["duration"]
            time.sleep(self.activity[current_activity]["duration"])

        # Choose the next activity: first entry whose threshold covers the
        # draw wins (thresholds act as cumulative probabilities).
        next_activity_random = round(self.random.random(),2)
        for threshold, next_activity in self.activity[current_activity]["nextstep"]:
            if threshold >= next_activity_random:
                if self.verbosity >= 2:
                    print "(" + str(self.behavior_profile-24601) + ")" + str(next_activity_random), "next_activity =", next_activity
                current_activity = next_activity
                break
def move_exercises(lang_code):
    """Move downloaded, localized exercise HTML files for ``lang_code`` into
    the localized exercise directory, then remove the (emptied) download
    directory."""
    lang_pack_location = os.path.join(LOCALE_ROOT, lang_code)
    src_exercise_dir = os.path.join(lang_pack_location, "exercises")
    dest_exercise_dir = get_localized_exercise_dirpath(lang_code, is_central_server=False)

    if not os.path.exists(src_exercise_dir):
        logging.warn("Could not find downloaded exercises; skipping: %s" % src_exercise_dir)
    else:
        # Move over one at a time, to combine with any other resources that were there before.
        ensure_dir(dest_exercise_dir)
        all_exercise_files = glob.glob(os.path.join(src_exercise_dir, "*.html"))
        logging.info("Moving %d downloaded exercises to %s" % (len(all_exercise_files), dest_exercise_dir))

        for exercise_file in all_exercise_files:
            shutil.move(exercise_file, os.path.join(dest_exercise_dir, os.path.basename(exercise_file)))

        logging.debug("Removing empty directory")  # typo fix: was "emtpy"
        try:
            shutil.rmtree(src_exercise_dir)
        except Exception as e:
            # Bug fix: the message previously said "dubbed video directory",
            # copy-pasted from move_dubbed_video_map; this is the exercise dir.
            logging.error("Error removing exercise directory (%s): %s" % (src_exercise_dir, e))
def save(self, *args, **kwargs):
    """Validate, recompute completion state, and touch the user's login
    activity before delegating to the parent save().  All of this is
    skipped for imported (synced) records."""
    if not kwargs.get("imported", False):
        self.full_clean()

        # Compute learner status: completion flips on once points reach the threshold.
        was_complete = self.complete
        self.complete = self.points >= VideoLog.POINTS_PER_VIDEO
        if self.complete and not was_complete:
            self.completion_timestamp = datetime.now()
            self.completion_counter = Device.get_own_device().get_counter()

        # Tell logins that they are still active (ignoring validation failures).
        # TODO(bcipolli): Could log video information in the future.
        try:
            UserLog.update_user_activity(
                self.user,
                activity_type="login",
                update_datetime=(self.completion_timestamp or datetime.now()),
            )
        except ValidationError as e:
            logging.error("Failed to update userlog during video: %s" % e)

    super(VideoLog, self).save(*args, **kwargs)