def setUp(self):
    """Create a browser to use for test cases.
    Try a bunch of different browsers; hopefully one of them works!"""
    super(BrowserTestCase, self).setUp()

    # Clear the session cache after every test case, to keep things clean.
    Session.objects.all().delete()

    # Can use an already-launched browser.
    if self.persistent_browser:
        (self.browser, self.admin_user, self.admin_pass) = setup_test_env(persistent_browser=self.persistent_browser)

    # Must create a new browser to use
    else:
        for browser_type in ["Firefox", "Chrome", "Ie", "Opera"]:
            try:
                (self.browser, self.admin_user, self.admin_pass) = setup_test_env(browser_type=browser_type)
                break
            except Exception as e:
                logging.error("Could not create browser %s through selenium: %s" % (browser_type, e))
def force_job(command, name="", frequency="YEARLY", stop=False, launch_cron=True): """ Mark a job as to run immediately (or to stop). By default, call cron directly, to resolve. """ jobs = Job.objects.filter(command=command) if jobs.count() > 0: job = jobs[0] else: job = Job(command=command) job.frequency = frequency job.name = name or command if stop: job.is_running = False else: job.next_run = datetime.now() job.save() if launch_cron: # Just start cron directly, so that the process starts immediately. # Note that if you're calling force_job frequently, then # you probably want to avoid doing this on every call. if get_count() and not job_status(command): logging.debug("Ready to launch command '%s'" % command) call_command_async("cron", manage_py_dir=settings.PROJECT_PATH)
def save(self, update_userlog=True, *args, **kwargs):
    # To deal with backwards compatibility,
    #   check video_id, whether imported or not.
    if not self.video_id:
        assert kwargs.get("imported", False), "video_id better be set by internal code."
        assert self.youtube_id, "If not video_id, you better have set youtube_id!"
        self.video_id = i18n.get_video_id(self.youtube_id) or self.youtube_id  # for unknown videos, default to the youtube_id

    if not kwargs.get("imported", False):
        self.full_clean()

        # Compute learner status
        already_complete = self.complete
        self.complete = (self.points >= VideoLog.POINTS_PER_VIDEO)
        if not already_complete and self.complete:
            self.completion_timestamp = datetime.now()

        # Tell logins that they are still active (ignoring validation failures).
        # TODO(bcipolli): Could log video information in the future.
        if update_userlog:
            try:
                UserLog.update_user_activity(self.user, activity_type="login", update_datetime=(self.completion_timestamp or datetime.now()), language=self.language)
            except ValidationError as e:
                logging.error("Failed to update userlog during video: %s" % e)

    super(VideoLog, self).save(*args, **kwargs)
def download_srt_from_3rd_party(*args, **kwargs):
    """Download subtitles specified by command line args"""

    lang_code = kwargs.get("lang_code", None)

    # If a language is specified, do just that one; if not, do all.
    if lang_code:
        srt_list_path = get_lang_map_filepath(lang_code)
        try:
            videos = json.loads(open(srt_list_path).read())
        except Exception:
            raise LanguageCodeDoesNotExist(lang_code)
        download_if_criteria_met(videos, *args, **kwargs)

    else:
        for filename in get_all_download_status_files():
            try:
                videos = json.loads(open(filename).read())
            except Exception as e:
                logging.error(e)
                raise CommandError("Unable to open %s. The file might be corrupted. Please re-run the generate_subtitle_map command to regenerate it." % filename)

            try:
                kwargs["lang_code"] = os.path.basename(filename).split("_")[0]
                download_if_criteria_met(videos, *args, **kwargs)
            except Exception as e:
                logging.error(e)
                raise CommandError("Error while downloading language srts: %s" % e)
def end_user_activity(cls, user, activity_type="login", end_datetime=None, suppress_save=False):
    # Don't accept language--we're just closing previous activity.
    """Helper function to complete an existing user activity log entry."""

    # Do nothing if the max # of records is zero
    #   (i.e. this functionality is disabled)
    if not cls.is_enabled():
        return

    if not user:
        raise ValidationError("A valid user must always be specified.")
    if not end_datetime:  # must be done outside the function header (else becomes static)
        end_datetime = datetime.now()
    activity_type = cls.get_activity_int(activity_type)

    cur_log = cls.get_latest_open_log_or_None(user=user, activity_type=activity_type)
    if cur_log:
        # How could you start after you ended??
        if cur_log.start_datetime > end_datetime:
            raise ValidationError("Update time must always be later than the login time.")
    else:
        # No unstopped starts.  Start should have been called first!
        logging.warn("%s: Had to BEGIN a user log entry, but ENDING(%d)! @ %s" % (user.username, activity_type, end_datetime))
        cur_log = cls.begin_user_activity(user=user, activity_type=activity_type, start_datetime=end_datetime, suppress_save=True)

    logging.debug("%s: Logging LOGOUT activity @ %s" % (user.username, end_datetime))
    cur_log.end_datetime = end_datetime
    if not suppress_save:
        cur_log.save()  # total-seconds will be computed here.
    return cur_log
def recurse_nodes_to_clean_related_videos(node):
    """
    Internal function for recursing the topic tree and marking related exercises.
    Requires rebranding of metadata done by recurse_nodes function.
    """
    def get_video_node(video_slug, node):
        if node["kind"] == "Topic":
            for child in node.get("children", []):
                video_node = get_video_node(video_slug, child)
                if video_node:
                    return video_node
        elif node["kind"] == "Video" and node["slug"] == video_slug:
            return node
        return None

    if node["kind"] == "Exercise":
        videos_to_delete = []
        for vi, video_slug in enumerate(node["related_video_readable_ids"]):
            if not get_video_node(video_slug, topictree):
                videos_to_delete.append(vi)
        for vi in reversed(videos_to_delete):
            logging.warn("Deleting unknown video %s" % node["related_video_readable_ids"][vi])
            del node["related_video_readable_ids"][vi]

    for child in node.get("children", []):
        recurse_nodes_to_clean_related_videos(child)
def call_outside_command_with_output(kalite_location, command, *args, **kwargs):
    """
    Runs call_command for a KA Lite installation at the given location,
    and returns the output.
    """

    # Build the command
    cmd = (sys.executable, kalite_location + "/kalite/manage.py", command)
    for arg in args:
        cmd += (arg,)
    for key, val in kwargs.items():
        key = key.replace("_", "-")
        prefix = "--" if command != "runcherrypyserver" else ""
        if isinstance(val, bool):
            cmd += ("%s%s" % (prefix, key),)
        else:
            cmd += ("%s%s=%s" % (prefix, key, str(val)),)
    logging.debug(cmd)

    # Execute the command, using subprocess/Popen
    cwd = os.getcwd()
    os.chdir(kalite_location + "/kalite")
    p = subprocess.Popen(cmd, shell=False, cwd=os.path.split(cmd[0])[0], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out = p.communicate()
    os.chdir(cwd)
    logging.debug(out[1] if out[1] else out[0])

    # Tuple output of stdout, stderr, and exit code
    return out + (1 if out[1] else 0,)
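# Usage sketch (hedged: the install path and command are illustrative). Boolean
# kwargs become bare flags and other kwargs become --key=value, with underscores
# turned into dashes; note the third return value is a pseudo exit code (1 if
# anything appeared on stderr, else 0).
def _example_call_outside_command():
    stdout, stderr, rc = call_outside_command_with_output("/opt/ka-lite", "migrate", database="default", verbosity="0")
    # Builds roughly: manage.py migrate --database=default --verbosity=0
    return rc == 0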
def handle(self, *args, **options):
    if len(args) == 1 and args[0] == "test":
        # Callback for "weak" test--checks at least that
        #   the django project compiles (local_settings is OK)
        sys.stdout.write("Success!\n")
        sys.exit(0)

    try:
        if options.get("branch", None):
            # Specified a repo
            self.update_via_git(**options)

        elif options.get("zip_file", None):
            # Specified a file
            if not os.path.exists(options.get("zip_file")):
                raise CommandError("Specified zip file does not exist: %s" % options.get("zip_file"))
            self.update_via_zip(**options)

        elif options.get("url", None):
            self.update_via_zip(**options)

        elif os.path.exists(settings.PROJECT_PATH + "/../.git"):
            # If we detect a git repo, try git
            if len(args) == 1 and not options["branch"]:
                options["branch"] = args[0]
            elif len(args) != 0:
                raise CommandError("Specified too many command-line arguments")
            self.update_via_git(**options)

        elif len(args) > 1:
            raise CommandError("Too many command-line arguments.")

        elif len(args) == 1:
            # Specify zip via first command-line arg
            if options['zip_file'] is not None:
                raise CommandError("Cannot specify a zipfile as unnamed and named command-line arguments at the same time.")
            options['zip_file'] = args[0]
            self.update_via_zip(**options)

        else:
            # No params, no git repo: try to get a file online.
            zip_file = tempfile.mkstemp()[1]
            for url in ["http://%s/api/download/kalite/latest/%s/%s/" % (settings.CENTRAL_SERVER_HOST, platform.system().lower(), "en")]:
                logging.info("Downloading repo snapshot from %s to %s" % (url, zip_file))
                try:
                    urllib.urlretrieve(url, zip_file)
                    sys.stdout.write("success @ %s\n" % url)
                    break
                except Exception as e:
                    logging.debug("Failed to get zipfile from %s: %s" % (url, e))
                    continue
            options["zip_file"] = zip_file
            self.update_via_zip(**options)

    except Exception as e:
        if self.started() and not self.ended():
            self.cancel(stage_status="error", notes=unicode(e))
        raise

    assert self.ended(), "Subroutines should complete() if they start()!"
def __init__(self, comment=None, fixture=None, **kwargs):
    self.return_dict = {}
    self.return_dict['comment'] = comment
    self.return_dict['class'] = type(self).__name__
    self.return_dict['uname'] = platform.uname()
    self.return_dict['fixture'] = fixture

    try:
        self.verbosity = int(kwargs.get("verbosity"))
    except Exception:
        self.verbosity = 1

    try:
        branch = subprocess.Popen(["git", "describe", "--contains", "--all", "HEAD"], stdout=subprocess.PIPE).communicate()[0]
        self.return_dict['branch'] = branch[:-1]
        head = subprocess.Popen(["git", "log", "--pretty=oneline", "--abbrev-commit", "--max-count=1"], stdout=subprocess.PIPE).communicate()[0]
        self.return_dict['head'] = head[:-1]
    except Exception:
        self.return_dict['branch'] = None
        self.return_dict['head'] = None

    # If setup fails, what could we do?
    #   Letting the exception bubble up is the best option.
    try:
        self._setup(**kwargs)
    except Exception as e:
        logging.debug("Failed setup (%s); trying to tear down" % e)
        try:
            self._teardown()
        except Exception:
            pass
        raise
def force_job(command, name="", frequency="YEARLY", stop=False, launch_cron=True): """ Mark a job as to run immediately (or to stop). By default, call cron directly, to resolve. """ jobs = Job.objects.filter(command=command) if jobs.count() > 0: job = jobs[0] else: job = Job(command=command) job.frequency = frequency job.name = name or command if stop: job.is_running = False else: job.next_run = datetime.now() job.save() if launch_cron: # Just start cron directly, so that the process starts immediately. # Note that if you're calling force_job frequently, then # you probably want to avoid doing this on every call. if get_count() and not job_status(command): logging.debug("Ready to launch command '%s'" % command) call_command_async("cron")
def handle(self, *args, **options): if not options["lang_code"]: raise CommandError("You must specify a language code.") lang_code = lcode_to_ietf(options["lang_code"]) if lang_code not in AVAILABLE_EXERCISE_LANGUAGE_CODES: logging.info("No exercises available for language %s" % lang_code) else: # Get list of exercises exercise_ids = options["exercise_ids"].split( ",") if options["exercise_ids"] else None exercise_ids = exercise_ids or ([ ex["id"] for ex in get_topic_exercises(topic_id=options["topic_id"]) ] if options["topic_id"] else None) exercise_ids = exercise_ids or get_node_cache("Exercise").keys() # Download the exercises for exercise_id in exercise_ids: scrape_exercise(exercise_id=exercise_id, lang_code=lang_code, force=options["force"]) logging.info("Process complete.")
def am_i_online(url, expected_val=None, search_string=None, timeout=5, allow_redirects=True):
    """Test whether we are online or not.
    Returns True or False.  Eats all exceptions!
    """
    assert not (search_string and expected_val is not None), "Search string and expected value cannot both be set"

    try:
        if not search_string and expected_val is None:
            response = requests.head(url)
        else:
            response = requests.get(url, timeout=timeout, allow_redirects=allow_redirects)

        # Validate that the response came from the requested url
        if response.status_code != 200:
            return False
        elif not allow_redirects and response.url != url:
            return False

        # Check the output, if expected values are specified
        if expected_val is not None:
            return expected_val == response.text
        elif search_string:
            return search_string in response.text

        return True

    except Exception as e:
        logging.debug("am_i_online: %s" % e)
        return False
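# Usage sketch (hedged: URLs are illustrative). expected_val demands an exact
# body match, while search_string only needs a substring; the assert above keeps
# the two mutually exclusive.
def _example_connectivity_check():
    online = am_i_online("http://www.google.com", timeout=3)
    server_ok = am_i_online("http://example.com/status", search_string="OK")
    return online and server_ok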
def handle(self, *args, **options):
    if not settings.CENTRAL_SERVER:
        raise CommandError("This must only be run on the central server.")

    # Set up the refresh date
    if not options["date_since_attempt"]:
        date_since_attempt = datetime.datetime.now() - datetime.timedelta(days=options["days_since_attempt"])
        options["date_since_attempt"] = date_since_attempt.strftime("%m/%d/%Y")
    converted_date = convert_date_input(options.get("date_since_attempt"))

    updated_mappings = create_all_mappings(force=options.get("force"), frequency_to_save=5, response_to_check=options.get("response_code"), date_to_check=converted_date)
    logging.info("Executed successfully. Updating language => subtitle mapping to record any changes!")

    if updated_mappings:
        language_srt_map = update_language_srt_map()
        print_language_availability_table(language_srt_map)

    logging.info("Process complete.")
def update_user_activity(cls, user, activity_type="login", update_datetime=None, language=None, suppress_save=False):
    """Helper function to update an existing user activity log entry."""

    # Do nothing if the max # of records is zero
    #   (i.e. this functionality is disabled)
    if not cls.is_enabled():
        return

    if not user:
        raise ValidationError("A valid user must always be specified.")
    if not update_datetime:  # must be done outside the function header (else becomes static)
        update_datetime = datetime.now()
    activity_type = cls.get_activity_int(activity_type)

    cur_log = cls.get_latest_open_log_or_None(user=user, activity_type=activity_type)
    if cur_log:
        # How could you start after you updated??
        if cur_log.start_datetime > update_datetime:
            raise ValidationError("Update time must always be later than the login time.")
    else:
        # No unstopped starts.  Start should have been called first!
        logging.warn("%s: Had to create a user log entry on an UPDATE(%d)! @ %s" % (user.username, activity_type, update_datetime))
        cur_log = cls.begin_user_activity(user=user, activity_type=activity_type, start_datetime=update_datetime, suppress_save=True)

    logging.debug("%s: UPDATE activity (%d) @ %s" % (user.username, activity_type, update_datetime))
    cur_log.last_active_datetime = update_datetime
    cur_log.language = language or cur_log.language  # set the language to the current language, if there is one.
    if not suppress_save:
        cur_log.save()
    return cur_log
def validate_times(srt_content, srt_issues):
    times = re.findall("([0-9:,]+) --> ([0-9:,]+)\r\n", srt_content, re.S | re.M)

    parse_time = lambda t: datetime.datetime.strptime(t, "%H:%M:%S,%f")
    for i in range(len(times)):
        try:
            between_subtitle_time = datediff(parse_time(times[i][0]), parse_time(times[i - 1][1] if i > 0 else "00:00:00,000"))
            within_subtitle_time = datediff(parse_time(times[i][1]), parse_time(times[i][0]))

            if between_subtitle_time > 60.:
                srt_issues.append("Between-subtitle gap of %5.2f seconds" % between_subtitle_time)

            if within_subtitle_time > 60.:
                srt_issues.append("Within-subtitle duration of %5.2f seconds" % within_subtitle_time)
            elif within_subtitle_time == 0.:
                logging.debug("Subtitle flies by too fast (%s --> %s)." % times[i])

            #print "Start: %s\tB: %5.2f\tW: %5.2f" % (parse_time(times[i][0]), between_subtitle_time, within_subtitle_time)
        except Exception as e:
            if not times[i][1].startswith('99:59:59'):
                srt_issues.append("Error checking times: %s" % e)
            else:
                if len(times) - i > 1 and len(times) - i - 1 > len(times) / 10.:
                    if i == 0:
                        srt_issues.append("No subtitles have a valid starting point.")
                    else:
                        logging.debug("Hit end of movie, but %d (of %d) subtitle(s) remain in the queue." % (len(times) - i - 1, len(times)))
                break
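# A small illustration of what validate_times flags (hedged: this assumes
# datediff returns the difference in seconds, as the thresholds above suggest).
def _example_validate_times():
    srt_issues = []
    srt_content = (
        "1\r\n00:00:01,000 --> 00:02:05,000\r\nThis subtitle lingers for over a minute.\r\n\r\n"
        "2\r\n00:04:00,000 --> 00:04:02,000\r\n...after a long silent gap.\r\n\r\n"
    )
    validate_times(srt_content, srt_issues)
    return srt_issues  # expect one within-subtitle and one between-subtitle issue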
def add_to_summary(sender, **kwargs):
    assert UserLog.is_enabled(), "We shouldn't be saving unless UserLog is enabled."

    instance = kwargs["instance"]

    if not instance.start_datetime:
        raise ValidationError("start_datetime cannot be None")
    if instance.last_active_datetime and instance.start_datetime > instance.last_active_datetime:
        raise ValidationError("UserLog date consistency check for start_datetime and last_active_datetime")

    if instance.end_datetime and not instance.total_seconds:
        # Compute total_seconds, save to summary
        # Note: only supports setting end_datetime once!
        instance.full_clean()

        # The top computation is more lenient: user activity is just time logged in, literally.
        # The bottom computation is more strict: user activity is from start until the last "action"
        #   recorded--in the current case, that means from login until the last moment an exercise or
        #   video log was updated.
        #instance.total_seconds = datediff(instance.end_datetime, instance.start_datetime, units="seconds")
        instance.total_seconds = 0 if not instance.last_active_datetime else datediff(instance.last_active_datetime, instance.start_datetime, units="seconds")

        # Confirm the result (output info first for easier debugging)
        if instance.total_seconds < 0:
            raise ValidationError("Total learning time should always be non-negative.")
        logging.debug("%s: total time (%d): %d seconds" % (instance.user.username, instance.activity_type, instance.total_seconds))

        # Save only completed log items to the UserLogSummary
        UserLogSummary.add_log_to_summary(instance)
def login(request, facility):
    facility_id = facility and facility.id or None
    facilities = list(Facility.objects.all())

    # Fix for #1211: refresh cached facility info when it's free and relevant
    refresh_session_facility_info(request, facility_count=len(facilities))

    if request.method == 'POST':
        # Log out any Django user or facility user
        logout(request)

        username = request.POST.get("username", "")
        password = request.POST.get("password", "")

        # First, try logging in as a Django user
        user = authenticate(username=username, password=password)
        if user:
            auth_login(request, user)
            return HttpResponseRedirect(request.next or reverse("easy_admin"))

        # Try logging in as a facility user
        form = LoginForm(data=request.POST, request=request, initial={"facility": facility_id})
        if form.is_valid():
            user = form.get_user()

            try:
                UserLog.begin_user_activity(user, activity_type="login", language=request.language)  # Success! Log the event (ignoring validation failures)
            except ValidationError as e:
                logging.error("Failed to begin_user_activity upon login: %s" % e)

            request.session["facility_user"] = user
            messages.success(request, _("You've been logged in! We hope you enjoy your time with KA Lite ") + _("-- be sure to log out when you finish."))

            # Send them back from whence they came
            landing_page = form.cleaned_data["callback_url"]
            if not landing_page:
                # Just going back to the homepage?  We can do better than that.
                landing_page = reverse("coach_reports") if form.get_user().is_teacher else None
                landing_page = landing_page or (reverse("account_management") if not settings.package_selected("RPi") else reverse("homepage"))

            return HttpResponseRedirect(form.non_field_errors() or request.next or landing_page)

        else:
            messages.error(
                request,
                _("There was an error logging you in. Please correct any errors listed below, and try again."),
            )

    else:  # render the unbound login form
        referer = urlparse.urlparse(request.META["HTTP_REFERER"]).path if request.META.get("HTTP_REFERER") else None
        # Never use the homepage as the referer
        if referer in [reverse("homepage"), reverse("add_facility_student")]:
            referer = None
        form = LoginForm(initial={"facility": facility_id, "callback_url": referer})

    return {
        "form": form,
        "facilities": facilities,
    }
def begin_user_activity(cls, user, activity_type="login", start_datetime=None, language=None, suppress_save=False):
    """Helper function to create a user activity log entry."""

    # Do nothing if the max # of records is zero
    #   (i.e. this functionality is disabled)
    if not cls.is_enabled():
        return

    if not user:
        raise ValidationError("A valid user must always be specified.")
    if not start_datetime:  # must be done outside the function header (else becomes static)
        start_datetime = datetime.now()
    activity_type = cls.get_activity_int(activity_type)

    cur_log = cls.get_latest_open_log_or_None(user=user, activity_type=activity_type)
    if cur_log:
        # Seems we're logging in without logging out of the previous.
        # Best thing to do is simulate a login
        #   at the previous last update time.
        #
        # Note: this can be a recursive call
        logging.warn("%s: had to END activity on a begin(%d) @ %s" % (user.username, activity_type, start_datetime))
        # Don't mark the current language when closing an old one
        cls.end_user_activity(user=user, activity_type=activity_type, end_datetime=cur_log.last_active_datetime)  # can't suppress save
        cur_log = None

    # Create a new entry
    logging.debug("%s: BEGIN activity(%d) @ %s" % (user.username, activity_type, start_datetime))
    cur_log = cls(user=user, activity_type=activity_type, start_datetime=start_datetime, last_active_datetime=start_datetime, language=language)
    if not suppress_save:
        cur_log.save()

    return cur_log
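# Lifecycle sketch for the activity-log helpers (hedged: assumes these
# classmethods live on UserLog and that `user` is a saved FacilityUser).
def _example_activity_lifecycle(user):
    UserLog.begin_user_activity(user, activity_type="login")
    # ... each exercise/video save pings the open log:
    UserLog.update_user_activity(user, activity_type="login")
    # ... and logout closes it; total_seconds is computed in save():
    UserLog.end_user_activity(user, activity_type="login")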
def get_cache_key(path=None, url_name=None, cache=None, failure_ok=False):
    """Call into Django to retrieve a cache key for the given url, or given url name

    NOTE: ONLY RETURNS CACHE_KEY IF THE CACHE_ITEM HAS BEEN CREATED ELSEWHERE!!!"""

    assert (path or url_name) and not (path and url_name), "Must have path or url_name parameter, but not both"

    if not cache:
        cache = get_web_cache()

    request = HttpRequest()
    request.path = path or reverse(url_name)
    request.session = {settings.LANGUAGE_COOKIE_NAME: translation.get_language()}

    cache_key = django_get_cache_key(request, cache=cache)
    if not cache_key and not failure_ok:
        logging.warn("The cache item does not exist, and so could not be retrieved (path=%s)." % request.path)

    return cache_key
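# Usage sketch (hedged: "homepage" is an example url_name). Pass either path or
# url_name, never both; a None return simply means no cache item exists yet.
def _example_cache_key_lookup():
    key = get_cache_key(url_name="homepage", failure_ok=True)
    return key is not None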
def sync_device_records(self):
    server_counters = self.get_server_device_counters()
    client_counters = self.get_client_device_counters()

    devices_to_download = []
    devices_to_upload = []

    counters_to_download = {}
    counters_to_upload = {}

    for device_id in client_counters:
        if device_id not in server_counters:
            devices_to_upload.append(device_id)
            counters_to_upload[device_id] = 0
        elif client_counters[device_id] > server_counters[device_id]:
            counters_to_upload[device_id] = server_counters[device_id]

    for device_id in server_counters:
        if device_id not in client_counters:
            devices_to_download.append(device_id)
            counters_to_download[device_id] = 0
        elif server_counters[device_id] > client_counters[device_id]:
            counters_to_download[device_id] = client_counters[device_id]

    response = json.loads(self.post("device/download", {"devices": devices_to_download}).content)
    # As usual, we're deserializing from the central server, so we assume that what we're getting
    #   is "smartly" dumbed down for us.  We don't need to specify the src_version, as it's
    #   pre-cleaned for us.
    download_results = save_serialized_models(response.get("devices", "[]"), increment_counters=False)

    # BUGFIX(bcipolli) metadata only gets created if models are
    #   streamed; if a device is downloaded but no models are downloaded,
    #   metadata does not exist.  Let's just force it here.
    for device_id in devices_to_download:  # force
        try:
            d = Device.objects.get(id=device_id)
        except Exception as e:
            logging.error("Exception locating device %s for metadata creation: %s" % (device_id, e))
            continue

        if not d.get_counter_position():  # this would be nonzero if the device sync'd models
            d.set_counter_position(counters_to_download[device_id])

    self.session.models_downloaded += download_results["saved_model_count"]
    self.session.errors += "error" in download_results

    self.session.save()

    # TODO(jamalex): upload local devices as well? Only needed once we have P2P syncing.
    return (counters_to_download, counters_to_upload)
def download_subtitle(youtube_id, lang_code, format="srt"):
    """
    Return subtitles for YouTube ID in the language specified.
    Return False if they do not exist.  Update local JSON accordingly.

    Note: the srt map deals with amara, so uses ietf codes (e.g. en-us)
    """
    assert format == "srt", "We only support srt download at the moment."

    # The srt map deals with amara, so uses ietf codes (e.g. en-us)
    with open(SRTS_JSON_FILEPATH, "r") as fp:
        api_info_map = json.load(fp)

    # Get the amara id
    amara_code = api_info_map.get(youtube_id).get("amara_code")

    # Make the request
    # Please see http://amara.readthedocs.org/en/latest/api.html
    base_url = "https://amara.org/api2/partners/videos"
    resp = make_request(AMARA_HEADERS, "%s/%s/languages/%s/subtitles/?format=srt" % (base_url, amara_code, lang_code.lower()))

    if isinstance(resp, basestring) or not resp:
        return resp
    else:
        # Return the subtitle text, replacing empty subtitle lines with
        #   spaces to make the FLV player happy.
        try:
            resp.encoding = "UTF-8"
            response = (resp.text or u"").replace("\n\n\n", "\n \n\n").replace("\r\n\r\n\r\n", "\r\n \r\n\r\n")
        except Exception as e:
            logging.error(e)
            response = "client-error"
        return response
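# Usage sketch (hedged: the YouTube ID is illustrative and SRTS_JSON_FILEPATH
# must already be populated). A falsy or string return can signal failure (e.g.
# "client-error"), so check the shape before writing to disk.
def _example_download_subtitle():
    srt = download_subtitle("NSSoMafbBqQ", "en-us")
    if srt and srt != "client-error":
        with open("NSSoMafbBqQ.srt", "w") as f:
            f.write(srt.encode("utf-8"))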
def download_subtitle(youtube_id, lang_code, format="srt"):
    """Return subtitles for YouTube ID in the language specified.
    Return False if they do not exist.  Update local JSON accordingly."""

    assert format == "srt", "We only support srt download at the moment."

    api_info_map = json.loads(open(settings.SUBTITLES_DATA_ROOT + SRTS_JSON_FILENAME).read())

    # Get the amara id
    amara_code = api_info_map.get(youtube_id).get("amara_code")

    # Make the request
    # Please see http://amara.readthedocs.org/en/latest/api.html
    base_url = "https://amara.org/api2/partners/videos"
    r = make_request(headers, "%s/%s/languages/%s/subtitles/?format=srt" % (base_url, amara_code, lang_code))

    if isinstance(r, basestring):
        return r
    else:
        # Return the subtitle text, replacing empty subtitle lines with
        #   spaces to make the FLV player happy.
        try:
            r.encoding = "UTF-8"
            response = (r.text or u"") \
                .replace("\n\n\n", "\n \n\n") \
                .replace("\r\n\r\n\r\n", "\r\n \r\n\r\n")
        except Exception as e:
            logging.error(e)
            response = "client-error"
        return response
def update_json(youtube_id, lang_code, downloaded, api_response, time_of_attempt):
    """Update language_srt_map to reflect download status"""

    # Open JSON file
    filepath = get_lang_map_filepath(lang_code)
    try:
        language_srt_map = json.loads(open(filepath).read())
    except Exception as e:
        logging.error("Something went wrong while trying to open the json file (%s): %s" % (filepath, e))
        return False

    # Create updated entry
    entry = language_srt_map[youtube_id]
    entry["downloaded"] = downloaded
    entry["api_response"] = api_response
    entry["last_attempt"] = time_of_attempt
    if api_response == "success":
        entry["last_success"] = time_of_attempt

    # Update full-size JSON with new information
    language_srt_map[youtube_id].update(entry)

    # Write it to file
    logging.info("File updated.")
    json_file = open(filepath, "wb")
    json_file.write(json.dumps(language_srt_map))
    json_file.close()
    return True
def recurse_nodes_to_extract_knowledge_map(node, node_cache):
    """
    Internal function for recursing the topic tree and building the knowledge map.
    Requires rebranding of metadata done by recurse_nodes function.
    """
    assert node["kind"] == "Topic"

    if node.get("in_knowledge_map", None):
        if node["slug"] not in knowledge_map["topics"]:
            logging.debug("Not in knowledge map: %s" % node["slug"])
            node["in_knowledge_map"] = False
            for cached_node in node_cache["Topic"][node["slug"]]:
                cached_node["in_knowledge_map"] = False

        knowledge_topics[node["slug"]] = topic_tools.get_all_leaves(node, leaf_type="Exercise")

        if not knowledge_topics[node["slug"]]:
            sys.stderr.write("Removing topic from topic tree: no exercises. %s\n" % node["slug"])
            del knowledge_topics[node["slug"]]
            del knowledge_map["topics"][node["slug"]]
            node["in_knowledge_map"] = False
            for cached_node in node_cache["Topic"][node["slug"]]:
                cached_node["in_knowledge_map"] = False
    else:
        if node["slug"] in knowledge_map["topics"]:
            sys.stderr.write("Removing topic from topic tree; does not belong. '%s'\n" % node["slug"])
            logging.warn("Removing from knowledge map: %s" % node["slug"])
            del knowledge_map["topics"][node["slug"]]

    for child in [n for n in node.get("children", []) if n["kind"] == "Topic"]:
        recurse_nodes_to_extract_knowledge_map(child, node_cache)
def recurse_nodes_to_delete_exercise(node, OLD_NODE_CACHE):
    """
    Internal function for recursing the topic tree and removing new exercises.
    Requires rebranding of metadata done by recurse_nodes function.
    """
    # Stop recursing when we hit leaves
    if node["kind"] != "Topic":
        return

    children_to_delete = []
    for ci, child in enumerate(node.get("children", [])):
        # Mark all unrecognized exercises for deletion
        if child["kind"] == "Exercise":
            if not child["slug"] in OLD_NODE_CACHE["Exercise"].keys():
                children_to_delete.append(ci)

        # Recurse over children to delete
        elif child.get("children", None):
            recurse_nodes_to_delete_exercise(child, OLD_NODE_CACHE)

            # Delete children without children (all their children were removed)
            if not child.get("children", None):
                logging.debug("Removing now-childless topic node '%s'" % child["slug"])
                children_to_delete.append(ci)

            # If there are no longer exercises, be honest about it
            elif not any([ch["kind"] == "Exercise" or "Exercise" in ch.get("contains", []) for ch in child["children"]]):
                child["contains"] = list(set(child["contains"]) - set(["Exercise"]))

    # Do the actual deletion
    for i in reversed(children_to_delete):
        logging.debug("Deleting unknown exercise %s" % node["children"][i]["slug"])
        del node["children"][i]
def __init__(self, comment=None, fixture=None, **kwargs):
    self.return_dict = {}
    self.return_dict['comment'] = comment
    self.return_dict['class'] = type(self).__name__
    self.return_dict['uname'] = platform.uname()
    self.return_dict['fixture'] = fixture
    self.verbosity = int(kwargs.get("verbosity", 1))

    try:
        branch = subprocess.Popen(["git", "describe", "--contains", "--all", "HEAD"], stdout=subprocess.PIPE).communicate()[0]
        self.return_dict['branch'] = branch[:-1]
        head = subprocess.Popen(["git", "log", "--pretty=oneline", "--abbrev-commit", "--max-count=1"], stdout=subprocess.PIPE).communicate()[0]
        self.return_dict['head'] = head[:-1]
    except Exception:
        self.return_dict['branch'] = None
        self.return_dict['head'] = None

    # If setup fails, what could we do?
    try:
        self._setup(**kwargs)
    except Exception as e:
        logging.error(e)
def zip_language_packs(lang_codes=None):
    """Zip up and expose all language packs

    Converts all codes into ietf
    """
    lang_codes = lang_codes or os.listdir(LOCALE_ROOT)
    lang_codes = [lcode_to_ietf(lc) for lc in lang_codes]
    logging.info("Zipping up %d language pack(s)" % len(lang_codes))

    for lang_code_ietf in lang_codes:
        lang_code_django = lcode_to_django_dir(lang_code_ietf)
        lang_locale_path = os.path.join(LOCALE_ROOT, lang_code_django)

        if not os.path.exists(lang_locale_path):
            logging.warn("Unexpectedly skipping missing directory: %s" % lang_code_django)
            continue
        elif not os.path.isdir(lang_locale_path):
            logging.error("Skipping language where a file exists where a directory was expected: %s" % lang_code_django)
            continue

        # Create a zipfile for this language
        zip_filepath = get_language_pack_filepath(lang_code_ietf)
        ensure_dir(os.path.dirname(zip_filepath))
        logging.info("Creating zip file in %s" % zip_filepath)
        z = zipfile.ZipFile(zip_filepath, 'w', zipfile.ZIP_DEFLATED)

        # Get every single file in the directory and zip it up
        for metadata_file in glob.glob('%s/*.json' % lang_locale_path):
            z.write(os.path.join(lang_locale_path, metadata_file), arcname=os.path.basename(metadata_file))

        srt_dirpath = get_srt_path(lang_code_django)
        for srt_file in glob.glob(os.path.join(srt_dirpath, "*.srt")):
            z.write(srt_file, arcname=os.path.join("subtitles", os.path.basename(srt_file)))
        z.close()

    logging.info("Done.")
def handle(self, *args, **options):
    if settings.CENTRAL_SERVER:
        raise CommandError("Run this command on the distributed server only.")

    # Load videos
    video_sizes = softload_json(REMOTE_VIDEO_SIZE_FILEPATH, logger=logging.debug)

    # Query current files
    all_video_filepaths = glob.glob(os.path.join(settings.CONTENT_ROOT, "*.mp4"))
    logging.info("Querying sizes for %d video(s)." % len(all_video_filepaths))

    # Get all current sizes
    for video_filepath in all_video_filepaths:
        youtube_id = os.path.splitext(os.path.basename(video_filepath))[0]
        # Set to max, so that local compressed videos will not affect things.
        video_sizes[youtube_id] = max(video_sizes.get(youtube_id, 0), os.path.getsize(video_filepath))

    # Sort results
    video_sizes = OrderedDict([(key, video_sizes[key]) for key in sorted(video_sizes.keys())])

    logging.info("Saving results to disk.")
    ensure_dir(os.path.dirname(REMOTE_VIDEO_SIZE_FILEPATH))
    with open(REMOTE_VIDEO_SIZE_FILEPATH, "w") as fp:
        json.dump(video_sizes, fp, indent=2)
def generate_fake_coachreport_logs():
    teacher_password = make_password('hellothere')
    t, _ = FacilityUser.objects.get_or_create(
        facility=Facility.objects.all()[0],
        username=random.choice(firstnames),
        defaults={
            'password': teacher_password,
            'is_teacher': True,
        }
    )
    # TODO: create flags later
    num_logs = 20
    logs = []
    for _ in xrange(num_logs):
        date_logged_in = datetime.datetime.now() - datetime.timedelta(days=random.randint(1, 10))
        date_viewed_coachreport = date_logged_in + datetime.timedelta(minutes=random.randint(0, 30))
        date_logged_out = date_viewed_coachreport + datetime.timedelta(minutes=random.randint(0, 30))
        login_log = UserLog.objects.create(
            user=t,
            activity_type=UserLog.get_activity_int("login"),
            start_datetime=date_logged_in,
            last_active_datetime=date_viewed_coachreport,
            end_datetime=date_logged_out,
        )
        logging.info("created login log for teacher %s" % t.username)
        coachreport_log = UserLog.objects.create(
            user=t,
            activity_type=UserLog.get_activity_int("coachreport"),
            start_datetime=date_viewed_coachreport,
            last_active_datetime=date_viewed_coachreport,
            end_datetime=date_viewed_coachreport,
        )
        logs.append((login_log, coachreport_log))
        logging.info("created coachreport log for teacher %s" % t.username)
    return logs
def select_best_available_language(target_code, available_codes=None):
    """
    Critical function for choosing the best available language for a resource,
    given a target language code.

    This is used by video and exercise pages, for example,
    to determine what file to serve, based on available resources
    and the current requested language.
    """
    # Scrub the input
    target_code = lcode_to_django_lang(target_code)
    if available_codes is None:
        available_codes = get_installed_language_packs().keys()
    available_codes = [lcode_to_django_lang(lc) for lc in available_codes]

    # Hierarchy of language selection
    if target_code in available_codes:
        actual_code = target_code
    elif target_code.split("-", 1)[0] in available_codes:
        actual_code = target_code.split("-", 1)[0]
    elif settings.LANGUAGE_CODE in available_codes:
        actual_code = settings.LANGUAGE_CODE
    elif "en" in available_codes:
        actual_code = "en"
    elif available_codes:
        actual_code = available_codes[0]
    else:
        actual_code = None

    if actual_code != target_code:
        logging.debug("Requested code %s, got code %s" % (target_code, actual_code))
    return actual_code
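# Fallback sketch (hedged: the installed-pack list is invented for illustration).
# The cascade is: exact match, base language, settings.LANGUAGE_CODE, "en", then
# whatever is installed.
def _example_language_fallback():
    # "pt-BR" falls back to the base "pt" pack when no exact match is installed.
    return select_best_available_language("pt-BR", available_codes=["en", "pt"])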
def generate_fake_coachreport_logs(password="******"):
    t, _ = FacilityUser.objects.get_or_create(
        facility=Facility.objects.all()[0],
        username=random.choice(firstnames)
    )
    t.set_password(password)
    # TODO: create flags later
    num_logs = 20
    logs = []
    for _ in xrange(num_logs):
        date_logged_in = datetime.datetime.now() - datetime.timedelta(days=random.randint(1, 10))
        date_viewed_coachreport = date_logged_in + datetime.timedelta(minutes=random.randint(0, 30))
        date_logged_out = date_viewed_coachreport + datetime.timedelta(minutes=random.randint(0, 30))
        login_log = UserLog.objects.create(
            user=t,
            activity_type=UserLog.get_activity_int("login"),
            start_datetime=date_logged_in,
            last_active_datetime=date_viewed_coachreport,
            end_datetime=date_logged_out,
        )
        logging.info("created login log for teacher %s" % t.username)
        coachreport_log = UserLog.objects.create(
            user=t,
            activity_type=UserLog.get_activity_int("coachreport"),
            start_datetime=date_viewed_coachreport,
            last_active_datetime=date_viewed_coachreport,
            end_datetime=date_viewed_coachreport,
        )
        logs.append((login_log, coachreport_log))
        logging.info("created coachreport log for teacher %s" % t.username)
    return logs
def handle(self, *args, **options):
    logging.basicConfig(stream=sys.stdout, datefmt="%Y-%m-%d %H:%M:%S", format="[%(asctime)-15s] %(message)s")

    try:
        time_wait = getattr(settings, "CRONSERVER_FREQUENCY", 60) if not args or not args[0].strip() else float(args[0])
    except Exception:
        raise CommandError("Invalid wait time: %s is not a number." % args[0])

    try:
        sys.stdout.write("Starting cronserver.  Jobs will run every %d seconds.\n" % time_wait)
        #sys.stdout.write("Quit the server with CONTROL-C.\n")

        # Run server until killed
        while True:
            thread = CronThread(gc=options.get("gc", False), mp=options.get("prof", False))
            thread.start()
            sleep(time_wait)
    except KeyboardInterrupt:
        logger.info("Exiting...\n")
        sys.exit()
def end_user_activity(cls, user, activity_type="login", end_datetime=None):
    """Helper function to complete an existing user activity log entry."""

    # Do nothing if the max # of records is zero or None
    #   (i.e. this functionality is disabled)
    if not settings.USER_LOG_MAX_RECORDS:
        return

    assert user is not None, "A valid user must always be specified."
    if not end_datetime:  # must be done outside the function header (else becomes static)
        end_datetime = datetime.now()
    activity_type = cls.get_activity_int(activity_type)
    cur_user_log_entry = get_object_or_None(cls, user=user, end_datetime=None)

    # No unstopped starts.  Start should have been called first!
    if not cur_user_log_entry:
        logging.warn("%s: Had to create a user log entry, but STOPPING('%d')! @ %s" % (user.username, activity_type, end_datetime))
        cur_user_log_entry = cls.begin_user_activity(user=user, activity_type=activity_type, start_datetime=end_datetime)

    logging.debug("%s: Logging LOGOUT activity @ %s" % (user.username, end_datetime))
    cur_user_log_entry.end_datetime = end_datetime
    cur_user_log_entry.save()  # total-seconds will be computed here.
def update_user_activity(cls, user, activity_type="login", update_datetime=None, language=None, suppress_save=False):
    """Helper function to update an existing user activity log entry."""

    # Do nothing if the max # of records is zero
    #   (i.e. this functionality is disabled)
    if not cls.is_enabled():
        return

    if not user:
        raise ValidationError("A valid user must always be specified.")
    if not update_datetime:  # must be done outside the function header (else becomes static)
        update_datetime = datetime.now()
    activity_type = cls.get_activity_int(activity_type)

    cur_log = cls.get_latest_open_log_or_None(user=user, activity_type=activity_type)
    if cur_log:
        # How could you start after you updated??
        if cur_log.start_datetime > update_datetime:
            raise ValidationError("Update time must always be later than the login time.")
    else:
        # No unstopped starts.  Start should have been called first!
        logging.warn("%s: Had to create a user log entry on an UPDATE(%d)! @ %s" % (user.username, activity_type, update_datetime))
        cur_log = cls.begin_user_activity(user=user, activity_type=activity_type, start_datetime=update_datetime, suppress_save=True)

    logging.debug("%s: UPDATE activity (%d) @ %s" % (user.username, activity_type, update_datetime))
    cur_log.last_active_datetime = update_datetime
    cur_log.language = language or cur_log.language  # set the language to the current language, if there is one.
    if not suppress_save:
        cur_log.save()
    return cur_log
async def proxy(request: Request) -> web.Response:
    def prepare_headers(headers):
        # Copy first: the incoming request headers are an immutable multidict.
        headers = dict(headers)
        headers['host'] = PROXY_SITE
        headers['Accept-Encoding'] = 'deflate'
        return headers

    def response_body(raw: bytes) -> bytes:
        content = raw.decode()
        soup = BeautifulSoup(content, "html.parser")  # OPTIMIZE: use lxml instead of html.parser
        for processor, kwargs in PROCESSORS.items():
            func = getattr(processors, processor)
            soup = func(soup, **kwargs)
        return str.encode(str(soup))

    url = '{site}{url}'.format(site=PROXY_URL, url=request.match_info['path'])
    async with aiohttp.ClientSession() as session:
        async with session.request(request.method, url,
                                   headers=prepare_headers(request.headers),
                                   params=request.rel_url.query,
                                   data=await request.read()) as resp:
            LOG.debug("Got %s response from %s", resp.status, url)
            raw = await resp.read()
            if 'text/html' in resp.headers['Content-Type']:  # exclude static files from parsing
                raw = response_body(raw)
            response = web.Response(body=raw, status=resp.status, headers=resp.headers)
            response.enable_chunked_encoding()
            return response
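# Wiring sketch (hedged: PROXY_URL, PROXY_SITE and the catch-all route are
# assumptions based on the handler above): route every path and method through
# the proxy handler.
def make_proxy_app() -> web.Application:
    app = web.Application()
    app.router.add_route('*', '/{path:.*}', proxy)
    return app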
def process_request(self, request):
    next = request.GET.get("next", "")
    if next.startswith("/"):
        logging.debug("next='%s'" % next)
        request.next = next
    else:
        request.next = ""
def add_syncing_models(models):
    """When sync is run, these models will be sync'd"""

    get_foreign_key_classes = lambda m: set([field.rel.to for field in m._meta.fields if isinstance(field, ForeignKey)])

    for model in models:
        if model in _syncing_models:
            logging.warn("We are already syncing model %s" % unicode(model))
            continue

        # When we add models to be synced, we need to make sure
        #   that models that depend on other models are synced AFTER
        #   the model it depends on has been synced.

        # Get the dependencies of the new model
        foreign_key_classes = get_foreign_key_classes(model)

        # Find all the existing models that this new model refers to.
        class_indices = [_syncing_models.index(cls) for cls in foreign_key_classes if cls in _syncing_models]

        # Insert just after the last dependency found,
        #   or at the front if no dependencies
        insert_after_idx = 1 + (max(class_indices) if class_indices else -1)

        # Before inserting, make sure that any models referencing *THIS* model
        #   appear after this model.
        if [True for synmod in _syncing_models[0:insert_after_idx] if model in get_foreign_key_classes(synmod)]:
            raise Exception("Dependency loop detected in syncing models; cannot proceed.")

        # Now we're ready to insert.
        _syncing_models.insert(insert_after_idx, model)
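# Ordering sketch (hedged: Facility/FacilityUser stand in for any pair where the
# second model has a ForeignKey to the first). Whatever order they are
# registered in, the dependency ends up earlier in _syncing_models.
def _example_syncing_model_order():
    add_syncing_models([FacilityUser])  # FK to Facility
    add_syncing_models([Facility])      # no dependencies: inserted at the front
    assert _syncing_models.index(Facility) < _syncing_models.index(FacilityUser)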
def begin_user_activity(cls, user, activity_type="login", start_datetime=None):
    """Helper function to create a user activity log entry."""

    # Do nothing if the max # of records is zero or None
    #   (i.e. this functionality is disabled)
    if not settings.USER_LOG_MAX_RECORDS:
        return

    assert user is not None, "A valid user must always be specified."
    if not start_datetime:  # must be done outside the function header (else becomes static)
        start_datetime = datetime.now()
    activity_type = cls.get_activity_int(activity_type)

    cur_user_log_entry = get_object_or_None(cls, user=user, end_datetime=None)
    logging.debug("%s: BEGIN activity(%d) @ %s" % (user.username, activity_type, start_datetime))

    # Seems we're logging in without logging out of the previous.
    # Best thing to do is simulate a login
    #   at the previous last update time.
    #
    # Note: this can be a recursive call
    if cur_user_log_entry:
        logging.warn("%s: END activity on a begin @ %s" % (user.username, start_datetime))
        cls.end_user_activity(user=user, activity_type=activity_type, end_datetime=cur_user_log_entry.last_active_datetime)

    # Create a new entry
    cur_user_log_entry = cls(user=user, activity_type=activity_type, start_datetime=start_datetime, last_active_datetime=start_datetime)
    cur_user_log_entry.save()

    return cur_user_log_entry
def download_kmap_icons(knowledge_map):
    for key, value in knowledge_map["topics"].items():
        # Note: id here is retrieved from knowledge_map, so we're OK
        #   that we blew away ID in the topic tree earlier.
        if "icon_url" not in value:
            logging.warn("No icon URL for %s" % key)

        value["icon_url"] = iconfilepath + value["id"] + iconextension
        knowledge_map["topics"][key] = value

        out_path = data_path + "../" + value["icon_url"]
        if os.path.exists(out_path) and not force_icons:
            continue

        icon_khan_url = "http://www.khanacademy.org" + value["icon_url"]
        sys.stdout.write("Downloading icon %s from %s..." % (value["id"], icon_khan_url))
        sys.stdout.flush()
        try:
            icon = requests.get(icon_khan_url)
        except Exception as e:
            sys.stdout.write("\n")  # complete the "downloading" output
            sys.stderr.write("Failed to download %-80s: %s\n" % (icon_khan_url, e))
            continue

        if icon.status_code == 200:
            iconfile = open(data_path + "../" + value["icon_url"], "w")
            iconfile.write(icon.content)
            iconfile.close()
        else:
            sys.stdout.write(" [NOT FOUND]")
            value["icon_url"] = iconfilepath + defaulticon + iconextension

        sys.stdout.write(" done.\n")  # complete the "downloading" output
def save(self, update_userlog=True, *args, **kwargs):
    if not kwargs.get("imported", False):
        self.full_clean()

        # Compute learner status
        if self.attempts > 20 and not self.complete:
            self.struggling = True
        already_complete = self.complete
        self.complete = (self.streak_progress >= 100)
        if not already_complete and self.complete:
            self.struggling = False
            self.completion_timestamp = datetime.now()
            self.attempts_before_completion = self.attempts

        # Tell logins that they are still active (ignoring validation failures).
        # TODO(bcipolli): Could log exercise information in the future.
        if update_userlog:
            try:
                UserLog.update_user_activity(self.user, activity_type="login", update_datetime=(self.completion_timestamp or datetime.now()), language=self.language)
            except ValidationError as e:
                logging.error("Failed to update userlog during exercise: %s" % e)

    super(ExerciseLog, self).save(*args, **kwargs)
def refresh_topic_cache_wrapper_fn(request, cached_nodes={}, *args, **kwargs):
    """
    Centralized logic for how to refresh the topic cache, for each type of object.

    When the object is desired to be used, this code runs to refresh data,
    balancing between correctness and efficiency.
    """
    if not cached_nodes:
        cached_nodes = {"topics": topicdata.TOPICS}

    for node in cached_nodes.values():
        if not node:
            continue
        has_children = bool(node.get("children"))
        has_grandchildren = has_children and any(["children" in child for child in node["children"]])

        # Properties not yet marked
        if node["kind"] == "Video":
            if force or "urls" not in node:
                #stamp_urls_on_video(node, force=force)  # will be done by force below
                recount_videos_and_invalidate_parents(node["parent"], force=True)

        elif node["kind"] == "Topic":
            if not force and (not has_grandchildren or "nvideos_local" not in node):
                # If forcing, we would do this here, and again below--so skip it when forcing.
                logging.debug("cache miss: stamping urls on videos")
                for video in topic_tools.get_topic_videos(path=node["path"]):
                    stamp_urls_on_video(video, force=force)
            recount_videos_and_invalidate_parents(node, force=force or not has_grandchildren)

    kwargs.update(cached_nodes)
    return handler(request, *args, **kwargs)
def set_cached_password(cls, user, raw_password):
    assert user.id, "Your user must have an ID before calling this function."

    if not cls.is_enabled():
        # Must delete, to make sure we don't get out of sync.
        cls.invalidate_cached_password(user=user)

    else:
        try:
            # Set the cached password.
            n_cached_iters = cls.iters_for_user_type(user)
            # TODO(bcipolli) Migrate this to an extended django class
            #   that uses get_or_initialize
            cached_password = get_object_or_None(cls, user=user) or cls(user=user)
            cached_password.password = crypt(raw_password, iterations=n_cached_iters)
            cached_password.save()
            logging.debug("Set cached password for user=%s; iterations=%d" % (user.username, n_cached_iters))
        except Exception as e:
            # If we fail to create a cache item... just keep going--functionality
            #   can still move forward.
            logging.error(e)
def add_syncing_models(models):
    """When sync is run, these models will be sync'd"""
    for model in models:
        if model in _syncing_models:
            logging.warn("We are already syncing model %s" % str(model))
        else:
            _syncing_models.append(model)
def verify_inner_zip(self, zip_file):
    """
    Extract contents of the outer zip, then verify the inner zip.
    """
    zfile = ZipFile(zip_file, "r")
    nfiles = len(zfile.namelist())
    for fi, afile in enumerate(zfile.namelist()):
        zfile.extract(afile, path=self.working_dir)

    self.signature_file = os.path.join(self.working_dir, Command.signature_filename)
    self.inner_zip_file = os.path.join(self.working_dir, Command.inner_zip_filename)

    central_server = Device.get_central_server()
    lines = open(self.signature_file, "r").read().split("\n")
    chunk_size = int(lines.pop(0))
    if not central_server:
        logging.warn("No central server device object found; trusting zip file because you asked me to...")
    elif central_server.key.verify_large_file(self.inner_zip_file, signature=lines, chunk_size=chunk_size):
        logging.info("Verified file!")
    else:
        raise Exception("Failed to verify inner zip file.")
    return self.inner_zip_file
def recurse_nodes_to_clean_related_videos(node):
    """
    Internal function for recursing the topic tree and marking related exercises.
    Requires rebranding of metadata done by recurse_nodes function.
    """
    def get_video_node(video_slug, node):
        if node["kind"] == "Topic":
            for child in node.get("children", []):
                video_node = get_video_node(video_slug, child)
                if video_node:
                    return video_node
        elif node["kind"] == "Video" and node["slug"] == video_slug:
            return node
        return None

    if node["kind"] == "Exercise":
        videos_to_delete = []
        for vi, video_slug in enumerate(node["related_video_slugs"]):
            if not get_video_node(video_slug, topictree):
                videos_to_delete.append(vi)
        for vi in reversed(videos_to_delete):
            logging.warn("Deleting unknown video %s" % node["related_video_slugs"][vi])
            del node["related_video_slugs"][vi]

    for child in node.get("children", []):
        recurse_nodes_to_clean_related_videos(child)
def add_log_to_summary(cls, user_log, device=None):
    """Adds total_time to the appropriate user/device/activity's summary log."""

    assert user_log.end_datetime, "all log items must have an end_datetime to be saved here."
    assert user_log.total_seconds >= 0, "all log items must have a non-negative total_seconds to be saved here."

    device = device or Device.get_own_device()  # Must be done here, or install fails

    # Check for an existing object
    log_summary = cls.objects.filter(
        device=device,
        user=user_log.user,
        activity_type=user_log.activity_type,
        start_datetime__lte=user_log.end_datetime,
        end_datetime__gte=user_log.end_datetime,
    )
    assert log_summary.count() <= 1, "There should never be multiple summaries in the same time period/device/user/type combo"

    # Get (or create) the log item
    log_summary = log_summary[0] if log_summary.count() else cls(
        device=device,
        user=user_log.user,
        activity_type=user_log.activity_type,
        start_datetime=cls.get_period_start_datetime(user_log.end_datetime, settings.USER_LOG_SUMMARY_FREQUENCY),
        end_datetime=cls.get_period_end_datetime(user_log.end_datetime, settings.USER_LOG_SUMMARY_FREQUENCY),
        total_seconds=0,
        count=0,
    )

    logging.debug("Adding %d seconds for %s/%s/%d, period %s to %s" % (user_log.total_seconds, device.name, user_log.user.username, user_log.activity_type, log_summary.start_datetime, log_summary.end_datetime))

    # Add the latest info
    log_summary.total_seconds += user_log.total_seconds
    log_summary.count += 1
    log_summary.save()
def update_json(youtube_id, lang_code, downloaded, api_response, time_of_attempt):
    """Update language_srt_map to reflect download status

    lang_code in IETF format
    """
    # Open JSON file
    filepath = get_lang_map_filepath(lang_code)
    language_srt_map = softload_json(filepath, logger=logging.error)
    if not language_srt_map:
        return False

    # Create updated entry
    entry = language_srt_map[youtube_id]
    entry["downloaded"] = downloaded
    entry["api_response"] = api_response
    entry["last_attempt"] = time_of_attempt
    if api_response == "success":
        entry["last_success"] = time_of_attempt

    # Update full-size JSON with new information
    language_srt_map[youtube_id].update(entry)

    # Write it to file
    json_file = open(filepath, "wb")
    json_file.write(json.dumps(language_srt_map))
    json_file.close()
    logging.debug("File updated.")

    return True