def obj_get(self, bundle, **kwargs):
    playlists = Playlist.all()
    pk = kwargs['pk']
    content_dict = get_content_cache()
    for playlist in playlists:
        if str(playlist.id) == pk:
            # Add the full titles onto the playlist entries
            playlist.entries = [
                PlaylistEntry.add_full_title_from_topic_tree(entry, content_dict)
                for entry in playlist.entries
            ]
            for entry in playlist.entries:
                if entry["entity_kind"] == "Video":
                    entry["youtube_id"] = get_slug2id_map()[entry["entity_id"]]
            return playlist
    else:
        # for/else: only reached when the loop finishes without returning,
        # i.e. no playlist matched the requested pk.
        raise NotFound('Playlist with pk %s not found' % pk)
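
# Usage sketch (illustrative only): resolving a detail lookup through obj_get.
# `PlaylistResource` is an assumed name for the Tastypie resource this method
# would live on, and the pk value is made up; neither is confirmed by the
# snippet above.
from tastypie.exceptions import NotFound

resource = PlaylistResource()  # hypothetical resource class
try:
    playlist = resource.obj_get(bundle=None, pk="g4d7a2")  # hypothetical pk
    print(playlist.title)
except NotFound:
    print("no playlist with that pk")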
"""Classes used by the student progress tastypie API""" import json from django.core.urlresolvers import reverse, NoReverseMatch from django.core.exceptions import ObjectDoesNotExist from kalite.facility.models import FacilityUser from kalite.main.models import ExerciseLog, VideoLog from kalite.playlist.models import VanillaPlaylist as Playlist, QuizLog from kalite.topic_tools import get_slug2id_map, get_id2slug_map, convert_leaf_url_to_id, get_leafed_topics, get_content_cache, get_exercise_cache ID2SLUG_MAP = get_id2slug_map() SLUG2ID_MAP = get_slug2id_map() class PlaylistProgressParent: """Parent class for helpful class methods""" @classmethod def get_playlist_entry_ids(cls, playlist): """Return a tuple of the playlist's video ids and exercise ids as sets""" playlist_entries = playlist.get("entries") or playlist.get("children") # TODO(dylanjbarth): 0.13 playlist entities shouldn't have the /v or /e in them at all. pl_video_ids = set([ SLUG2ID_MAP.get(entry.get("entity_id")) or entry.get("id") for entry in playlist_entries if entry.get("entity_kind") == "Video" ]) pl_exercise_ids = set([ entry.get("entity_id") or entry.get("id") for entry in playlist_entries if (entry.get("entity_kind") or entry.get("kind")) == "Exercise"
def create_all_mappings(force=False, frequency_to_save=100, response_to_check=None, date_to_check=None, map_file=SRTS_JSON_FILEPATH):
    """Write or update the JSON file that maps each YouTube ID to its Amara code
    and the languages it is available in.

    This command updates the json file that records what languages videos have
    been subtitled in. It loops through all video ids, records a list of the
    languages Amara says each video has been subtitled in, plus metadata about
    the request (e.g. date, response code). See the schema in the docstring of
    the function update_video_entry.
    """
    youtube_ids = get_slug2id_map().values()

    # Initialize the data
    if not os.path.exists(map_file):
        ensure_dir(os.path.dirname(map_file))

        if not settings.DEBUG:
            raise CommandError("TRUE central server's srts dict should never be empty; where is your %s?" % map_file)

        else:
            # Pull it from the central server
            try:
                logging.debug("Fetching central server's srt availability file.")
                resp = requests.get("http://kalite.learningequality.org:7007/media/testing/%s" % (os.path.basename(map_file)))
                resp.raise_for_status()
                with open(map_file, "w") as fp:
                    fp.write(resp.content)
                srts_dict = json.loads(resp.content)
            except Exception as e:
                logging.error("Failed to download TRUE central server's srts availability file: %s" % e)
                srts_dict = {}

    else:
        # Open the file, read, and clean out old videos.
        # Only raise the error if force=True; otherwise, these data are too
        # valuable to lose, so just assume a temporary problem.
        srts_dict = softload_json(map_file, raises=not force, logger=logging.error)
        if srts_dict:
            logging.info("Loaded %d mappings." % (len(srts_dict)))

        # Set of videos no longer used by KA Lite
        removed_videos = set(srts_dict.keys()) - set(youtube_ids)
        if removed_videos:
            logging.info("Removing subtitle information for %d videos (no longer used)." % len(removed_videos))
            for vid in removed_videos:
                del srts_dict[vid]

    logging.info("Querying %d mappings." % (len(youtube_ids) - (0 if (force or date_to_check) else len(srts_dict))))

    # Once we have the current mapping, proceed through logic to update the mapping
    n_refreshed = 0    # keep track to avoid writing if nothing's been refreshed.
    n_new_entries = 0  # keep track for reporting
    n_failures = 0     # keep track for reporting
    for youtube_id in youtube_ids:

        # Decide whether or not to update this video based on the arguments provided at the command line
        cached = youtube_id in srts_dict
        if not force and cached:

            # First, check against date
            flag_for_refresh = True  # not (response_code or last_attempt)
            last_attempt = srts_dict[youtube_id].get("last_attempt")
            last_attempt = None if not last_attempt else datetime.datetime.strptime(last_attempt, '%Y-%m-%d')
            flag_for_refresh = flag_for_refresh and (not date_to_check or date_to_check > last_attempt)
            if not flag_for_refresh:
                logging.debug("Skipping %s for date-check" % youtube_id)
                continue

            # Second, check against response code
            response_code = srts_dict[youtube_id].get("api_response")
            flag_for_refresh = flag_for_refresh and (not response_to_check or response_to_check == "all" or response_to_check == response_code)
            if not flag_for_refresh:
                logging.debug("Skipping %s for response-code" % youtube_id)
                continue

            if not response_to_check and not date_to_check and cached:
                # No flags specified and already cached: skip
                logging.debug("Skipping %s for already-cached and no flags specified" % youtube_id)
                continue

        # We're going to check; just report the reason why.
        if force and not cached:
            logging.debug("Updating %s because force flag (-f) given and video not cached." % youtube_id)
        elif force and cached:
            logging.debug("Updating %s because force flag (-f) given. Video was previously cached." % youtube_id)
        else:
            logging.debug("Updating %s because video subtitles metadata not yet cached." % youtube_id)

        # If we make it here without hitting a continue, update the entry
        try:
            srts_dict[youtube_id] = update_video_entry(youtube_id, entry=srts_dict.get(youtube_id, {}))
            n_refreshed += 1
        except Exception as e:
            logging.warn("Error updating video %s: %s" % (youtube_id, e))
            n_failures += 1
            continue

        if n_new_entries % frequency_to_save == 0:
            logging.info("On loop %d dumping dictionary into %s" % (n_new_entries, map_file))
            with open(map_file, 'wb') as fp:
                json.dump(srts_dict, fp)
        n_new_entries += 1

    # Finished the loop: save and report
    if n_refreshed > 0:
        with open(map_file, 'wb') as fp:
            json.dump(srts_dict, fp)

    if n_failures == 0:
        logging.info("Great success! Added %d entries, updated %d entries, of %d total." % (n_new_entries, n_refreshed, len(srts_dict)))
    else:
        logging.warn("Stored %d new entries, refreshed %d entries, but with %d failures, of %d total." % (n_new_entries, n_refreshed, n_failures, len(srts_dict)))

    return n_refreshed != 0
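
# Illustrative helper (not in the codebase): the skip/refresh decision in the
# loop above, restated as a standalone function for readability.
# `should_refresh` is a made-up name; it mirrors the three checks
# (force/uncached, date cutoff, response-code filter) under the same
# assumptions as create_all_mappings.
import datetime

def should_refresh(cached_entry, force, cached, date_to_check, response_to_check):
    """Return True when a video's subtitle metadata should be re-queried.
    `cached_entry` is the srts_dict value for the video (a dict)."""
    if force or not cached:
        return True
    # Date check: skip if the cached attempt is at or after the cutoff.
    last_attempt = cached_entry.get("last_attempt")
    last_attempt = last_attempt and datetime.datetime.strptime(last_attempt, "%Y-%m-%d")
    if date_to_check and last_attempt and date_to_check <= last_attempt:
        return False
    # Response-code check: skip unless the cached response matches the filter.
    response_code = cached_entry.get("api_response")
    if response_to_check and response_to_check != "all" and response_to_check != response_code:
        return False
    # No filters at all: a cached entry is left alone.
    if not date_to_check and not response_to_check:
        return False
    return True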
@classmethod
def user_progress_detail(cls, user_id, playlist_id):
    """
    Return a list of video, exercise, and quiz log PlaylistProgressDetail
    objects associated with a specific user and playlist ID.
    """
    user = FacilityUser.objects.get(id=user_id)
    playlist = next(
        (
            pl
            for pl in [plist.__dict__ for plist in Playlist.all()] + get_leafed_topics()
            if pl.get("id") == playlist_id
        ),
        None,
    )

    pl_video_ids, pl_exercise_ids = cls.get_playlist_entry_ids(playlist)

    # Retrieve video, exercise, and quiz logs that appear in this playlist
    user_vid_logs, user_ex_logs = cls.get_user_logs(user, pl_video_ids, pl_exercise_ids)

    # Format & append the quiz log, if it exists
    quiz_exists, quiz_log, quiz_pct_score = cls.get_quiz_log(
        user, (playlist.get("entries") or playlist.get("children")), playlist.get("id")
    )

    # Finally, build an ordered list of the playlist entries, with user
    # progress injected where it exists.
    progress_details = list()
    for ent in playlist.get("entries") or playlist.get("children"):
        entry = {}
        kind = ent.get("entity_kind") or ent.get("kind")
        if kind == "Divider":
            continue
        elif kind == "Video":
            entity_id = get_slug2id_map().get(ent.get("entity_id")) or ent.get("id")
            vid_log = next((vid_log for vid_log in user_vid_logs if vid_log["video_id"] == entity_id), None)
            if vid_log:
                if vid_log.get("complete"):
                    status = "complete"
                elif vid_log.get("total_seconds_watched"):
                    status = "inprogress"
                else:
                    status = "notstarted"

                leaf_node = get_content_cache().get(vid_log["video_id"])
                entry = {
                    "id": entity_id,
                    "kind": kind,
                    "status": status,
                    # 750 points is treated as full credit for a video, so scale to a percentage
                    "score": int(float(vid_log.get("points")) / float(750) * 100),
                    "title": leaf_node["title"],
                    "path": leaf_node["path"],
                }
        elif kind == "Exercise":
            entity_id = ent.get("entity_id") or ent.get("id")
            ex_log = next((ex_log for ex_log in user_ex_logs if ex_log["exercise_id"] == entity_id), None)
            if ex_log:
                if ex_log.get("struggling"):
                    status = "struggling"
                elif ex_log.get("complete"):
                    status = "complete"
                elif ex_log.get("attempts"):
                    status = "inprogress"
                else:
                    # Guard against a log with no attempts, which would
                    # otherwise leave `status` undefined below.
                    status = "notstarted"

                ex_log_id = ex_log.get("exercise_id")
                leaf_node = get_exercise_cache().get(ex_log_id)
                entry = {
                    "id": ex_log_id,
                    "kind": kind,
                    "status": status,
                    "score": ex_log.get("streak_progress"),
                    "title": leaf_node["title"],
                    "path": leaf_node["path"],
                }
        elif kind == "Quiz":
            entity_id = playlist["id"]
            if quiz_log:
                if quiz_log.complete:
                    if quiz_pct_score <= 59:
                        status = "fail"
                    elif quiz_pct_score <= 79:
                        status = "borderline"
                    else:
                        status = "pass"
                elif quiz_log.attempts:
                    status = "inprogress"
                else:
                    status = "notstarted"

                quiz_log_id = quiz_log.quiz
                entry = {
                    "id": quiz_log_id,
                    "kind": "Quiz",
                    "status": status,
                    "score": quiz_pct_score,
                    "title": playlist.get("title"),
                    "path": "",
                }

        if not entry:
            entry = cls.create_empty_entry(entity_id, kind, playlist)

        progress_details.append(cls(**entry))

    return progress_details
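
# Usage sketch (illustrative only): summarizing one learner's progress on one
# playlist. The ids are made up, and the attribute access assumes cls(**entry)
# exposes the entry keys (id/kind/status/score) as attributes on the
# resulting PlaylistProgressDetail object.
details = PlaylistProgressDetail.user_progress_detail(
    user_id="a1b2c3d4",           # hypothetical FacilityUser id
    playlist_id="playlist-0001",  # hypothetical playlist id
)
for detail in details:
    print(detail.kind, detail.status, detail.score)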