class TopicExerciseBadgeType(db.Model):
    """ Every time we publish a new topic tree, we make sure there is one
    TopicExerciseBadgeType for each topic that contains exercises.
    """
    topic_key_name = db.StringProperty()
    topic_standalone_title = db.StringProperty(indexed=False)
    exercise_names_required = object_property.TsvProperty(indexed=False)
    retired = db.BooleanProperty(default=False, indexed=False)
    icon_name = db.StringProperty(default="")

    @staticmethod
    def get_key_name(topic):
        return "topic:%s" % topic.key().name()

    @staticmethod
    def get_or_insert_for_topic(topic):
        if not topic:
            return None

        key_name = TopicExerciseBadgeType.get_key_name(topic)
        topic_badge_type = TopicExerciseBadgeType.get_by_key_name(key_name)

        if not topic_badge_type:
            topic_badge_type = TopicExerciseBadgeType.get_or_insert(
                key_name=key_name,
                topic_key_name=topic.key().name(),
                topic_standalone_title=topic.standalone_title,
                icon_name=topic.icon_name)

        return topic_badge_type
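# Illustrative sketch (not part of this module): how a topic-tree publish step
# might use get_or_insert_for_topic. The `topic` and `exercises` names below
# are assumptions -- a topic_models.Topic entity and its exercise list from
# the version being published -- not identifiers defined here.
#
#     badge_type = TopicExerciseBadgeType.get_or_insert_for_topic(topic)
#     if badge_type:
#         # Record which exercises must be proficient to earn the badge.
#         badge_type.exercise_names_required = [ex.name for ex in exercises]
#         badge_type.put()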
class Url(db.Model):
    url = db.StringProperty()
    title = db.StringProperty(indexed=False)
    tags = db.StringListProperty()
    created_on = db.DateTimeProperty(auto_now_add=True)
    updated_on = db.DateTimeProperty(indexed=False, auto_now=True)

    # List of parent topics
    topic_string_keys = object_property.TsvProperty(indexed=False)

    @property
    def id(self):
        return self.key().id()

    # Returns the first non-hidden topic
    def first_topic(self):
        if self.topic_string_keys:
            return db.get(self.topic_string_keys[0])
        return None

    @staticmethod
    @layer_cache.cache_with_key_fxn(
        lambda: "Url.get_all_%s" %
            setting_model.Setting.cached_content_add_date(),
        layer=layer_cache.Layers.Memcache)
    def get_all():
        return Url.all().fetch(100000)

    @staticmethod
    def get_all_live(version=None):
        if not version:
            version = topic_models.TopicVersion.get_default_version()

        root = topic_models.Topic.get_root(version)
        urls = root.get_urls(include_descendants=True, include_hidden=False)

        # Return only unique urls
        url_dict = dict((u.key(), u) for u in urls)
        return url_dict.values()

    @staticmethod
    def get_by_id_for_version(id, version=None):
        url = Url.get_by_id(id)
        # If there is a version, check to see if there are any updates
        # to the url
        if version:
            change = topic_models.VersionContentChange.get_change_for_content(
                url, version)
            if change:
                url = change.updated_content(url)
        return url
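# Illustrative sketch (not part of this module): resolving a Url entity while
# previewing an unpublished topic version. `url_id` and `version` are
# assumptions -- an entity id from the request and a topic_models.TopicVersion
# being edited -- and staged content changes in that version are applied on
# top of the stored entity.
#
#     url = Url.get_by_id_for_version(url_id, version)
#     if url:
#         parent_topic = url.first_topic()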
class Video(search.Searchable, db.Model):
    youtube_id = db.StringProperty()
    url = db.StringProperty()
    title = db.StringProperty()
    description = db.TextProperty()
    keywords = db.StringProperty()
    duration = db.IntegerProperty(default=0)

    # A dict of properties that may only exist on some videos, such as
    # original_url for smarthistory_videos.
    extra_properties = object_property.UnvalidatedObjectProperty()

    # Human-readable, unique id that can be used in URLs.
    readable_id = db.StringProperty()

    # List of parent topics
    topic_string_keys = object_property.TsvProperty(indexed=False)

    # YouTube view count from last sync.
    views = db.IntegerProperty(default=0)

    # Date first added via KA library sync with YouTube.
    # This property hasn't always existed, so for many old videos
    # this date may be much later than the actual YouTube upload date.
    date_added = db.DateTimeProperty(auto_now_add=True)

    # List of currently available downloadable formats for this video
    downloadable_formats = object_property.TsvProperty(indexed=False)

    _serialize_blacklist = ["downloadable_formats", "topic_string_keys"]

    INDEX_ONLY = ['title', 'keywords', 'description']
    INDEX_TITLE_FROM_PROP = 'title'
    INDEX_USES_MULTI_ENTITIES = False

    @staticmethod
    def get_relative_url(readable_id):
        return '/video/%s' % readable_id

    @property
    def relative_url(self):
        return Video.get_relative_url(self.readable_id)

    @property
    def ka_url(self):
        return util.absolute_url(self.relative_url)

    @property
    def download_urls(self):
        if self.downloadable_formats:
            # We now serve our downloads from s3. Our old archive URL
            # template is...
            #   "http://www.archive.org/download/KA-converted-%s/%s.%s"
            # ...which we may want to fall back on in the future should s3
            # prices climb.
            url_template = "http://s3.amazonaws.com/KA-youtube-converted/%s.%s/%s.%s"
            url_dict = {}

            for suffix in self.downloadable_formats:
                folder_suffix = suffix

                if suffix == "png":
                    # Special case: our pngs are generated during mp4
                    # creation and they are in the mp4 subfolders
                    folder_suffix = "mp4"

                url_dict[suffix] = url_template % (self.youtube_id,
                                                   folder_suffix,
                                                   self.youtube_id,
                                                   suffix)
            return url_dict

        return None

    def download_video_url(self):
        download_urls = self.download_urls

        if download_urls:
            return download_urls.get("mp4")

        return None

    @staticmethod
    def youtube_thumbnail_urls(youtube_id):
        # You might think that hq > sd, but you'd be wrong -- hqdefault is
        # 480x360; sddefault is 640x480. Unfortunately, not all videos have
        # the big one.
        hq_youtube_url = "http://img.youtube.com/vi/%s/hqdefault.jpg" % youtube_id
        sd_youtube_url = "http://img.youtube.com/vi/%s/sddefault.jpg" % youtube_id

        return {
            "hq": hq_youtube_url,
            "sd": image_cache.ImageCache.url_for(sd_youtube_url,
                                                 fallback_url=hq_youtube_url),
        }

    @staticmethod
    def get_for_readable_id(readable_id, version=None):
        video = None
        query = Video.all()
        query.filter('readable_id =', readable_id)
        # The following should just be:
        #   video = query.get()
        # but the database currently contains multiple Video objects for a
        # particular video. Some are old. Some are due to a YouTube sync
        # where the youtube urls changed and our code was producing
        # youtube_ids that ended with '_player'. This hack gets the most
        # recent valid Video object.
        key_id = 0
        for v in query:
            if v.key().id() > key_id and not v.youtube_id.endswith('_player'):
                video = v
                key_id = v.key().id()
        # End of hack

        # If there is a version, check to see if there are any updates to
        # the video
        if version:
            # TODO(csilvers): get rid of circular dependency here
            import topic_models
            if video:
                change = topic_models.VersionContentChange.get_change_for_content(
                    video, version)
                if change:
                    video = change.updated_content(video)
            # If we didn't find any video, check to see if another video's
            # readable_id has been updated to the one we are looking for
            else:
                changes = topic_models.VersionContentChange.get_updated_content_dict(
                    version)
                for key, content in changes.iteritems():
                    if (type(content) == Video and
                            content.readable_id == readable_id):
                        video = content
                        break

        return video

    @staticmethod
    @layer_cache.cache_with_key_fxn(
        lambda: "Video.get_all_%s" %
            setting_model.Setting.cached_content_add_date(),
        layer=layer_cache.Layers.Memcache)
    def get_all():
        return Video.all().fetch(100000)

    @staticmethod
    def get_all_live(version=None):
        # TODO(csilvers): get rid of circular dependency here
        import topic_models
        if not version:
            version = topic_models.TopicVersion.get_default_version()

        root = topic_models.Topic.get_root(version)
        videos = root.get_videos(include_descendants=True,
                                 include_hidden=False)

        # Return only unique videos
        video_dict = dict((v.key(), v) for v in videos)
        return video_dict.values()

    def has_topic(self):
        return bool(self.topic_string_keys)

    # Returns the first non-hidden topic
    def first_topic(self):
        if self.topic_string_keys:
            return db.get(self.topic_string_keys[0])
        return None

    def current_user_points(self):
        user_video = UserVideo.get_for_video_and_user_data(
            self, user_models.UserData.current())
        if user_video:
            return points.VideoPointCalculator(user_video)
        else:
            return 0

    @staticmethod
    def get_dict(query, fxn_key):
        video_dict = {}
        for video in query.fetch(10000):
            video_dict[fxn_key(video)] = video
        return video_dict

    @layer_cache.cache_with_key_fxn(
        lambda self: "related_exercises_%s" % self.key(),
        layer=layer_cache.Layers.Memcache,
        expiration=3600 * 2)
    def related_exercises(self):
        # TODO(csilvers): get rid of circular dependency here
        import exercise_video_model
        exvids = exercise_video_model.ExerciseVideo.all()
        exvids.filter('video =', self.key())

        exercises = [ev.exercise for ev in exvids]
        exercises.sort(key=lambda e: e.h_position)
        exercises.sort(key=lambda e: e.v_position)

        return exercises

    @staticmethod
    @layer_cache.cache(expiration=3600)
    def approx_count():
        return int(setting_model.Setting.count_videos()) / 100 * 100

    # Gets the data we need for the video player
    @staticmethod
    def get_play_data(readable_id, topic, discussion_options):
        # TODO(csilvers): get rid of circular dependency here
        import topic_models

        video = None

        # If we got here, we have a readable_id and a topic, so we can
        # display the topic and the video in it that has the readable_id.
        # Note that we don't query the Video entities for one with the
        # requested readable_id because in some cases there are multiple
        # Video objects in the datastore with the same readable_id
        # (e.g. there are 2 "Order of Operations" videos).
        videos = topic_models.Topic.get_cached_videos_for_topic(topic)
        previous_video = None
        next_video = None
        for v in videos:
            if v.readable_id == readable_id:
                v.selected = 'selected'
                video = v
            elif video is None:
                previous_video = v
            else:
                next_video = v
                break

        if video is None:
            return None

        previous_video_dict = {
            "readable_id": previous_video.readable_id,
            "key_id": previous_video.key().id(),
            "title": previous_video.title
        } if previous_video else None

        next_video_dict = {
            "readable_id": next_video.readable_id,
            "key_id": next_video.key().id(),
            "title": next_video.title
        } if next_video else None

        if app.App.offline_mode:
            video_path = ("/videos/" + _get_mangled_topic_name(topic.id) +
                          "/" + video.readable_id + ".flv")
        else:
            video_path = video.download_video_url()

        if video.description == video.title:
            video.description = None

        related_exercises = video.related_exercises()
        button_top_exercise = None
        if related_exercises:
            def ex_to_dict(exercise):
                return {
                    'name': exercise.display_name,
                    'url': exercise.relative_url,
                }
            button_top_exercise = ex_to_dict(related_exercises[0])

        user_video = UserVideo.get_for_video_and_user_data(
            video, user_models.UserData.current())

        awarded_points = 0
        if user_video:
            awarded_points = user_video.points

        subtitles_key_name = VideoSubtitles.get_key_name('en',
                                                         video.youtube_id)
        subtitles = VideoSubtitles.get_by_key_name(subtitles_key_name)
        subtitles_json = None
        show_interactive_transcript = False
        if subtitles:
            subtitles_json = subtitles.load_json()
            transcript_alternative = \
                experiments.InteractiveTranscriptExperiment.ab_test()
            show_interactive_transcript = (transcript_alternative ==
                experiments.InteractiveTranscriptExperiment.SHOW)

        # TODO(tomyedwab): This is ugly; we would rather have these
        # templates client-side.
        player_html = shared_jinja.get().render_template(
            'videoplayer.html',
            user_data=user_models.UserData.current(),
            video_path=video_path,
            video=video,
            awarded_points=awarded_points,
            video_points_base=consts.VIDEO_POINTS_BASE,
            subtitles_json=subtitles_json,
            show_interactive_transcript=show_interactive_transcript)

        discussion_html = shared_jinja.get().render_template(
            'videodiscussion.html',
            user_data=user_models.UserData.current(),
            video=video,
            topic=topic,
            **discussion_options)

        subtitles_html = shared_jinja.get().render_template(
            'videosubtitles.html',
            subtitles_json=subtitles_json)

        return {
            'title': video.title,
            'extra_properties': video.extra_properties or {},
            'description': video.description,
            'youtube_id': video.youtube_id,
            'readable_id': video.readable_id,
            'key': unicode(video.key()),
            'video_path': video_path,
            'button_top_exercise': button_top_exercise,
            'related_exercises': [],  # disabled for now
            'previous_video': previous_video_dict,
            'next_video': next_video_dict,
            'selected_nav_link': 'watch',
            'issue_labels': ('Component-Videos,Video-%s' % readable_id),
            'author_profile': 'https://plus.google.com/103970106103092409324',
            'player_html': player_html,
            'discussion_html': discussion_html,
            'subtitles_html': subtitles_html,
            'videoPoints': awarded_points,
        }

    @staticmethod
    def reindex(video_list=None):
        """Reindex Videos for search page."""
        if video_list is None:
            video_list = Video.get_all_live()

        num_videos = len(video_list)
        for i, video in enumerate(video_list):
            logging.info("Indexing video %i/%i: %s (%s)" %
                         (i, num_videos, video.title, video.key()))
            video.index()
            video.indexed_title_changed()
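# Illustrative sketch (not part of this module): how a request handler might
# pull everything the video page needs from this model. `readable_id`,
# `topic`, and `discussion_options` are assumptions -- values from the
# request route and the handler's discussion settings -- not defined here.
#
#     video = Video.get_for_readable_id(readable_id)
#     if video:
#         # Thumbnail URLs for the topic page / social sharing.
#         thumbnails = Video.youtube_thumbnail_urls(video.youtube_id)
#         # Full player payload, including prev/next links and rendered HTML.
#         play_data = Video.get_play_data(readable_id, topic,
#                                         discussion_options)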