Example #1
    def get_modified_between_dts(user_data, dt_a, dt_b):
        query_updated = GoalList.get_updated_between_dts(user_data, dt_a, dt_b)
        query_created = GoalList.get_created_between_dts(user_data, dt_a, dt_b)

        results = util.async_queries([query_updated, query_created], limit=200)
        # Union both result sets; "or" here would discard the created-goals
        # set whenever the updated-goals set is non-empty.
        goals = set(results[0].get_result()) | set(results[1].get_result())
        return list(goals)
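Note: util.async_queries is the site's own helper; its body never appears in
these examples. From the call sites it takes a list of datastore queries,
starts them all before blocking on any, and returns one future-like object
per query. A minimal sketch of that fan-out pattern, assuming App Engine's
ndb API (the examples here use the older db models, so treat this purely as
an illustration, not the real helper):

    # Hypothetical sketch only -- not the actual util.async_queries.
    from google.appengine.ext import ndb

    def async_queries(queries, limit=100000):
        # Start every fetch before waiting on any; each Future's
        # get_result() blocks until that query's RPC completes.
        return [query.fetch_async(limit=limit) for query in queries]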
Example #2
def fill_class_summaries_from_logs(user_data_coach, students_data, dt_start_utc):
    dt_end_utc = dt_start_utc + datetime.timedelta(days=1)

    # Asynchronously grab all student data at once
    async_queries = []
    for user_data_student in students_data:
        query_problem_logs = ProblemLog.get_for_user_data_between_dts(user_data_student, dt_start_utc, dt_end_utc)
        query_video_logs = VideoLog.get_for_user_data_between_dts(user_data_student, dt_start_utc, dt_end_utc)

        async_queries.append(query_problem_logs)
        async_queries.append(query_video_logs)

    # Wait for all queries to finish
    results = util.async_queries(async_queries, limit=10000)

    for i, user_data_student in enumerate(students_data):
        logging.info("working on student "+str(user_data_student.user))
        problem_and_video_logs = []

        problem_logs = results[i * 2].get_result()
        video_logs = results[i * 2 + 1].get_result()
        
        for problem_log in problem_logs:
            problem_and_video_logs.append(problem_log)
        for video_log in video_logs:
            problem_and_video_logs.append(video_log)

        problem_and_video_logs = sorted(problem_and_video_logs, key=lambda log: log.time_started())

        if problem_and_video_logs:       
            LogSummary.add_or_update_entry(user_data_coach, problem_and_video_logs, ClassDailyActivitySummary, LogSummaryTypes.CLASS_DAILY_ACTIVITY, 1440)
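Note: the i * 2 / i * 2 + 1 indexing above relies on the queries having been
appended in (problem, video) pairs per student. An equivalent pairing via
slicing, shown as a sketch under the same assumptions:

    # Pair each student with their (problem, video) futures by slicing
    # the results list instead of computing indices by hand.
    for user_data_student, problems, videos in zip(
            students_data, results[0::2], results[1::2]):
        logs = sorted(list(problems.get_result()) +
                      list(videos.get_result()),
                      key=lambda log: log.time_started())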
Example #3
def fill_realtime_recent_daily_activity_summaries(daily_activity_logs, user_data, dt_end):

    if user_data.last_daily_summary and dt_end <= user_data.last_daily_summary:
        return daily_activity_logs

    # We're willing to fill the last 4 days with realtime data if summary logs haven't
    # been compiled for some reason.
    dt_end = min(dt_end, datetime.datetime.now())
    dt_start = dt_end - datetime.timedelta(days=4)

    if user_data.last_daily_summary:
        dt_start = max(dt_start, user_data.last_daily_summary)

    query_problem_logs = models.ProblemLog.get_for_user_data_between_dts(user_data, dt_start, dt_end)
    query_video_logs = models.VideoLog.get_for_user_data_between_dts(user_data, dt_start, dt_end)

    results = util.async_queries([query_problem_logs, query_video_logs])

    problem_logs = results[0].get_result()
    video_logs = results[1].get_result()

    # Chop off hours, minutes, and seconds
    dt_start = datetime.datetime(dt_start.year, dt_start.month, dt_start.day)
    dt_end = datetime.datetime(dt_end.year, dt_end.month, dt_end.day)

    dt = dt_start

    while dt <= dt_end:
        summary = DailyActivitySummary.build(user_data, dt, problem_logs, video_logs)
        if summary.has_activity():
            log = models.DailyActivityLog.build(user_data, dt, summary)
            daily_activity_logs.append(log)
        dt += datetime.timedelta(days=1)

    return daily_activity_logs
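Note: the datetime.datetime(dt.year, dt.month, dt.day) construction above
truncates a datetime to midnight. For reference, two equivalent spellings:

    import datetime

    dt = datetime.datetime(2012, 5, 31, 17, 45, 12)
    assert (datetime.datetime(dt.year, dt.month, dt.day) ==
            dt.replace(hour=0, minute=0, second=0, microsecond=0) ==
            datetime.datetime.combine(dt.date(), datetime.time()))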
Example #4
def class_exercises_over_time_graph_context(user_data, student_list):

    if not user_data:
        return {}

    end_date = None

    if student_list:
        students_data = student_list.get_students_data()
    else:
        students_data = user_data.get_students_data()

    dict_student_exercises = {}
    dict_exercises = {}

    async_queries = []
    for user_data_student in students_data:
        query = models.UserExercise.all()
        query.filter('user =', user_data_student.user)
        query.filter('proficient_date >', None)
        query.order('proficient_date')
        async_queries.append(query)

    # Wait for all queries to finish
    results = util.async_queries(async_queries, limit=10000)

    for i, user_data_student in enumerate(students_data):
        student_nickname = user_data_student.nickname
        dict_student_exercises[student_nickname] = {
            "nickname": student_nickname,
            "email": user_data_student.email,
            "exercises": []
        }

        exercises = results[i].get_result()

        for user_exercise in exercises:
            joined = min(user_data.joined, user_exercise.proficient_date)
            days_until_proficient = (user_exercise.proficient_date -
                                     joined).days
            proficient_date = user_exercise.proficient_date.strftime(
                '%m/%d/%Y')
            data = ExerciseData(student_nickname, user_exercise.exercise,
                                user_exercise.exercise, days_until_proficient,
                                proficient_date)
            dict_student_exercises[student_nickname]["exercises"].append(data)
            end_date = user_exercise.proficient_date

    return {
        "dict_student_exercises": dict_student_exercises,
        "user_data_students": students_data,
        "c_points": reduce(lambda a, b: a + b,
                           map(lambda s: s.points, students_data), 0)
    }
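Note: the "c_points" reduce/map expression is simply a sum of student
points; a more direct spelling of the same total:

    c_points = sum(student.points for student in students_data)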
Example #5
def recent_activity_for(user_data, dt_start, dt_end):

    query_user_badges = models_badges.UserBadge.get_for_user_data_between_dts(user_data, dt_start, dt_end)
    query_problem_logs = models.ProblemLog.get_for_user_data_between_dts(user_data, dt_start, dt_end)
    query_video_logs = models.VideoLog.get_for_user_data_between_dts(user_data, dt_start, dt_end)

    results = util.async_queries([query_user_badges, query_problem_logs, query_video_logs], limit=200)

    list_recent_activity_types = [
            recent_badge_activity(results[0].get_result()),
            recent_exercise_activity(results[1].get_result()),
            recent_video_activity(results[2].get_result()),
    ]
    list_recent_activity = [activity for sublist in list_recent_activity_types for activity in sublist]

    return collapse_recent_activity(list_recent_activity)[:MOST_RECENT_ITEMS]
Example #6
def class_exercises_over_time_graph_context(user_data, student_list):

    if not user_data:
        return {}

    end_date = None

    if student_list:
        students_data = student_list.get_students_data()
    else:
        students_data = user_data.get_students_data()

    dict_student_exercises = {}
    dict_exercises = {}

    async_queries = []
    for user_data_student in students_data:
        query = models.UserExercise.all()
        query.filter('user =', user_data_student.user)
        query.filter('proficient_date >', None)
        query.order('proficient_date')
        async_queries.append(query)

    # Wait for all queries to finish
    results = util.async_queries(async_queries, limit=10000)

    for i, user_data_student in enumerate(students_data):
        student_nickname = user_data_student.nickname
        dict_student_exercises[student_nickname] = { "nickname": student_nickname, "email": user_data_student.email, "exercises": [] }
        
        exercises = results[i].get_result()

        for user_exercise in exercises:
            joined = min(user_data.joined, user_exercise.proficient_date)
            days_until_proficient = (user_exercise.proficient_date - joined).days
            proficient_date = user_exercise.proficient_date.strftime('%m/%d/%Y')
            data = ExerciseData(student_nickname, user_exercise.exercise, user_exercise.exercise, days_until_proficient, proficient_date)
            dict_student_exercises[student_nickname]["exercises"].append(data)
            end_date = user_exercise.proficient_date

    return {
            "dict_student_exercises": dict_student_exercises,
            "user_data_students": students_data,
            "c_points": reduce(lambda a, b: a + b, map(lambda s: s.points, students_data), 0)
            }
Example #7
def fill_class_summaries_from_logs(user_data_coach, students_data,
                                   dt_start_utc):
    dt_end_utc = dt_start_utc + datetime.timedelta(days=1)

    # Asynchronously grab all student data at once
    async_queries = []
    for user_data_student in students_data:
        query_problem_logs = ProblemLog.get_for_user_data_between_dts(
            user_data_student, dt_start_utc, dt_end_utc)
        query_video_logs = VideoLog.get_for_user_data_between_dts(
            user_data_student, dt_start_utc, dt_end_utc)

        async_queries.append(query_problem_logs)
        async_queries.append(query_video_logs)

    # Wait for all queries to finish
    results = util.async_queries(async_queries, limit=10000)

    for i, user_data_student in enumerate(students_data):
        logging.info("working on student " + str(user_data_student.user))
        problem_and_video_logs = []

        problem_logs = results[i * 2].get_result()
        video_logs = results[i * 2 + 1].get_result()

        for problem_log in problem_logs:
            problem_and_video_logs.append(problem_log)
        for video_log in video_logs:
            problem_and_video_logs.append(video_log)

        problem_and_video_logs = sorted(problem_and_video_logs,
                                        key=lambda log: log.time_started())

        if problem_and_video_logs:
            LogSummary.add_or_update_entry(
                user_data_coach, problem_and_video_logs,
                ClassDailyActivitySummary,
                LogSummaryTypes.CLASS_DAILY_ACTIVITY, 1440)
Example #8
def recent_activity_for(user_data, dt_start, dt_end):

    query_user_badges = models_badges.UserBadge.get_for_user_data_between_dts(
        user_data, dt_start, dt_end)
    query_problem_logs = models.ProblemLog.get_for_user_data_between_dts(
        user_data, dt_start, dt_end)
    query_video_logs = models.VideoLog.get_for_user_data_between_dts(
        user_data, dt_start, dt_end)

    results = util.async_queries(
        [query_user_badges, query_problem_logs, query_video_logs], limit=200)

    list_recent_activity_types = [
        recent_badge_activity(results[0].get_result()),
        recent_exercise_activity(results[1].get_result()),
        recent_video_activity(results[2].get_result()),
    ]
    list_recent_activity = [
        activity for sublist in list_recent_activity_types
        for activity in sublist
    ]

    return collapse_recent_activity(list_recent_activity)[:MOST_RECENT_ITEMS]
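Note: the nested comprehension that flattens the three activity lists has a
stdlib equivalent (list_recent_activity_types assumed as above):

    import itertools

    list_recent_activity = list(
        itertools.chain.from_iterable(list_recent_activity_types))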
Example #9
def library_content_html():
    # No cache found -- regenerate HTML
    smart_history = getSmartHistoryContent()

    all_playlists = []

    dict_videos = {}
    dict_videos_counted = {}
    dict_playlists = {}
    dict_playlists_by_title = {}
    dict_video_playlists = {}

    async_queries = [
        Video.all(),
        Playlist.all(),
        VideoPlaylist.all().filter('live_association = ',
                                   True).order('video_position'),
    ]

    results = util.async_queries(async_queries)

    for video in results[0].get_result():
        dict_videos[video.key()] = video

    for playlist in results[1].get_result():
        dict_playlists[playlist.key()] = playlist
        if playlist.title in topics_list:
            dict_playlists_by_title[playlist.title] = playlist

    for video_playlist in results[2].get_result():
        playlist_key = VideoPlaylist.playlist.get_value_for_datastore(
            video_playlist)
        video_key = VideoPlaylist.video.get_value_for_datastore(video_playlist)

        if dict_videos.has_key(video_key) and dict_playlists.has_key(
                playlist_key):
            video = dict_videos[video_key]
            playlist = dict_playlists[playlist_key]
            fast_video_playlist_dict = {"video": video, "playlist": playlist}

            if dict_video_playlists.has_key(playlist_key):
                dict_video_playlists[playlist_key].append(
                    fast_video_playlist_dict)
            else:
                dict_video_playlists[playlist_key] = [fast_video_playlist_dict]

            if dict_playlists_by_title.has_key(playlist.title):
                # Only count videos in topics_list
                dict_videos_counted[video.youtube_id] = True

    # Update count of all distinct videos associated w/ a live playlist
    Setting.count_videos(len(dict_videos_counted.keys()))

    for topic in topics_list:
        if topic in dict_playlists_by_title:
            playlist = dict_playlists_by_title[topic]
            playlist_key = playlist.key()
            playlist_videos = dict_video_playlists.get(playlist_key) or []

            if not playlist_videos:
                logging.error('Playlist %s has no videos!', playlist.title)

            playlist_data = {
                'title': topic,
                'topic': topic,
                'playlist': playlist,
                'videos': playlist_videos,
                'next': None
            }

            all_playlists.append(playlist_data)

    playlist_data_prev = None
    for playlist_data in all_playlists:
        if playlist_data_prev:
            playlist_data_prev['next'] = playlist_data
        playlist_data_prev = playlist_data

    # Separating out the columns because the formatting is a little different on each column
    template_values = {
        'App': App,
        'all_playlists': all_playlists,
        'smart_history': smart_history,
    }

    html = shared_jinja.get().render_template("library_content_template.html",
                                              **template_values)

    # Set shared date of last generated content
    Setting.cached_library_content_date(str(datetime.datetime.now()))

    return html
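Note: the VideoPlaylist loop reads its reference properties through
get_value_for_datastore, which returns the stored Key without fetching the
referenced entity; accessing the property directly would dereference it and
cost one datastore get per row:

    video = video_playlist.video  # dereferences: fetches the Video entity
    video_key = VideoPlaylist.video.get_value_for_datastore(
        video_playlist)           # returns the raw db.Key, no extra fetch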
Example #10
def library_content_html():
    # No cache found -- regenerate HTML
    smart_history = getSmartHistoryContent()

    all_playlists = []

    dict_videos = {}
    dict_videos_counted = {}
    dict_playlists = {}
    dict_playlists_by_title = {}
    dict_video_playlists = {}

    async_queries = [
        Video.all(),
        Playlist.all(),
        VideoPlaylist.all().filter('live_association = ', True).order('video_position'),
    ]

    results = util.async_queries(async_queries)

    for video in results[0].get_result():
        dict_videos[video.key()] = video

    for playlist in results[1].get_result():
        dict_playlists[playlist.key()] = playlist
        if playlist.title in topics_list:
            dict_playlists_by_title[playlist.title] = playlist

    for video_playlist in results[2].get_result():
        playlist_key = VideoPlaylist.playlist.get_value_for_datastore(video_playlist)
        video_key = VideoPlaylist.video.get_value_for_datastore(video_playlist)

        if dict_videos.has_key(video_key) and dict_playlists.has_key(playlist_key):
            video = dict_videos[video_key]
            playlist = dict_playlists[playlist_key]
            fast_video_playlist_dict = {"video":video, "playlist":playlist}

            if dict_video_playlists.has_key(playlist_key):
                dict_video_playlists[playlist_key].append(fast_video_playlist_dict)
            else:
                dict_video_playlists[playlist_key] = [fast_video_playlist_dict]

            if dict_playlists_by_title.has_key(playlist.title):
                # Only count videos in topics_list
                dict_videos_counted[video.youtube_id] = True

    # Update count of all distinct videos associated w/ a live playlist
    Setting.count_videos(len(dict_videos_counted.keys()))

    for topic in topics_list:
        if topic in dict_playlists_by_title:
            playlist = dict_playlists_by_title[topic]
            playlist_key = playlist.key()
            playlist_videos = dict_video_playlists.get(playlist_key) or []

            if not playlist_videos:
                logging.error('Playlist %s has no videos!', playlist.title)

            playlist_data = {
                     'title': topic,
                     'topic': topic,
                     'playlist': playlist,
                     'videos': playlist_videos,
                     'next': None
                     }

            all_playlists.append(playlist_data)

    playlist_data_prev = None
    for playlist_data in all_playlists:
        if playlist_data_prev:
            playlist_data_prev['next'] = playlist_data
        playlist_data_prev = playlist_data

    # Separating out the columns because the formatting is a little different on each column
    template_values = {
        'App' : App,
        'all_playlists': all_playlists,
        'smart_history': smart_history,
        }

    html = shared_jinja.get().render_template("library_content_template.html", **template_values)

    # Set shared date of last generated content
    Setting.cached_library_content_date(str(datetime.datetime.now()))

    return html
Example #11
File: main.py Project: di445/server
    def get(self):
        query = self.request.get('page_search_query')
        template_values = {'page_search_query': query}
        query = query.strip()
        if len(query) < search.SEARCH_PHRASE_MIN_LENGTH:
            if len(query) > 0:
                template_values.update({
                    'query_too_short': search.SEARCH_PHRASE_MIN_LENGTH
                })
            self.render_jinja2_template("searchresults.html", template_values)
            return

        # Do an async query for all ExerciseVideos, since this may be slow
        exvids_query = ExerciseVideo.all()
        exvids_future = util.async_queries([exvids_query])

        # One full (non-partial) search, then sort by kind
        all_text_keys = Topic.full_text_search(
                query, limit=50, kind=None,
                stemming=Topic.INDEX_STEMMING,
                multi_word_literal=Topic.INDEX_MULTI_WORD)

        # Quick title-only partial search
        topic_partial_results = filter(
                lambda topic_dict: query in topic_dict["title"].lower(),
                autocomplete.topic_title_dicts())
        video_partial_results = filter(
                lambda video_dict: query in video_dict["title"].lower(),
                autocomplete.video_title_dicts())
        url_partial_results = filter(
                lambda url_dict: query in url_dict["title"].lower(),
                autocomplete.url_title_dicts())

        # Combine results & do one big get!
        all_key_list = [str(key_and_title[0]) for key_and_title in all_text_keys]
        # all_key_list.extend([result["key"] for result in topic_partial_results])
        all_key_list.extend([result["key"] for result in video_partial_results])
        all_key_list.extend([result["key"] for result in url_partial_results])
        all_key_list = list(set(all_key_list))

        # Filter out anything that isn't a Topic, Url or Video
        all_key_list = [key for key in all_key_list if db.Key(key).kind() in ["Topic", "Url", "Video"]]

        # Get all the entities
        all_entities = db.get(all_key_list)

        # Group results by type
        topics = []
        videos = []
        for entity in all_entities:
            if isinstance(entity, Topic):
                topics.append(entity)
            elif isinstance(entity, Video):
                videos.append(entity)
            elif isinstance(entity, Url):
                videos.append(entity)
            elif entity:
                logging.info("Found unknown object " + repr(entity))

        topic_count = len(topics)

        # Get topics for videos not in matching topics
        filtered_videos = []
        filtered_videos_by_key = {}
        for video in videos:
            if [(str(topic.key()) in video.topic_string_keys) for topic in topics].count(True) == 0:
                video_topic = video.first_topic()
                if video_topic != None:
                    topics.append(video_topic)
                    filtered_videos.append(video)
                    filtered_videos_by_key[str(video.key())] = []
            else:
                filtered_videos.append(video)
                filtered_videos_by_key[str(video.key())] = []
        video_count = len(filtered_videos)

        # Get the related exercises
        all_exercise_videos = exvids_future[0].get_result()
        exercise_keys = []
        for exvid in all_exercise_videos:
            video_key = str(ExerciseVideo.video.get_value_for_datastore(exvid))
            if video_key in filtered_videos_by_key:
                exercise_key = ExerciseVideo.exercise.get_value_for_datastore(exvid)
                video_exercise_keys = filtered_videos_by_key[video_key]
                video_exercise_keys.append(exercise_key)
                exercise_keys.append(exercise_key)
        exercises = db.get(exercise_keys)

        # Sort exercises with videos
        video_exercises = {}
        for video_key, exercise_keys in filtered_videos_by_key.iteritems():
            video_exercises[video_key] = map(lambda exkey: [exercise for exercise in exercises if exercise.key() == exkey][0], exercise_keys)

        # Count number of videos in each topic and sort descending
        if topics:
            if len(filtered_videos) > 0:
                for topic in topics:
                    topic.match_count = [(str(topic.key()) in video.topic_string_keys) for video in filtered_videos].count(True)
                topics = sorted(topics, key=lambda topic: -topic.match_count)
            else:
                for topic in topics:
                    topic.match_count = 0

        template_values.update({
                           'topics': topics,
                           'videos': filtered_videos,
                           'video_exercises': video_exercises,
                           'search_string': query,
                           'video_count': video_count,
                           'topic_count': topic_count,
                           })
        
        self.render_jinja2_template("searchresults.html", template_values)
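Note: the map/lambda that resolves each exercise key rescans the exercises
list once per key. A sketch of the same step with a one-time index, under
the same assumptions about the surrounding names:

    # Index exercises by key once, then resolve keys by dict lookup.
    exercises_by_key = dict((exercise.key(), exercise)
                            for exercise in exercises)
    video_exercises = {}
    for video_key, exercise_keys in filtered_videos_by_key.iteritems():
        video_exercises[video_key] = [exercises_by_key[exkey]
                                      for exkey in exercise_keys]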
Example #12
    def get_old(self):
        """ Deprecated old version of search, so we can Gandalf in the new one.

        If new search is working, this should be taken out by May 31, 2012.
        """

        show_update = False
        if App.is_dev_server and user_util.is_current_user_admin():
            update = self.request_bool("update", False)
            if update:
                self.update()

            version_number = layer_cache.KeyValueCache.get(
                "last_dev_topic_vesion_indexed")
            default_version = topic_models.TopicVersion.get_default_version()
            if version_number != default_version.number:
                show_update = True

        query = self.request.get('page_search_query')
        template_values = {'page_search_query': query}
        query = query.strip()
        if len(query) < search.SEARCH_PHRASE_MIN_LENGTH:
            if len(query) > 0:
                template_values.update({
                    'query_too_short': search.SEARCH_PHRASE_MIN_LENGTH
                })
            self.render_jinja2_template("searchresults.html", template_values)
            return
        searched_phrases = []

        # Do an async query for all ExerciseVideos, since this may be slow
        exvids_query = ExerciseVideo.all()
        exvids_future = util.async_queries([exvids_query])

        # One full (non-partial) search, then sort by kind
        all_text_keys = Topic.full_text_search(
                query, limit=50, kind=None,
                stemming=Topic.INDEX_STEMMING,
                multi_word_literal=Topic.INDEX_MULTI_WORD,
                searched_phrases_out=searched_phrases)

        # Quick title-only partial search
        topic_partial_results = filter(
                lambda topic_dict: query in topic_dict["title"].lower(),
                autocomplete.topic_title_dicts())
        video_partial_results = filter(
                lambda video_dict: query in video_dict["title"].lower(),
                autocomplete.video_title_dicts())
        url_partial_results = filter(
                lambda url_dict: query in url_dict["title"].lower(),
                autocomplete.url_title_dicts())

        # Combine results & do one big get!
        all_keys = [str(key_and_title[0]) for key_and_title in all_text_keys]
        all_keys.extend([result["key"] for result in topic_partial_results])
        all_keys.extend([result["key"] for result in video_partial_results])
        all_keys.extend([result["key"] for result in url_partial_results])
        all_keys = list(set(all_keys))

        # Filter out anything that isn't a Topic, Url or Video
        all_keys = [key for key in all_keys
                    if db.Key(key).kind() in ["Topic", "Url", "Video"]]

        # Get all the entities
        all_entities = db.get(all_keys)

        # Group results by type
        topics = []
        videos = []
        for entity in all_entities:
            if isinstance(entity, Topic):
                topics.append(entity)
            elif isinstance(entity, Video):
                videos.append(entity)
            elif isinstance(entity, Url):
                videos.append(entity)
            elif entity:
                logging.info("Found unknown object " + repr(entity))

        # Get topics for videos not in matching topics
        filtered_videos = []
        filtered_videos_by_key = {}
        for video in videos:
            if [(str(topic.key()) in video.topic_string_keys)
                for topic in topics].count(True) == 0:
                video_topic = video.first_topic()
                if video_topic != None:
                    topics.append(video_topic)
                    filtered_videos.append(video)
                    filtered_videos_by_key[str(video.key())] = []
            else:
                filtered_videos.append(video)
                filtered_videos_by_key[str(video.key())] = []
        video_count = len(filtered_videos)

        # Get the related exercises
        all_exercise_videos = exvids_future[0].get_result()
        exercise_keys = []
        for exvid in all_exercise_videos:
            video_key = str(ExerciseVideo.video.get_value_for_datastore(exvid))
            if video_key in filtered_videos_by_key:
                exercise_key = ExerciseVideo.exercise.get_value_for_datastore(
                    exvid)
                video_exercise_keys = filtered_videos_by_key[video_key]
                video_exercise_keys.append(exercise_key)
                exercise_keys.append(exercise_key)
        exercises = db.get(exercise_keys)

        # Sort exercises with videos
        video_exercises = {}
        for video_key, exercise_keys in filtered_videos_by_key.iteritems():
            video_exercises[video_key] = map(
                lambda exkey: [exercise for exercise in exercises
                               if exercise.key() == exkey][0], exercise_keys)

        # Count number of videos in each topic and sort descending
        topic_count = 0
        matching_topic_count = 0
        if topics:
            if len(filtered_videos) > 0:
                for topic in topics:
                    topic.match_count = [
                        (str(topic.key()) in video.topic_string_keys)
                        for video in filtered_videos].count(True)
                    if topic.match_count > 0:
                        topic_count += 1

                topics = sorted(topics,
                                key=lambda topic: topic.match_count,
                                reverse=True)
            else:
                for topic in topics:
                    topic.match_count = 0

            for topic in topics:
                if topic.title.lower() == query:
                    topic.matches = True
                    matching_topic_count += 1

                    child_topics = topic.get_child_topics(
                        include_descendants=True)
                    topic.child_topics = [t for t in child_topics
                                          if t.has_content()]

        template_values.update({
                           'show_update': show_update,
                           'topics': topics,
                           'videos': filtered_videos,
                           'video_exercises': video_exercises,
                           'search_string': query,
                           'video_count': video_count,
                           'topic_count': topic_count,
                           'matching_topic_count': matching_topic_count
                           })

        self.render_jinja2_template("searchresults.html", template_values)
Example #13
    def get_classtime_table_old(self, students_data, dt_start_utc):

        dt_start_ctz = self.dt_to_ctz(dt_start_utc)
        dt_end_ctz = dt_start_ctz + datetime.timedelta(days = 1)

        column = 0

        classtime_table = ClassTimeTable(dt_start_ctz, dt_end_ctz)

        # Asynchronously grab all student data at once
        async_queries = []
        for user_data_student in students_data:

            query_problem_logs = exercise_models.ProblemLog.get_for_user_data_between_dts(user_data_student, self.dt_to_utc(dt_start_ctz), self.dt_to_utc(dt_end_ctz))
            query_video_logs = video_models.VideoLog.get_for_user_data_between_dts(user_data_student, self.dt_to_utc(dt_start_ctz), self.dt_to_utc(dt_end_ctz))

            async_queries.append(query_problem_logs)
            async_queries.append(query_video_logs)

        # Wait for all queries to finish
        results = util.async_queries(async_queries, limit=10000)

        rows = 0
        chunks = 0
        for i, user_data_student in enumerate(students_data):

            problem_logs = results[i * 2].get_result()
            video_logs = results[i * 2 + 1].get_result()

            problem_and_video_logs = []

            for problem_log in problem_logs:
                problem_and_video_logs.append(problem_log)
            for video_log in video_logs:
                problem_and_video_logs.append(video_log)

            problem_and_video_logs = sorted(problem_and_video_logs, key=lambda log: log.time_started())
            rows += len(problem_and_video_logs)
            
            chunk_current = None

            for activity in problem_and_video_logs:   

                if chunk_current is not None and self.dt_to_ctz(activity.time_started()) > (chunk_current.end + self.chunk_delta):
                    chunks += 1

                    classtime_table.drop_into_column_old(chunk_current, column)
                    chunk_current.description()
                    chunk_current = None

                if chunk_current is None:
                    chunk_current = ClassTimeChunk()
                    chunk_current.user_data_student = user_data_student
                    chunk_current.start = self.dt_to_ctz(activity.time_started())
                    chunk_current.end = self.dt_to_ctz(activity.time_ended())

                chunk_current.activities.append(activity)
                chunk_current.end = min(self.dt_to_ctz(activity.time_ended()), dt_end_ctz)

            if chunk_current is not None:
                chunks += 1

                classtime_table.drop_into_column_old(chunk_current, column)
                chunk_current.description()

            column += 1

        logging.info("old rows="+str(rows)+", old chunks="+str(chunks))
        classtime_table.balance()
        return classtime_table
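Note: the chunking loop above is gap-based sessionization: consecutive
activities stay in one chunk until the gap exceeds chunk_delta. The same
idea in isolation, as a sketch over plain timestamps:

    def sessionize(sorted_times, gap):
        # Group already-sorted timestamps into sessions, starting a new
        # session whenever the gap to the previous one exceeds `gap`.
        sessions = []
        for t in sorted_times:
            if sessions and t - sessions[-1][-1] <= gap:
                sessions[-1].append(t)
            else:
                sessions.append([t])
        return sessions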
Example #14
    def get(self):
        query = self.request.get('page_search_query')
        template_values = {'page_search_query': query}
        query = query.strip()
        if len(query) < search.SEARCH_PHRASE_MIN_LENGTH:
            if len(query) > 0:
                template_values.update({
                    'query_too_short': search.SEARCH_PHRASE_MIN_LENGTH
                })
            self.render_jinja2_template("searchresults.html", template_values)
            return
        searched_phrases = []

        # Do an async query for all ExerciseVideos, since this may be slow
        exvids_query = ExerciseVideo.all()
        exvids_future = util.async_queries([exvids_query])

        # One full (non-partial) search, then sort by kind
        all_text_keys = Playlist.full_text_search(
                query, limit=50, kind=None,
                stemming=Playlist.INDEX_STEMMING,
                multi_word_literal=Playlist.INDEX_MULTI_WORD,
                searched_phrases_out=searched_phrases)


        # Quick title-only partial search
        playlist_partial_results = filter(
                lambda playlist_dict: query in playlist_dict["title"].lower(),
                autocomplete.playlist_title_dicts())
        video_partial_results = filter(
                lambda video_dict: query in video_dict["title"].lower(),
                autocomplete.video_title_dicts())

        # Combine results & do one big get!
        all_key_list = [str(key_and_title[0]) for key_and_title in all_text_keys]
        #all_key_list.extend([result["key"] for result in playlist_partial_results])
        all_key_list.extend([result["key"] for result in video_partial_results])
        all_key_list = list(set(all_key_list))
        all_entities = db.get(all_key_list)

        # Filter results by type
        playlists = []
        videos = []
        for entity in all_entities:
            if isinstance(entity, Playlist):
                playlists.append(entity)
            elif isinstance(entity, Video):
                videos.append(entity)
            elif entity is not None:
                logging.error("Unhandled kind in search results: " +
                              str(type(entity)))

        playlist_count = len(playlists)

        # Get playlists for videos not in matching playlists
        filtered_videos = []
        filtered_videos_by_key = {}
        for video in videos:
            if [(playlist.title in video.playlists) for playlist in playlists].count(True) == 0:
                video_playlist = video.first_playlist()
                if video_playlist != None:
                    playlists.append(video_playlist)
                    filtered_videos.append(video)
                    filtered_videos_by_key[str(video.key())] = []
            else:
                filtered_videos.append(video)
                filtered_videos_by_key[str(video.key())] = []
        video_count = len(filtered_videos)

        # Get the related exercises
        all_exercise_videos = exvids_future[0].get_result()
        exercise_keys = []
        for exvid in all_exercise_videos:
            video_key = str(ExerciseVideo.video.get_value_for_datastore(exvid))
            if video_key in filtered_videos_by_key:
                exercise_key = ExerciseVideo.exercise.get_value_for_datastore(exvid)
                video_exercise_keys = filtered_videos_by_key[video_key]
                video_exercise_keys.append(exercise_key)
                exercise_keys.append(exercise_key)
        exercises = db.get(exercise_keys)

        # Sort exercises with videos
        video_exercises = {}
        for video_key, exercise_keys in filtered_videos_by_key.iteritems():
            video_exercises[video_key] = map(lambda exkey: [exercise for exercise in exercises if exercise.key() == exkey][0], exercise_keys)

        # Count number of videos in each playlist and sort descending
        for playlist in playlists:
            if len(filtered_videos) > 0:
                playlist.match_count = [(playlist.title in video.playlists) for video in filtered_videos].count(True)
            else:
                playlist.match_count = 0
        playlists = sorted(playlists, key=lambda playlist: -playlist.match_count)

        template_values.update({
                           'playlists': playlists,
                           'videos': filtered_videos,
                           'video_exercises': video_exercises,
                           'search_string': query,
                           'video_count': video_count,
                           'playlist_count': playlist_count,
                           })
        self.render_jinja2_template("searchresults.html", template_values)
Example #15
    def get_classtime_table_by_coach(self, user_data_coach, students_data,
                                     dt_start_utc):
        logging.info("getting classtime table for " + str(dt_start_utc))

        #ctz will be from midnight to midnight on the day they are looking at
        dt_start_ctz = self.dt_to_ctz(dt_start_utc)
        dt_end_ctz = dt_start_ctz + datetime.timedelta(days=1)

        classtime_table = ClassTimeTable(dt_start_ctz, dt_end_ctz)

        # midnight at PST is 7AM UTC and hence the coach's day in UTC goes from 7AM to 7AM the next day, spanning two different UTC days
        dt_end_utc = dt_start_utc + datetime.timedelta(days=1)

        # find the first utc days that spans the teacher's day
        dt_start_utc1 = datetime.datetime(dt_start_utc.year,
                                          dt_start_utc.month, dt_start_utc.day)
        dt_end_utc1 = dt_start_utc1 + datetime.timedelta(days=1)

        # get the query to get the summary shards from the first day
        log_summary_query_1 = LogSummary.get_by_name(
            LogSummary.get_name_by_dates(user_data_coach,
                                         LogSummaryTypes.CLASS_DAILY_ACTIVITY,
                                         dt_start_utc1, dt_end_utc1))

        # find the second utc day that spans the teacher's day
        dt_start_utc2 = dt_end_utc1
        dt_end_utc2 = dt_start_utc2 + datetime.timedelta(days=1)

        log_summary_query_2 = LogSummary.get_by_name(
            LogSummary.get_name_by_dates(user_data_coach,
                                         LogSummaryTypes.CLASS_DAILY_ACTIVITY,
                                         dt_start_utc2, dt_end_utc2))

        results = util.async_queries(
            [log_summary_query_1, log_summary_query_2], limit=10000)

        class_summary_shards = results[0].get_result()
        class_summary = None
        if class_summary_shards:
            class_summary = reduce(
                lambda x, y: x.merge_shard(y),
                map(lambda x: x.summary, class_summary_shards))

        class_summary_day2_shards = results[1].get_result()
        class_summary_day2 = None
        if class_summary_day2_shards:
            class_summary_day2 = reduce(
                lambda x, y: x.merge_shard(y),
                map(lambda x: x.summary, class_summary_day2_shards))

        if class_summary_day2 is not None:
            if class_summary is not None:
                class_summary.merge_day(class_summary_day2)
            else:
                class_summary = class_summary_day2

        if not class_summary:
            return classtime_table

        rows = 0
        # only consider students that are in the coach's currently viewed list (some students might have stopped having their current coach, or we might only be interested in a coach's student_list)
        for i, user_data_student in enumerate(students_data):

            # check to see if the current student has had any activity
            if class_summary.student_dict.has_key(user_data_student.user):

                # loop over all chunks of that day
                for adjacent_activity_summary in class_summary.student_dict[
                        user_data_student.user]:

                    # make sure the chunk falls within the day specified by the coach's timezone
                    if adjacent_activity_summary.start > dt_start_utc and adjacent_activity_summary.start < dt_end_utc:

                        rows += 1
                        adjacent_activity_summary.setTimezoneOffset(
                            self.timezone_offset)

                        classtime_table.drop_into_column(
                            adjacent_activity_summary, i)

        logging.info("summary by coach rows=" + str(rows))

        return classtime_table
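Note: the reduce over merge_shard folds all summary shards for a day into a
single summary. The same fold as an explicit loop, assuming merge_shard
returns the merged summary, as the reduce implies:

    class_summary = None
    for shard in class_summary_shards:
        if class_summary is None:
            class_summary = shard.summary
        else:
            class_summary = class_summary.merge_shard(shard.summary)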
Example #16
    def get(self):
        query = self.request.get('page_search_query')
        template_values = {'page_search_query': query}
        query = query.strip()
        if len(query) < search.SEARCH_PHRASE_MIN_LENGTH:
            if len(query) > 0:
                template_values.update(
                    {'query_too_short': search.SEARCH_PHRASE_MIN_LENGTH})
            self.render_jinja2_template("searchresults.html", template_values)
            return

        # Do an async query for all ExerciseVideos, since this may be slow
        exvids_query = ExerciseVideo.all()
        exvids_future = util.async_queries([exvids_query])

        # One full (non-partial) search, then sort by kind
        all_text_keys = Topic.full_text_search(
            query,
            limit=50,
            kind=None,
            stemming=Topic.INDEX_STEMMING,
            multi_word_literal=Topic.INDEX_MULTI_WORD)

        # Quick title-only partial search
        topic_partial_results = filter(
            lambda topic_dict: query in topic_dict["title"].lower(),
            autocomplete.topic_title_dicts())
        video_partial_results = filter(
            lambda video_dict: query in video_dict["title"].lower(),
            autocomplete.video_title_dicts())
        url_partial_results = filter(
            lambda url_dict: query in url_dict["title"].lower(),
            autocomplete.url_title_dicts())

        # Combine results & do one big get!
        all_key_list = [
            str(key_and_title[0]) for key_and_title in all_text_keys
        ]
        # all_key_list.extend([result["key"] for result in topic_partial_results])
        all_key_list.extend(
            [result["key"] for result in video_partial_results])
        all_key_list.extend([result["key"] for result in url_partial_results])
        all_key_list = list(set(all_key_list))

        # Filter out anything that isn't a Topic, Url or Video
        all_key_list = [
            key for key in all_key_list
            if db.Key(key).kind() in ["Topic", "Url", "Video"]
        ]

        # Get all the entities
        all_entities = db.get(all_key_list)

        # Group results by type
        topics = []
        videos = []
        for entity in all_entities:
            if isinstance(entity, Topic):
                topics.append(entity)
            elif isinstance(entity, Video):
                videos.append(entity)
            elif isinstance(entity, Url):
                videos.append(entity)
            elif entity:
                logging.info("Found unknown object " + repr(entity))

        topic_count = len(topics)

        # Get topics for videos not in matching topics
        filtered_videos = []
        filtered_videos_by_key = {}
        for video in videos:
            if [(str(topic.key()) in video.topic_string_keys)
                    for topic in topics].count(True) == 0:
                video_topic = video.first_topic()
                if video_topic != None:
                    topics.append(video_topic)
                    filtered_videos.append(video)
                    filtered_videos_by_key[str(video.key())] = []
            else:
                filtered_videos.append(video)
                filtered_videos_by_key[str(video.key())] = []
        video_count = len(filtered_videos)

        # Get the related exercises
        all_exercise_videos = exvids_future[0].get_result()
        exercise_keys = []
        for exvid in all_exercise_videos:
            video_key = str(ExerciseVideo.video.get_value_for_datastore(exvid))
            if video_key in filtered_videos_by_key:
                exercise_key = ExerciseVideo.exercise.get_value_for_datastore(
                    exvid)
                video_exercise_keys = filtered_videos_by_key[video_key]
                video_exercise_keys.append(exercise_key)
                exercise_keys.append(exercise_key)
        exercises = db.get(exercise_keys)

        # Sort exercises with videos
        video_exercises = {}
        for video_key, exercise_keys in filtered_videos_by_key.iteritems():
            video_exercises[video_key] = map(
                lambda exkey: [
                    exercise for exercise in exercises
                    if exercise.key() == exkey
                ][0], exercise_keys)

        # Count number of videos in each topic and sort descending
        if topics:
            if len(filtered_videos) > 0:
                for topic in topics:
                    topic.match_count = [
                        (str(topic.key()) in video.topic_string_keys)
                        for video in filtered_videos
                    ].count(True)
                topics = sorted(topics, key=lambda topic: -topic.match_count)
            else:
                for topic in topics:
                    topic.match_count = 0

        template_values.update({
            'topics': topics,
            'videos': filtered_videos,
            'video_exercises': video_exercises,
            'search_string': query,
            'video_count': video_count,
            'topic_count': topic_count,
        })

        self.render_jinja2_template("searchresults.html", template_values)
Example #17
    def get_classtime_table_by_coach(self, user_data_coach, students_data, dt_start_utc):
        logging.info("getting classtime table for "+str(dt_start_utc))
        
        #ctz will be from midnight to midnight on the day they are looking at
        dt_start_ctz = self.dt_to_ctz(dt_start_utc)
        dt_end_ctz = dt_start_ctz + datetime.timedelta(days = 1)
        
        classtime_table = ClassTimeTable(dt_start_ctz, dt_end_ctz)   

        # midnight at PST is 7AM UTC and hence the coach's day in UTC goes from 7AM to 7AM the next day, spanning two different UTC days 
        dt_end_utc = dt_start_utc + datetime.timedelta(days = 1)

        # find the first utc days that spans the teacher's day
        dt_start_utc1 = datetime.datetime(dt_start_utc.year, dt_start_utc.month, dt_start_utc.day)
        dt_end_utc1 = dt_start_utc1 + datetime.timedelta(days = 1)

        # get the query to get the summary shards from the first day
        log_summary_query_1 = LogSummary.get_by_name(LogSummary.get_name_by_dates(user_data_coach, LogSummaryTypes.CLASS_DAILY_ACTIVITY, dt_start_utc1, dt_end_utc1))
        

        # find the second utc day that spans the teacher's day
        dt_start_utc2 = dt_end_utc1
        dt_end_utc2 = dt_start_utc2 + datetime.timedelta(days = 1)

        log_summary_query_2 = LogSummary.get_by_name(LogSummary.get_name_by_dates(user_data_coach, LogSummaryTypes.CLASS_DAILY_ACTIVITY, dt_start_utc2, dt_end_utc2))

        results = util.async_queries([log_summary_query_1, log_summary_query_2], limit = 10000)

        class_summary_shards = results[0].get_result()
        class_summary = None
        if class_summary_shards: 
            class_summary = reduce(lambda x, y: x.merge_shard(y), map(lambda x: x.summary, class_summary_shards)) 

        class_summary_day2_shards = results[1].get_result()
        class_summary_day2 = None
        if class_summary_day2_shards:
            class_summary_day2 = reduce(lambda x, y: x.merge_shard(y), map(lambda x: x.summary, class_summary_day2_shards))

        if class_summary_day2 is not None:
            if class_summary is not None :        
                class_summary.merge_day(class_summary_day2)
            else:
                class_summary = class_summary_day2
        
        if not class_summary:
            return classtime_table

        rows = 0
        # only consider students that are in the coach's currently viewed list (some students might have stopped having their current coach, or we might only be interested in a coach's student_list)
        for i, user_data_student in enumerate(students_data):

            # check to see if the current student has had any activity 
            if class_summary.student_dict.has_key(user_data_student.user):
                    
                # loop over all chunks of that day
                for adjacent_activity_summary in class_summary.student_dict[user_data_student.user]:

                    # make sure the chunk falls within the day specified by the coach's timezone
                    if adjacent_activity_summary.start > dt_start_utc and adjacent_activity_summary.start < dt_end_utc:
                    
                        rows += 1
                        adjacent_activity_summary.setTimezoneOffset(self.timezone_offset)

                        classtime_table.drop_into_column(adjacent_activity_summary, i)      
        
        logging.info("summary by coach rows="+str(rows))

        return classtime_table 
Example #18
    def get_old(self):
        """ Deprecated old version of search, so we can Gandalf in the new one.

        If new search is working, this should be taken out by May 31, 2012.
        """

        show_update = False
        if App.is_dev_server and user_util.is_current_user_admin():
            update = self.request_bool("update", False)
            if update:
                self.update()

            version_number = layer_cache.KeyValueCache.get(
                "last_dev_topic_vesion_indexed")
            default_version = topic_models.TopicVersion.get_default_version()
            if version_number != default_version.number:
                show_update = True

        query = self.request.get('page_search_query')
        template_values = {'page_search_query': query}
        query = query.strip()
        if len(query) < search.SEARCH_PHRASE_MIN_LENGTH:
            if len(query) > 0:
                template_values.update(
                    {'query_too_short': search.SEARCH_PHRASE_MIN_LENGTH})
            self.render_jinja2_template("searchresults.html", template_values)
            return
        searched_phrases = []

        # Do an async query for all ExerciseVideos, since this may be slow
        exvids_query = ExerciseVideo.all()
        exvids_future = util.async_queries([exvids_query])

        # One full (non-partial) search, then sort by kind
        all_text_keys = Topic.full_text_search(
            query,
            limit=50,
            kind=None,
            stemming=Topic.INDEX_STEMMING,
            multi_word_literal=Topic.INDEX_MULTI_WORD,
            searched_phrases_out=searched_phrases)

        # Quick title-only partial search
        topic_partial_results = filter(
            lambda topic_dict: query in topic_dict["title"].lower(),
            autocomplete.topic_title_dicts())
        video_partial_results = filter(
            lambda video_dict: query in video_dict["title"].lower(),
            autocomplete.video_title_dicts())
        url_partial_results = filter(
            lambda url_dict: query in url_dict["title"].lower(),
            autocomplete.url_title_dicts())

        # Combine results & do one big get!
        all_keys = [str(key_and_title[0]) for key_and_title in all_text_keys]
        all_keys.extend([result["key"] for result in topic_partial_results])
        all_keys.extend([result["key"] for result in video_partial_results])
        all_keys.extend([result["key"] for result in url_partial_results])
        all_keys = list(set(all_keys))

        # Filter out anything that isn't a Topic, Url or Video
        all_keys = [
            key for key in all_keys
            if db.Key(key).kind() in ["Topic", "Url", "Video"]
        ]

        # Get all the entities
        all_entities = db.get(all_keys)

        # Group results by type
        topics = []
        videos = []
        for entity in all_entities:
            if isinstance(entity, Topic):
                topics.append(entity)
            elif isinstance(entity, Video):
                videos.append(entity)
            elif isinstance(entity, Url):
                videos.append(entity)
            elif entity:
                logging.info("Found unknown object " + repr(entity))

        # Get topics for videos not in matching topics
        filtered_videos = []
        filtered_videos_by_key = {}
        for video in videos:
            if [(str(topic.key()) in video.topic_string_keys)
                    for topic in topics].count(True) == 0:
                video_topic = video.first_topic()
                if video_topic != None:
                    topics.append(video_topic)
                    filtered_videos.append(video)
                    filtered_videos_by_key[str(video.key())] = []
            else:
                filtered_videos.append(video)
                filtered_videos_by_key[str(video.key())] = []
        video_count = len(filtered_videos)

        # Get the related exercises
        all_exercise_videos = exvids_future[0].get_result()
        exercise_keys = []
        for exvid in all_exercise_videos:
            video_key = str(ExerciseVideo.video.get_value_for_datastore(exvid))
            if video_key in filtered_videos_by_key:
                exercise_key = ExerciseVideo.exercise.get_value_for_datastore(
                    exvid)
                video_exercise_keys = filtered_videos_by_key[video_key]
                video_exercise_keys.append(exercise_key)
                exercise_keys.append(exercise_key)
        exercises = db.get(exercise_keys)

        # Sort exercises with videos
        video_exercises = {}
        for video_key, exercise_keys in filtered_videos_by_key.iteritems():
            video_exercises[video_key] = map(
                lambda exkey: [
                    exercise for exercise in exercises
                    if exercise.key() == exkey
                ][0], exercise_keys)

        # Count number of videos in each topic and sort descending
        topic_count = 0
        matching_topic_count = 0
        if topics:
            if len(filtered_videos) > 0:
                for topic in topics:
                    topic.match_count = [
                        (str(topic.key()) in video.topic_string_keys)
                        for video in filtered_videos
                    ].count(True)
                    if topic.match_count > 0:
                        topic_count += 1

                topics = sorted(topics,
                                key=lambda topic: topic.match_count,
                                reverse=True)
            else:
                for topic in topics:
                    topic.match_count = 0

            for topic in topics:
                if topic.title.lower() == query:
                    topic.matches = True
                    matching_topic_count += 1

                    child_topics = topic.get_child_topics(
                        include_descendants=True)
                    topic.child_topics = [
                        t for t in child_topics if t.has_content()
                    ]

        template_values.update({
            'show_update': show_update,
            'topics': topics,
            'videos': filtered_videos,
            'video_exercises': video_exercises,
            'search_string': query,
            'video_count': video_count,
            'topic_count': topic_count,
            'matching_topic_count': matching_topic_count
        })

        self.render_jinja2_template("searchresults.html", template_values)
Example #19
    def get_classtime_table_old(self, students_data, dt_start_utc):

        dt_start_ctz = self.dt_to_ctz(dt_start_utc)
        dt_end_ctz = dt_start_ctz + datetime.timedelta(days=1)

        column = 0

        classtime_table = ClassTimeTable(dt_start_ctz, dt_end_ctz)

        # Asynchronously grab all student data at once
        async_queries = []
        for user_data_student in students_data:

            query_problem_logs = ProblemLog.get_for_user_data_between_dts(
                user_data_student, self.dt_to_utc(dt_start_ctz),
                self.dt_to_utc(dt_end_ctz))
            query_video_logs = VideoLog.get_for_user_data_between_dts(
                user_data_student, self.dt_to_utc(dt_start_ctz),
                self.dt_to_utc(dt_end_ctz))

            async_queries.append(query_problem_logs)
            async_queries.append(query_video_logs)

        # Wait for all queries to finish
        results = util.async_queries(async_queries, limit=10000)

        rows = 0
        chunks = 0
        for i, user_data_student in enumerate(students_data):

            problem_logs = results[i * 2].get_result()
            video_logs = results[i * 2 + 1].get_result()

            problem_and_video_logs = []

            for problem_log in problem_logs:
                problem_and_video_logs.append(problem_log)
            for video_log in video_logs:
                problem_and_video_logs.append(video_log)

            problem_and_video_logs = sorted(problem_and_video_logs,
                                            key=lambda log: log.time_started())
            rows += len(problem_and_video_logs)

            chunk_current = None

            for activity in problem_and_video_logs:

                if chunk_current is not None and self.dt_to_ctz(
                        activity.time_started()) > (chunk_current.end +
                                                    self.chunk_delta):
                    chunks += 1

                    classtime_table.drop_into_column_old(chunk_current, column)
                    chunk_current.description()
                    chunk_current = None

                if chunk_current is None:
                    chunk_current = ClassTimeChunk()
                    chunk_current.user_data_student = user_data_student
                    chunk_current.start = self.dt_to_ctz(
                        activity.time_started())
                    chunk_current.end = self.dt_to_ctz(activity.time_ended())

                chunk_current.activities.append(activity)
                chunk_current.end = min(self.dt_to_ctz(activity.time_ended()),
                                        dt_end_ctz)

            if chunk_current is not None:
                chunks += 1

                classtime_table.drop_into_column_old(chunk_current, column)
                chunk_current.description()

            column += 1

        logging.info("old rows=" + str(rows) + ", old chunks=" + str(chunks))
        classtime_table.balance()
        return classtime_table