Code example #1
def get_annotated_topic_tree(request, lang_code=None):
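    # Rescan local videos, then annotate the full topic tree with each video's download status and return it as JSON.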
    call_command("videoscan")  # Could potentially be very slow, blocking request... but at least it's via an API request!

    lang_code = lang_code or request.language      # Get annotations for the current language.
    statusdict = dict(VideoFile.objects.values_list("youtube_id", "percent_complete"))

    return JsonResponse(annotate_topic_tree(get_topic_tree(), statusdict=statusdict, lang_code=lang_code))
Code example #2
File: views.py Project: julianharty/ka-lite-central
def splat_handler(request, splat):
    slugs = filter(lambda x: x, splat.split("/"))
    current_node = topic_tools.get_topic_tree()
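    # Walk down the topic tree, at each step descending into the child whose path is a prefix of the request path.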
    while current_node:
        match = [ch for ch in (current_node.get('children') or []) if request.path.startswith(ch["path"])]
        if len(match) > 1:  # can only happen for leaf nodes (only when one node is blank?)
            match = [m for m in match if request.path == m["path"]]
        if not match:
            raise Http404
        current_node = match[0]
        if request.path == current_node["path"]:
            break

    if current_node["kind"] == "Topic":
        return topic_handler(request, cached_nodes={"topic": current_node})
    elif current_node["kind"] == "Video":
        prev, next = get_neighbor_nodes(current_node, neighbor_kind=current_node["kind"])
        return video_handler(request, cached_nodes={"video": current_node, "prev": prev, "next": next})
    elif current_node["kind"] == "Exercise":
        cached_nodes = topic_tools.get_related_videos(current_node, limit_to_available=False)
        cached_nodes["exercise"] = current_node
        cached_nodes["prev"], cached_nodes["next"] = get_neighbor_nodes(current_node, neighbor_kind=current_node['kind'])
        return exercise_handler(request, cached_nodes=cached_nodes)
    else:
        raise Http404
Code example #3
def get_annotated_topic_tree(request, lang_code=None):
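    # Rescan local videos, then annotate the full topic tree with each video's download status and return it as JSON.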
    call_command("videoscan")  # Could potentially be very slow, blocking request... but at least it's via an API request!

    lang_code = lang_code or request.language      # Get annotations for the current language.
    statusdict = dict(VideoFile.objects.values_list("youtube_id", "percent_complete"))

    return JsonResponse(annotate_topic_tree(get_topic_tree(), statusdict=statusdict, lang_code=lang_code))
Code example #4
File: base.py Project: jeepurs/ka-lite-central
    def setUp(self):
        super(UpdatesTestCase, self).setUp()

        # Set up the topic tree
        stamp_availability_on_topic(get_topic_tree(),
                                    force=True,
                                    stamp_urls=True)
Code example #5
File: kaserve.py Project: jeepurs/ka-lite-central
def preload_global_data():
    if not settings.CENTRAL_SERVER:
        logging.info("Preloading topic data.")
        from kalite.main.topic_tools import get_topic_tree
        from kalite.updates import stamp_availability_on_topic
        stamp_availability_on_topic(get_topic_tree(),
                                    force=True,
                                    stamp_urls=True)
Code example #6
File: caching.py Project: jeepurs/ka-lite-central
def regenerate_all_pages_related_to_videos(video_ids):
    """Regenerate all webpages related to a specific list of videos.  This is good for increasing new server performance."""
    paths_to_regenerate = set()  # unique set
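    # For each video, collect every cached page path from the topic-tree root down to its video pages and related exercise pages.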
    for video_id in video_ids:

        for video_path in topic_tools.get_video_page_paths(video_id=video_id):
            paths_to_regenerate = paths_to_regenerate.union(
                generate_all_paths(path=video_path, base_path=topic_tools.get_topic_tree()['path']))  # start at the root
        for exercise_path in topic_tools.get_exercise_page_paths(video_id=video_id):
            paths_to_regenerate = paths_to_regenerate.union(
                generate_all_paths(path=exercise_path, base_path=topic_tools.get_topic_tree()['path']))  # start at the root

    # Now, regenerate any page.
    for path in paths_to_regenerate:
        create_cache_entry(path=path, force=True)

    return paths_to_regenerate
Code example #7
def invalidate_all_pages_related_to_video(video_id=None):
    """Given a video file, recurse backwards up the hierarchy and invalidate all pages.
    Also include video pages and related exercise pages.
    """

    # Expire all video files and related paths
    video_paths = topic_tools.get_video_page_paths(video_id=video_id)
    exercise_paths = topic_tools.get_exercise_page_paths(video_id=video_id)
    leaf_paths = set(video_paths).union(set(exercise_paths))

    for leaf_path in leaf_paths:
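        # Expire every cached page on the path from the topic-tree root down to this leaf.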
        all_paths = generate_all_paths(path=leaf_path, base_path=topic_tools.get_topic_tree()['path'])
        for path in filter(has_cache_key, all_paths):  # start at the root
            expire_page(path=path)
Code example #8
File: views.py Project: mashuq05/ka-lite-central
    def refresh_topic_cache_wrapper_fn(request,
                                       cached_nodes={},
                                       force=False,
                                       *args,
                                       **kwargs):
        """
        Centralized logic for how to refresh the topic cache, for each type of object.

        When the object is about to be used, this code refreshes its data,
        balancing correctness against efficiency.
        """
        if not cached_nodes:
            cached_nodes = {"topics": topic_tools.get_topic_tree()}

        def has_computed_urls(node):
            return "subtitles" in node.get("availability", {}).get("en", {})

        for node in cached_nodes.values():
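            # Refresh availability data for this node according to its kind (Video, Exercise, or Topic).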
            if not node:
                continue
            has_children = bool(node.get("children"))

            # Properties not yet marked
            if node["kind"] == "Video":
                if force or not has_computed_urls(node):
                    recount_videos_and_invalidate_parents(get_parent(node),
                                                          force=True,
                                                          stamp_urls=True)

            elif node["kind"] == "Exercise":
                for video in topic_tools.get_related_videos(
                        exercise=node).values():
                    if not has_computed_urls(node):
                        stamp_availability_on_video(
                            video, force=True)  # will be done by force below

            elif node["kind"] == "Topic":
                bottom_layer_topic = "Topic" not in node["contains"]
                # always run do_video_counts_need_update_question_mark(), to make sure the (internal) counts stay up to date.
                force = do_video_counts_need_update_question_mark() or force or bottom_layer_topic
                recount_videos_and_invalidate_parents(
                    node,
                    force=force,
                    stamp_urls=bottom_layer_topic,
                )

        kwargs.update(cached_nodes)
        return handler(request, *args, **kwargs)
Code example #9
File: caching.py Project: jeepurs/ka-lite-central
def invalidate_all_pages_related_to_video(video_id=None):
    """Given a video file, recurse backwards up the hierarchy and invalidate all pages.
    Also include video pages and related exercise pages.
    """

    # Expire all video files and related paths
    video_paths = topic_tools.get_video_page_paths(video_id=video_id)
    exercise_paths = topic_tools.get_exercise_page_paths(video_id=video_id)
    leaf_paths = set(video_paths).union(set(exercise_paths))

    for leaf_path in leaf_paths:
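        # Expire every cached page on the path from the topic-tree root down to this leaf.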
        all_paths = generate_all_paths(
            path=leaf_path, base_path=topic_tools.get_topic_tree()['path'])
        for path in filter(has_cache_key, all_paths):  # start at the root
            expire_page(path=path)
Code example #10
File: views.py Project: julianharty/ka-lite-central
    def refresh_topic_cache_wrapper_fn(request, cached_nodes={}, force=False, *args, **kwargs):
        """
        Centralized logic for how to refresh the topic cache, for each type of object.

        When the object is about to be used, this code refreshes its data,
        balancing correctness against efficiency.
        """
        if not cached_nodes:
            cached_nodes = {"topics": topic_tools.get_topic_tree()}

        def has_computed_urls(node):
            return "subtitles" in node.get("availability", {}).get("en", {})

        for node in cached_nodes.values():
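            # Refresh availability data for this node according to its kind (Video, Exercise, or Topic).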
            if not node:
                continue
            has_children = bool(node.get("children"))

            # Properties not yet marked
            if node["kind"] == "Video":
                if force or not has_computed_urls(node):
                    recount_videos_and_invalidate_parents(get_parent(node), force=True, stamp_urls=True)

            elif node["kind"] == "Exercise":
                for video in topic_tools.get_related_videos(exercise=node).values():
                    if not has_computed_urls(node):
                        stamp_availability_on_video(video, force=True)  # will be done by force below

            elif node["kind"] == "Topic":
                bottom_layer_topic = "Topic" not in node["contains"]
                # always run do_video_counts_need_update_question_mark(), to make sure the (internal) counts stay up to date.
                force = do_video_counts_need_update_question_mark() or force or bottom_layer_topic
                recount_videos_and_invalidate_parents(
                    node,
                    force=force,
                    stamp_urls=bottom_layer_topic,
                )

        kwargs.update(cached_nodes)
        return handler(request, *args, **kwargs)
Code example #11
File: views.py Project: mashuq05/ka-lite-central
def splat_handler(request, splat):
    slugs = filter(lambda x: x, splat.split("/"))
    current_node = topic_tools.get_topic_tree()
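    # Walk down the topic tree, at each step descending into the child whose path is a prefix of the request path.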
    while current_node:
        match = [
            ch for ch in (current_node.get('children') or [])
            if request.path.startswith(ch["path"])
        ]
        if len(match) > 1:  # can only happen for leaf nodes (only when one node is blank?)
            match = [m for m in match if request.path == m["path"]]
        if not match:
            raise Http404
        current_node = match[0]
        if request.path == current_node["path"]:
            break

    if current_node["kind"] == "Topic":
        return topic_handler(request, cached_nodes={"topic": current_node})
    elif current_node["kind"] == "Video":
        prev, next = get_neighbor_nodes(current_node,
                                        neighbor_kind=current_node["kind"])
        return video_handler(request,
                             cached_nodes={
                                 "video": current_node,
                                 "prev": prev,
                                 "next": next
                             })
    elif current_node["kind"] == "Exercise":
        cached_nodes = topic_tools.get_related_videos(current_node,
                                                      limit_to_available=False)
        cached_nodes["exercise"] = current_node
        cached_nodes["prev"], cached_nodes["next"] = get_neighbor_nodes(
            current_node, neighbor_kind=current_node['kind'])
        return exercise_handler(request, cached_nodes=cached_nodes)
    else:
        raise Http404
Code example #12
def student_view_context(request, xaxis="pct_mastery", yaxis="ex:attempts"):
    """
    Context is built separately so that it can be imported by similar pages.
    """
    user = get_user_from_request(request=request)
    if not user:
        raise Http404("User not found.")

    node_cache = get_node_cache()
    topic_ids = get_knowledgemap_topics()
    topic_ids += [
        ch["id"] for node in get_topic_tree()["children"]
        for ch in node["children"] if node["id"] != "math"
    ]
    topics = [node_cache["Topic"][id][0] for id in topic_ids]

    user_id = user.id
    exercise_logs = list(ExerciseLog.objects \
        .filter(user=user) \
        .values("exercise_id", "complete", "points", "attempts", "streak_progress", "struggling", "completion_timestamp"))
    video_logs = list(VideoLog.objects \
        .filter(user=user) \
        .values("video_id", "complete", "total_seconds_watched", "points", "completion_timestamp"))

    exercise_sparklines = dict()
    stats = dict()
    topic_exercises = dict()
    topic_videos = dict()
    exercises_by_topic = dict()
    videos_by_topic = dict()

    # Categorize every exercise log into a "midlevel" exercise
    for elog in exercise_logs:
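        # Map this log onto one of the "midlevel" topics via the exercise's ancestor IDs.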
        if not elog["exercise_id"] in node_cache["Exercise"]:
            # Sometimes KA updates their topic tree and eliminates exercises;
            #   we also want to support 3rd party switching of trees arbitrarily.
            logging.debug("Skip unknown exercise log for %s/%s" %
                          (user_id, elog["exercise_id"]))
            continue

        parent_ids = [
            topic for ex in node_cache["Exercise"][elog["exercise_id"]]
            for topic in ex["ancestor_ids"]
        ]
        topic = set(parent_ids).intersection(set(topic_ids))
        if not topic:
            logging.error(
                "Could not find a topic for exercise %s (parents=%s)" %
                (elog["exercise_id"], parent_ids))
            continue
        topic = topic.pop()
        if not topic in topic_exercises:
            topic_exercises[topic] = get_topic_exercises(
                path=node_cache["Topic"][topic][0]["path"])
        exercises_by_topic[topic] = exercises_by_topic.get(topic, []) + [elog]

    # Categorize every video log into a "midlevel" exercise.
    for vlog in video_logs:
        if not vlog["video_id"] in node_cache["Video"]:
            # Sometimes KA updates their topic tree and eliminates videos;
            #   we also want to support 3rd party switching of trees arbitrarily.
            logging.debug("Skip unknown video log for %s/%s" %
                          (user_id, vlog["video_id"]))
            continue

        parent_ids = [
            topic for vid in node_cache["Video"][vlog["video_id"]]
            for topic in vid["ancestor_ids"]
        ]
        topic = set(parent_ids).intersection(set(topic_ids))
        if not topic:
            logging.error("Could not find a topic for video %s (parents=%s)" %
                          (vlog["video_id"], parent_ids))
            continue
        topic = topic.pop()
        if not topic in topic_videos:
            topic_videos[topic] = get_topic_videos(
                path=node_cache["Topic"][topic][0]["path"])
        videos_by_topic[topic] = videos_by_topic.get(topic, []) + [vlog]

    # Now compute stats
    for id in topic_ids:  #set(topic_exercises.keys()).union(set(topic_videos.keys())):
        n_exercises = len(topic_exercises.get(id, []))
        n_videos = len(topic_videos.get(id, []))

        exercises = exercises_by_topic.get(id, [])
        videos = videos_by_topic.get(id, [])
        n_exercises_touched = len(exercises)
        n_videos_touched = len(videos)

        exercise_sparklines[id] = [
            el["completion_timestamp"]
            for el in filter(lambda n: n["complete"], exercises)
        ]

        # total streak currently a pct, but expressed in max 100; convert to
        # proportion (like other percentages here)
        stats[id] = {
            "ex:pct_mastery": 0 if not n_exercises_touched else sum([el["complete"] for el in exercises]) / float(n_exercises),
            "ex:pct_started": 0 if not n_exercises_touched else n_exercises_touched / float(n_exercises),
            "ex:average_points": 0 if not n_exercises_touched else sum([el["points"] for el in exercises]) / float(n_exercises_touched),
            "ex:average_attempts": 0 if not n_exercises_touched else sum([el["attempts"] for el in exercises]) / float(n_exercises_touched),
            "ex:average_streak": 0 if not n_exercises_touched else sum([el["streak_progress"] for el in exercises]) / float(n_exercises_touched) / 100.,
            "ex:total_struggling": 0 if not n_exercises_touched else sum([el["struggling"] for el in exercises]),
            "ex:last_completed": None if not n_exercises_touched else max_none([el["completion_timestamp"] or None for el in exercises]),
            "vid:pct_started": 0 if not n_videos_touched else n_videos_touched / float(n_videos),
            "vid:pct_completed": 0 if not n_videos_touched else sum([vl["complete"] for vl in videos]) / float(n_videos),
            "vid:total_minutes": 0 if not n_videos_touched else sum([vl["total_seconds_watched"] for vl in videos]) / 60.,
            "vid:average_points": 0. if not n_videos_touched else float(sum([vl["points"] for vl in videos]) / float(n_videos_touched)),
            "vid:last_completed": None if not n_videos_touched else max_none([vl["completion_timestamp"] or None for vl in videos]),
        }

    context = plotting_metadata_context(request)

    return {
        "form": context["form"],
        "groups": context["groups"],
        "facilities": context["facilities"],
        "student": user,
        "topics": topics,
        "exercises": topic_exercises,
        "exercise_logs": exercises_by_topic,
        "video_logs": videos_by_topic,
        "exercise_sparklines": exercise_sparklines,
        "no_data": not exercise_logs and not video_logs,
        "stats": stats,
        "stat_defs": [  # this order determines the order of display
            {"key": "ex:pct_mastery", "title": _("% Mastery"), "type": "pct"},
            {"key": "ex:pct_started", "title": _("% Started"), "type": "pct"},
            {"key": "ex:average_points", "title": _("Average Points"), "type": "float"},
            {"key": "ex:average_attempts", "title": _("Average Attempts"), "type": "float"},
            {"key": "ex:average_streak", "title": _("Average Streak"), "type": "pct"},
            {"key": "ex:total_struggling", "title": _("Struggling"), "type": "int"},
            {"key": "ex:last_completed", "title": _("Last Completed"), "type": "date"},
            {"key": "vid:pct_completed", "title": _("% Completed"), "type": "pct"},
            {"key": "vid:pct_started", "title": _("% Started"), "type": "pct"},
            {"key": "vid:total_minutes", "title": _("Average Minutes Watched"), "type": "float"},
            {"key": "vid:average_points", "title": _("Average Points"), "type": "float"},
            {"key": "vid:last_completed", "title": _("Last Completed"), "type": "date"},
        ]
    }
Code example #13
File: views.py Project: julianharty/ka-lite-central
def student_view_context(request, xaxis="pct_mastery", yaxis="ex:attempts"):
    """
    Context is built separately so that it can be imported by similar pages.
    """
    user = get_user_from_request(request=request)
    if not user:
        raise Http404("User not found.")

    node_cache = get_node_cache()
    topic_ids = get_knowledgemap_topics()
    topic_ids += [ch["id"] for node in get_topic_tree()["children"] for ch in node["children"] if node["id"] != "math"]
    topics = [node_cache["Topic"][id][0] for id in topic_ids]

    user_id = user.id
    exercise_logs = list(
        ExerciseLog.objects.filter(user=user).values(
            "exercise_id", "complete", "points", "attempts", "streak_progress", "struggling", "completion_timestamp"
        )
    )
    video_logs = list(
        VideoLog.objects.filter(user=user).values(
            "video_id", "complete", "total_seconds_watched", "points", "completion_timestamp"
        )
    )

    exercise_sparklines = dict()
    stats = dict()
    topic_exercises = dict()
    topic_videos = dict()
    exercises_by_topic = dict()
    videos_by_topic = dict()

    # Categorize every exercise log into a "midlevel" exercise
    for elog in exercise_logs:
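        # Map this log onto one of the "midlevel" topics via the exercise's ancestor IDs.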
        if not elog["exercise_id"] in node_cache["Exercise"]:
            # Sometimes KA updates their topic tree and eliminates exercises;
            #   we also want to support 3rd party switching of trees arbitrarily.
            logging.debug("Skip unknown exercise log for %s/%s" % (user_id, elog["exercise_id"]))
            continue

        parent_ids = [topic for ex in node_cache["Exercise"][elog["exercise_id"]] for topic in ex["ancestor_ids"]]
        topic = set(parent_ids).intersection(set(topic_ids))
        if not topic:
            logging.error("Could not find a topic for exercise %s (parents=%s)" % (elog["exercise_id"], parent_ids))
            continue
        topic = topic.pop()
        if not topic in topic_exercises:
            topic_exercises[topic] = get_topic_exercises(path=node_cache["Topic"][topic][0]["path"])
        exercises_by_topic[topic] = exercises_by_topic.get(topic, []) + [elog]

    # Categorize every video log into a "midlevel" exercise.
    for vlog in video_logs:
        if not vlog["video_id"] in node_cache["Video"]:
            # Sometimes KA updates their topic tree and eliminates videos;
            #   we also want to support 3rd party switching of trees arbitrarily.
            logging.debug("Skip unknown video log for %s/%s" % (user_id, vlog["video_id"]))
            continue

        parent_ids = [topic for vid in node_cache["Video"][vlog["video_id"]] for topic in vid["ancestor_ids"]]
        topic = set(parent_ids).intersection(set(topic_ids))
        if not topic:
            logging.error("Could not find a topic for video %s (parents=%s)" % (vlog["video_id"], parent_ids))
            continue
        topic = topic.pop()
        if not topic in topic_videos:
            topic_videos[topic] = get_topic_videos(path=node_cache["Topic"][topic][0]["path"])
        videos_by_topic[topic] = videos_by_topic.get(topic, []) + [vlog]

    # Now compute stats
    for id in topic_ids:  # set(topic_exercises.keys()).union(set(topic_videos.keys())):
        n_exercises = len(topic_exercises.get(id, []))
        n_videos = len(topic_videos.get(id, []))

        exercises = exercises_by_topic.get(id, [])
        videos = videos_by_topic.get(id, [])
        n_exercises_touched = len(exercises)
        n_videos_touched = len(videos)

        exercise_sparklines[id] = [el["completion_timestamp"] for el in filter(lambda n: n["complete"], exercises)]

        # total streak currently a pct, but expressed in max 100; convert to
        # proportion (like other percentages here)
        stats[id] = {
            "ex:pct_mastery": 0
            if not n_exercises_touched
            else sum([el["complete"] for el in exercises]) / float(n_exercises),
            "ex:pct_started": 0 if not n_exercises_touched else n_exercises_touched / float(n_exercises),
            "ex:average_points": 0
            if not n_exercises_touched
            else sum([el["points"] for el in exercises]) / float(n_exercises_touched),
            "ex:average_attempts": 0
            if not n_exercises_touched
            else sum([el["attempts"] for el in exercises]) / float(n_exercises_touched),
            "ex:average_streak": 0
            if not n_exercises_touched
            else sum([el["streak_progress"] for el in exercises]) / float(n_exercises_touched) / 100.0,
            "ex:total_struggling": 0 if not n_exercises_touched else sum([el["struggling"] for el in exercises]),
            "ex:last_completed": None
            if not n_exercises_touched
            else max_none([el["completion_timestamp"] or None for el in exercises]),
            "vid:pct_started": 0 if not n_videos_touched else n_videos_touched / float(n_videos),
            "vid:pct_completed": 0
            if not n_videos_touched
            else sum([vl["complete"] for vl in videos]) / float(n_videos),
            "vid:total_minutes": 0
            if not n_videos_touched
            else sum([vl["total_seconds_watched"] for vl in videos]) / 60.0,
            "vid:average_points": 0.0
            if not n_videos_touched
            else float(sum([vl["points"] for vl in videos]) / float(n_videos_touched)),
            "vid:last_completed": None
            if not n_videos_touched
            else max_none([vl["completion_timestamp"] or None for vl in videos]),
        }

    context = plotting_metadata_context(request)

    return {
        "form": context["form"],
        "groups": context["groups"],
        "facilities": context["facilities"],
        "student": user,
        "topics": topics,
        "exercises": topic_exercises,
        "exercise_logs": exercises_by_topic,
        "video_logs": videos_by_topic,
        "exercise_sparklines": exercise_sparklines,
        "no_data": not exercise_logs and not video_logs,
        "stats": stats,
        "stat_defs": [  # this order determines the order of display
            {"key": "ex:pct_mastery", "title": _("% Mastery"), "type": "pct"},
            {"key": "ex:pct_started", "title": _("% Started"), "type": "pct"},
            {"key": "ex:average_points", "title": _("Average Points"), "type": "float"},
            {"key": "ex:average_attempts", "title": _("Average Attempts"), "type": "float"},
            {"key": "ex:average_streak", "title": _("Average Streak"), "type": "pct"},
            {"key": "ex:total_struggling", "title": _("Struggling"), "type": "int"},
            {"key": "ex:last_completed", "title": _("Last Completed"), "type": "date"},
            {"key": "vid:pct_completed", "title": _("% Completed"), "type": "pct"},
            {"key": "vid:pct_started", "title": _("% Started"), "type": "pct"},
            {"key": "vid:total_minutes", "title": _("Average Minutes Watched"), "type": "float"},
            {"key": "vid:average_points", "title": _("Average Points"), "type": "float"},
            {"key": "vid:last_completed", "title": _("Last Completed"), "type": "date"},
        ],
    }
Code example #14
File: views.py Project: julianharty/ka-lite-central
def watch_home(request):
    """Dummy wrapper function for topic_handler with url=/"""
    return topic_handler(request, cached_nodes={"topic": get_topic_tree()})
Code example #15
def preload_global_data():
    if not settings.CENTRAL_SERVER:
        logging.info("Preloading topic data.")
        from kalite.main.topic_tools import get_topic_tree
        from kalite.updates import stamp_availability_on_topic
        stamp_availability_on_topic(get_topic_tree(), force=True, stamp_urls=True)
Code example #16
def regenerate_all_pages_related_to_videos(video_ids):
    """Regenerate all webpages related to a specific list of videos.  This is good for increasing new server performance."""
    paths_to_regenerate = set() # unique set
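    # For each video, collect every cached page path from the topic-tree root down to its video pages and related exercise pages.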
    for video_id in video_ids:

        for video_path in topic_tools.get_video_page_paths(video_id=video_id):
            paths_to_regenerate = paths_to_regenerate.union(generate_all_paths(path=video_path, base_path=topic_tools.get_topic_tree()['path']))  # start at the root
        for exercise_path in topic_tools.get_exercise_page_paths(video_id=video_id):
            paths_to_regenerate = paths_to_regenerate.union(generate_all_paths(path=exercise_path, base_path=topic_tools.get_topic_tree()['path']))  # start at the root

    # Now, regenerate any page.
    for path in paths_to_regenerate:
        create_cache_entry(path=path, force=True)

    return paths_to_regenerate
Code example #17
    def setUp(self):
        super(UpdatesTestCase, self).setUp()

        # Set up the topic tree
        stamp_availability_on_topic(get_topic_tree(), force=True, stamp_urls=True)
Code example #18
File: views.py Project: mashuq05/ka-lite-central
def watch_home(request):
    """Dummy wrapper function for topic_handler with url=/"""
    return topic_handler(request, cached_nodes={"topic": get_topic_tree()})