def page_content(lesson, page, solution=None, course=None, lesson_url=None,
                 subpage_url=None, static_url=None, without_cache=False):
    variables = None
    if course is not None:
        variables = course.vars

    def content_creator():
        """Return the content and all relative URLs used in it.

        Since the content is stored in cache and can be reused elsewhere,
        URLs must be stored as relative to the current page, so new
        absolute urls can be generated where the content is reused.
        """
        with temporary_url_for_logger(app) as logger:
            with logger:
                content = page.render_html(
                    solution=solution,
                    static_url=static_url,
                    lesson_url=lesson_url,
                    subpage_url=subpage_url,
                    vars=variables,
                )

        absolute_urls = [
            url_for(logged[0], **logged[1]) for logged in logger.logged_calls
        ]
        relative_urls = [
            get_relative_url(request.path, x) for x in absolute_urls
        ]

        return {"content": content, "urls": relative_urls}

    # Only use the cache if there are no local changes
    # and not rendering in fork
    if without_cache or is_dirty(Repo(".")):
        return content_creator()

    # Since ARCA_IGNORE_CACHE_ERRORS is used, this won't fail in forks
    # even if the cache doesn't work.
    # This is only dangerous if the fork sets absolute path to cache and
    # CurrentEnvironmentBackend or VenvBackend are used locally.
    # FIXME? But I don't think there's a way to prevent writing
    # to a file in those backends
    content_key = page_content_cache_key(Repo("."), lesson.slug, page.slug,
                                         solution, variables)

    cached = arca.region.get_or_create(content_key, content_creator)

    # The urls are added twice to ``absolute_urls_to_freeze``
    # when the content is created.
    # But it doesn't matter, duplicate URLs are skipped.
    absolute_urls = [urljoin(request.path, x) for x in cached["urls"]]
    absolute_urls_to_freeze.extend(absolute_urls)

    return cached
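
# Illustrative sketch (not part of the original code): the cache above stores
# URLs relative to the page they were rendered for, so the same cached
# fragment can be re-absolutized with ``urljoin`` wherever it is reused.
# ``get_relative_url`` is the project's own helper; the stand-in below uses
# ``posixpath.relpath`` only to demonstrate the round trip and may differ
# from the real implementation.
def _example_relative_url_round_trip():
    import posixpath
    from urllib.parse import urljoin

    current_page = "/course/test-course/beginners/cmdline/"
    absolute_url = "/course/test-course/static/style.css"

    # What gets stored in the cache (stand-in for ``get_relative_url``).
    relative_url = posixpath.relpath(absolute_url,
                                     posixpath.dirname(current_page))
    assert relative_url == "../../static/style.css"

    # What gets reconstructed when the cached entry is reused elsewhere.
    assert urljoin(current_page, relative_url) == absolute_url
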
def test_cache_offer(model):
    """Test that forks don't render content when the content exists in cache."""
    repo = arca.get_repo(model.courses["test-course"].repo,
                         model.courses["test-course"].branch)
    content_key = page_content_cache_key(
        repo, "beginners/cmdline", "index", None,
        model.courses["test-course"].vars)

    result = model.courses["test-course"].render_page(
        "beginners/cmdline", "index", None,
        content_key=content_key,
        request_url="/course/test-course/beginners/cmdline/")

    assert result["content"] is None

    # Also test that the content is rendered when the provided key
    # is going to be rejected.
    result = model.courses["test-course"].render_page(
        "beginners/cmdline", "index", None,
        content_key=content_key + "asdfasdf",
        request_url="/course/test-course/beginners/cmdline/")

    assert result["content"] is not None
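
# Hypothetical sketch, not the project's actual ``page_content_cache_key``:
# a key like the one used in the test above could be derived from everything
# that influences the rendered fragment, e.g. the current commit of the repo,
# the lesson/page/solution identifiers and the course variables. The real
# implementation may differ; this only illustrates why appending "asdfasdf"
# to the key makes the fork reject the offer and render the content itself.
def _example_cache_key(repo, lesson_slug, page_slug, solution, variables):
    import hashlib
    import json

    payload = json.dumps(
        [repo.head.commit.hexsha, lesson_slug, page_slug, solution, variables],
        sort_keys=True,
        default=str,
    )
    return hashlib.sha256(payload.encode("utf-8")).hexdigest()
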
def course_page(course, lesson, page, solution=None):
    lesson_slug = lesson
    page_slug = page

    try:
        lesson = model.get_lesson(lesson_slug)
        canonical_url = url_for('lesson', lesson=lesson, _external=True)
    except LookupError:
        lesson = canonical_url = None

    kwargs = {}
    prev_link = session_link = next_link = session = None

    if course.is_link():
        naucse.utils.views.forks_raise_if_disabled()

        fork_kwargs = {"request_url": request.path}

        try:
            # Checks if the rendered page content is in cache locally
            # to offer it to the fork.
            # ``course.vars`` calls ``course_info`` so it has to be in
            # the try block.
            # The function can also raise FileNotFoundError if the
            # lesson doesn't exist in the repo.
            content_key = page_content_cache_key(
                arca.get_repo(course.repo, course.branch),
                lesson_slug, page, solution, course.vars)
            content_offer = arca.region.get(content_key)

            # We've got the fragment in cache, let's offer it to the fork.
            if content_offer:
                fork_kwargs["content_key"] = content_key

            data_from_fork = course.render_page(lesson_slug, page, solution,
                                                **fork_kwargs)
            record_content_urls(data_from_fork, f"/{course.slug}/")

            content = data_from_fork["content"]

            if content is None:
                # the offer was accepted
                content = content_offer["content"]
                for x in content_offer["urls"]:
                    record_url(urljoin(request.path, x))
            else:
                # the offer was rejected or the fragment was not in cache
                arca.region.set(content_key, {
                    "content": content,
                    "urls": data_from_fork["content_urls"]
                })
                for x in data_from_fork["content_urls"]:
                    record_url(urljoin(request.path, x))

            # compatibility
            page = process_page_data(data_from_fork.get("page"))
            course = process_course_data(data_from_fork.get("course"),
                                         slug=course.slug)
            session = process_session_data(data_from_fork.get("session"))
            kwargs["edit_info"] = links.process_edit_info(
                data_from_fork.get("edit_info"))
            prev_link, session_link, next_link = process_footer_data(
                data_from_fork.get("footer"))

            title = '{}: {}'.format(course["title"], page["title"])
        except POSSIBLE_FORK_EXCEPTIONS as e:
            if raise_errors_from_forks():
                raise

            rendered_replacement = False

            logger.error("There was an error rendering url %s for course '%s'",
                         request.path, course.slug)

            if lesson is not None:
                try:
                    logger.error(
                        "Rendering the canonical version with a warning.")

                    lesson_url, subpage_url, static_url = relative_url_functions(
                        request.path, course, lesson)
                    page = lesson.pages[page]
                    content = page_content(
                        lesson, page, solution, course,
                        lesson_url=lesson_url, subpage_url=subpage_url,
                        static_url=static_url)["content"]
                    title = '{}: {}'.format(course.title, page.title)

                    try:
                        footer_links = course.get_footer_links(
                            lesson.slug, page_slug,
                            request_url=request.path,
                        )
                        for link in footer_links:
                            _prefix = f"/{course.slug}/"
                            if link and link["url"].startswith(_prefix):
                                record_url(link["url"])
                        prev_link, session_link, next_link = footer_links
                    except POSSIBLE_FORK_EXCEPTIONS as e:
                        if raise_errors_from_forks():
                            raise
                        # The fork is failing spectacularly, so the footer
                        # links aren't that important
                        logger.error(
                            "Could not retrieve even footer links "
                            "from the fork at page %s",
                            request.path)
                        logger.exception(e)

                    rendered_replacement = True
                    kwargs["edit_info"] = get_edit_info(page.edit_path)
                    kwargs["error_in_fork"] = True
                    kwargs["travis_build_id"] = os.environ.get(
                        "TRAVIS_BUILD_ID")
                except Exception as canonical_error:
                    logger.error("Rendering the canonical version failed.")
                    logger.exception(canonical_error)

            if not rendered_replacement:
                logger.exception(e)
                return render_template(
                    "error_in_fork.html",
                    malfunctioning_course=course,
                    edit_info=get_edit_info(course.edit_path),
                    faulty_page="lesson",
                    lesson=lesson_slug,
                    pg=page_slug,  # avoid name conflict
                    solution=solution,
                    root_slug=model.meta.slug,
                    travis_build_id=os.environ.get("TRAVIS_BUILD_ID"),
                )
    else:
        if lesson is None:
            abort(404)

        lesson_url, subpage_url, static_url = relative_url_functions(
            request.path, course, lesson)

        page, session, prv, nxt = get_page(course, lesson, page)

        prev_link, session_link, next_link = get_footer_links(
            course, session, prv, nxt, lesson_url)

        content = page_content(
            lesson, page, solution, course=course,
            lesson_url=lesson_url, subpage_url=subpage_url,
            static_url=static_url)
        content = content["content"]
        allowed_elements_parser.reset_and_feed(content)
        title = '{}: {}'.format(course.title, page.title)

        kwargs["edit_info"] = get_edit_info(page.edit_path)

    if solution is not None:
        kwargs["solution_number"] = int(solution)

    return render_template(
        "lesson.html",
        canonical_url=canonical_url,
        title=title,
        content=content,
        prev_link=prev_link,
        session_link=session_link,
        next_link=next_link,
        root_slug=model.meta.slug,
        course=course,
        lesson=lesson,
        page=page,
        solution=solution,
        session=session,
        **kwargs
    )
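
# Note (assumption: ``arca.region`` is a dogpile.cache ``CacheRegion``, which
# is what Arca uses for its result cache): ``region.get`` returns the falsy
# ``NO_VALUE`` sentinel on a cache miss, which is why the plain truthiness
# check on ``content_offer`` in ``course_page`` above is enough to tell
# "nothing to offer" apart from a cached fragment. A minimal, explicit
# version of that lookup:
def _example_offer_lookup(region, content_key):
    from dogpile.cache.api import NO_VALUE

    content_offer = region.get(content_key)
    if content_offer is NO_VALUE:
        # Nothing cached locally, so no key is offered to the fork.
        return None
    return content_offer  # {"content": ..., "urls": [...]}
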
def render(page_type: str, slug: str, *args, **kwargs) -> Dict[str, Any]:
    """Return a rendered page for a course, based on page_type and slug."""
    course = get_course_from_slug(slug)

    if course.is_link():
        raise ValueError("Circular dependency.")

    path = []
    if kwargs.get("request_url"):
        path = [kwargs["request_url"]]

    logger = UrlForLogger(views.app)
    with views.app.test_request_context(*path):
        with logger:
            info = {
                "course": {
                    "title": course.title,
                    "url": views.course_url(course),
                    "vars": course.vars,
                    "canonical": course.canonical,
                    "is_derived": course.is_derived,
                },
            }

            if page_type == "course":
                info["content"] = views.course_content(course)
                info["edit_info"] = get_edit_info(course.edit_path)

            elif page_type == "calendar":
                info["content"] = views.course_calendar_content(course)
                info["edit_info"] = get_edit_info(course.edit_path)

            elif page_type == "calendar_ics":
                info["calendar"] = str(views.generate_calendar_ics(course))
                info["edit_info"] = get_edit_info(course.edit_path)

            elif page_type == "course_page":
                lesson_slug, page, solution, *_ = args
                lesson = views.model.get_lesson(lesson_slug)

                content_offer_key = kwargs.get("content_key")
                not_processed = object()
                content = not_processed

                if content_offer_key is not None:
                    # the base repository has a cached version of the content
                    content_key = page_content_cache_key(
                        Repo("."), lesson_slug, page, solution, course.vars)

                    # if the key matches what would be produced here,
                    # let's not return anything and the cached version
                    # will be used
                    if content_offer_key == content_key:
                        content = None

                request_url = kwargs.get("request_url")
                if request_url is None:
                    request_url = url_for('course_page', course=course,
                                          lesson=lesson, page=page,
                                          solution=solution)

                lesson_url, subpage_url, static_url = views.relative_url_functions(
                    request_url, course, lesson)

                page, session, prv, nxt = views.get_page(course, lesson, page)

                # if the content isn't cached or the offer was refused,
                # let's render the content here (but just the content, not
                # the whole page with headers, menus etc.)
                if content is not_processed:
                    content = views.page_content(
                        lesson, page, solution, course,
                        lesson_url=lesson_url,
                        subpage_url=subpage_url,
                        static_url=static_url,
                        without_cache=True,
                    )

                if content is None:
                    info["content"] = None
                    info["content_urls"] = []
                else:
                    info["content"] = content["content"]
                    info["content_urls"] = content["urls"]

                info.update({
                    "page": {
                        "title": page.title,
                        # not page.css since we want the css without limitation
                        "css": page.info.get("css"),
                        "latex": page.latex,
                        "attributions": page.attributions,
                        "license": serialize_license(page.license),
                        "license_code": serialize_license(page.license_code)
                    },
                    "edit_info": get_edit_info(page.edit_path)
                })

                if session is not None:
                    info["session"] = {
                        "title": session.title,
                        "url": url_for("session_coverpage",
                                       course=course.slug,
                                       session=session.slug),
                        "slug": session.slug,
                    }

                prev_link, session_link, next_link = views.get_footer_links(
                    course, session, prv, nxt, lesson_url)
                info["footer"] = {
                    "prev_link": prev_link,
                    "session_link": session_link,
                    "next_link": next_link
                }

            elif page_type == "session_coverpage":
                session_slug, coverpage, *_ = args
                session = course.sessions.get(session_slug)

                info.update({
                    "session": {
                        "title": session.title,
                        "url": url_for("session_coverpage",
                                       course=course.slug,
                                       session=session.slug),
                    },
                    "content": views.session_coverpage_content(
                        course, session, coverpage),
                    "edit_info": get_edit_info(
                        session.get_edit_path(course, coverpage)),
                })
            else:
                raise ValueError("Invalid page type.")

        # generate a list of absolute urls which need to be frozen further
        urls = set()
        for endpoint, values in logger.iter_calls():
            url = url_for(endpoint, **values)
            # this is checked once again in the main repo,
            # but let's save cache space
            if url.startswith(f"/{slug}"):
                urls.add(url)

    info["urls"] = list(urls)

    return info
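
# Illustrative example (all values made up, nested shapes simplified) of the
# structure that ``render("course_page", ...)`` returns across the
# fork/base-repo boundary, assembled from the code above. ``content`` is None
# and ``content_urls`` is empty when the base repository's cache offer was
# accepted; ``urls`` always lists the absolute URLs the base repository still
# needs to freeze. The nested values for ``edit_info``, ``license`` and the
# footer links are placeholders for whatever the respective helpers produce.
EXAMPLE_COURSE_PAGE_INFO = {
    "course": {
        "title": "Example course",
        "url": "/course/test-course/",
        "vars": {},
        "canonical": False,
        "is_derived": False,
    },
    "content": "<p>rendered lesson fragment</p>",  # None if the offer was accepted
    "content_urls": ["../../static/style.css"],
    "page": {
        "title": "Example page",
        "css": None,
        "latex": False,
        "attributions": ["..."],
        "license": "...",        # whatever serialize_license() produces
        "license_code": None,
    },
    "edit_info": "...",          # whatever get_edit_info() produces
    "session": {
        "title": "First session",
        "url": "/course/test-course/sessions/first/",
        "slug": "first",
    },
    "footer": {
        "prev_link": None,
        "session_link": "...",   # link data from get_footer_links()
        "next_link": None,
    },
    "urls": ["/course/test-course/sessions/first/"],
}
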