def get_links_info(bucket):
    """Return the bucket's link index: {linked_path: [path1, path2, ...]}.

    Reads the nested ``links.links`` mapping out of the bucket's posts info,
    falling back to an empty dict at every level so callers always get a dict.
    """
    posts_info = get_bucket_posts_info(bucket) or {}
    links_section = posts_info.get('links') or {}  # outer 'links' container
    if not isinstance(links_section, dict):
        links_section = {}
    link_map = links_section.get('links') or {}  # inner links.links mapping
    if not isinstance(link_map, dict):
        link_map = {}
    return link_map
def show_wiki_nodes_as_sub_site():
    """Serve the wiki-nodes graph view as a sub-site under /wiki_nodes.

    Returns None (no-op) unless: a bucket is bound to the request context,
    the request path starts with 'wiki_nodes', wiki nodes are enabled in
    __wiki.json, and a wiki_root is configured.  With ?type=data it returns
    the node-graph JSON (with per-node URLs filled in); otherwise it renders
    the builtin wiki-nodes template.
    """
    bucket = get_bucket_in_request_context()
    if not bucket:
        return
    request_path = get_request_path().strip("/")
    # only handle /wiki_nodes and anything below it
    if not re.match("wiki_nodes(/|$)", request_path):
        return
    wiki_configs = get_json_content_by_path(bucket, "__wiki.json", force_dict=True)
    enable_wiki_nodes = auto_type(wiki_configs.get("enable_wiki_nodes", True))
    if not enable_wiki_nodes:
        return
    wiki_root = smart_unicode(wiki_configs.get("wiki_root", ""))
    if not wiki_root:
        # no wiki root configured -> nothing to show
        return
    wiki_root = wiki_root.strip("/")
    # fall back to the root folder's name when no explicit title is set
    wiki_title = wiki_configs.get("wiki_title") or get_just_name(wiki_root, for_folder=True)
    path = request.values.get("path", "").strip("/")
    if request.values.get("type") == "data":
        # return json data
        wiki_root = wiki_root.lower()
        under = "%s/%s" % (wiki_root, path)
        posts_info = get_bucket_posts_info(bucket)
        data = filter_and_get_posts_link_points_info(posts_info, under=under)
        nodes = data.get("nodes")
        if nodes:
            # rewrite each node id into a browsable /wiki/... URL
            for node in nodes:
                node_id = node.get("id")
                if node_id and isinstance(node_id, string_types):
                    if node_id.startswith("#"):
                        # '#tag' node -> tag listing page
                        tag = node_id.lstrip("#")
                        url = "/wiki/tag/%s" % tag
                        node["url"] = url
                    else:
                        # document node -> post page, path relative to wiki_root
                        relative_path = get_relative_path(node_id.strip("/"), wiki_root, return_name_if_fail=False)
                        if relative_path:
                            url = "/wiki/post/%s" % relative_path
                            node["url"] = url
        return force_response(data)
    else:
        return render_api_template("builtin_theme_wiki_nodes.jade", wiki_title=wiki_title)
def _posts_info(self):
    """Posts-info config stored for this instance's bucket."""
    bucket = self.bucket
    return get_bucket_posts_info(bucket)
def update_post_tags_words_info(bucket, record_data):
    """Re-index one post's tags, doc-links and word count into the bucket's
    posts info, then persist it via set_bucket_configs.

    Maintains three two-way indexes inside posts_info:
      - 'tags':  {'paths': {path: [tag, ...]}, 'tags': {tag: [path, ...]}}
      - 'links': {'paths': {path: [linked, ...]}, 'links': {linked: [path, ...]}}
      - 'words': {path: text_words}, plus the bucket-wide 'text_words' total.

    Deleted / draft / private posts are removed from all indexes; everything
    else is (re)inserted.  Non-markdown files and '_nav/' entries are ignored.
    """
    path = get_path_from_record(record_data)
    # guard BEFORE deriving lower_path: get_path_from_record may return
    # None/empty, and None.lower() would raise AttributeError
    if not path:
        return
    if not is_a_markdown_file(path):
        return
    lower_path = path.lower().lstrip('/')
    if lower_path.startswith('_nav/'):
        return
    posts_info = get_bucket_posts_info(bucket) or {}
    # data init
    bucket_text_words = to_int(posts_info.get('text_words') or 0, default_if_fail=0)
    # prepare tags info
    tags_info = posts_info.setdefault(
        'tags', {})  # {'paths':{path:[tag1,tag2]} , 'tags': {'tag':[path1, path2]} }
    tags_info_tags = tags_info.setdefault('tags', {})
    tags_info_paths = tags_info.setdefault('paths', {})
    # prepare links info
    links_info = posts_info.setdefault(
        "links", {})  # {'paths': {path:[back_path1, back_path2]]} ,
    # 'back_paths': {'back_path':[path1, path2]} }
    links_info_links = links_info.setdefault("links", {})
    links_info_paths = links_info.setdefault("paths", {})
    words_info = posts_info.setdefault('words', {})  # {'path': text_words}
    is_deleted = record_data.get('is_deleted', False)
    post_status = record_data.get('status') or 'public'
    post_tags = record_data.get('tags') or []
    if not isinstance(post_tags, (list, tuple)):
        post_tags = []
    post_doc_links, wiki_tags = get_linked_docs_from_markdown_content(
        path, record_data.get("raw_content"),
        md_link_abs_check_func=partial(has_record_by_path, bucket))
    if not isinstance(post_doc_links, (list, tuple)):
        post_doc_links = []
    text_words = to_int(record_data.get('text_words'), default_if_fail=0)
    # if a word count was already recorded for this path, subtract it first
    # so re-indexing the same post does not double-count
    old_text_words = to_int(words_info.get(lower_path), default_if_fail=0)
    if old_text_words:
        bucket_text_words -= old_text_words
    # deleted / draft-like statuses are excluded from the indexes:
    # strip any information previously recorded for this path
    if is_deleted or post_status in ['draft', 'private']:
        words_info.pop(lower_path, None)
        # handle delete tags
        old_tags = tags_info_paths.get(lower_path)
        if not isinstance(old_tags, (list, tuple)):
            old_tags = []
        old_tags = [smart_unicode(tag) for tag in old_tags]
        tags_info_paths.pop(lower_path, None)
        for tag in old_tags:
            tags_info_tags_for_tag_paths = tags_info_tags.setdefault(tag, [])
            if lower_path in tags_info_tags_for_tag_paths:
                tags_info_tags_for_tag_paths.remove(lower_path)
            if not tags_info_tags_for_tag_paths:  # tag no longer used by any path
                tags_info_tags.pop(tag, None)
        # handle delete links
        old_links = links_info_paths.get(lower_path)
        if not isinstance(old_links, (list, tuple)):
            old_links = []
        old_links = [smart_unicode(link) for link in old_links]
        links_info_paths.pop(lower_path, None)
        for link in old_links:
            links_info_link_back_paths = links_info_links.setdefault(link, [])
            if lower_path in links_info_link_back_paths:
                links_info_link_back_paths.remove(lower_path)
            if not links_info_link_back_paths:  # link no longer referenced
                links_info_links.pop(link, None)
    else:
        bucket_text_words += text_words
        words_info[lower_path] = text_words
        # handle tags
        if post_tags:
            # NOTE(review): when post_tags is empty a stale tags_info_paths
            # entry may remain from a previous save — confirm if intended
            tags_info_paths[lower_path] = post_tags
            for tag in post_tags:
                tags_info_tags_for_tag_paths = tags_info_tags.setdefault(tag, [])
                if lower_path not in tags_info_tags_for_tag_paths:
                    tags_info_tags_for_tag_paths.append(lower_path)
        # drop this path from tags it no longer carries; collect emptied tags
        empty_tags = []
        for tag, paths_tagged in tags_info_tags.items():
            if not paths_tagged:
                empty_tags.append(tag)
                continue
            if not isinstance(paths_tagged, list):
                continue
            if lower_path in paths_tagged and tag not in post_tags:
                paths_tagged.remove(lower_path)
                if not paths_tagged:
                    empty_tags.append(tag)
        for empty_tag in empty_tags:
            tags_info_tags.pop(empty_tag, None)
        # handle links
        if post_doc_links:
            links_info_paths[lower_path] = post_doc_links
            for link in post_doc_links:
                links_info_link_back_paths = links_info_links.setdefault(link, [])
                if lower_path not in links_info_link_back_paths:
                    links_info_link_back_paths.append(lower_path)
        # drop this path as a back-reference from links it no longer makes
        empty_links = []
        for link, paths_linked in links_info_links.items():
            if not paths_linked:
                empty_links.append(link)
                continue
            if not isinstance(paths_linked, list):
                continue
            if lower_path in paths_linked and link not in post_doc_links:
                paths_linked.remove(lower_path)
                if not paths_linked:
                    empty_links.append(link)
        for empty_link in empty_links:
            links_info_links.pop(empty_link, None)
    # never let the bucket-wide counter go negative
    if bucket_text_words < 0:
        bucket_text_words = 0
    posts_info['text_words'] = bucket_text_words
    set_bucket_configs(bucket, configs=posts_info, config_type='posts')