def get_post_with_greed(url_body, parent_doc=None):
    """Resolve a post document for ``url_body``, trying progressively
    looser ("greedy") strategies until one hits.

    Attempt order:
      1. direct lookup of the URL path (query string / fragment stripped);
      2. for markdown paths: resolve relative to the parent doc's folder,
         then with the current data root as a prefix;
      3. keyword search by the ``name`` query var, then by the URL's
         bare filename.

    :param url_body: raw URL body; may contain ``?query`` / ``#fragment``.
    :param parent_doc: dict record of the referring doc; defaults to the
        doc bound to the current request.
    :return: the matched doc record, or a falsy value when nothing matches.
    """
    # strip ?query and #fragment to get the pure path
    pure_url_body = re.split("[?#]", url_body)[0]
    post_url = pure_url_body
    d = get_data_namespace()
    post_doc = d.get_doc(post_url)  # attempt 1: direct lookup
    current_data_root = get_current_data_root()
    parent_doc = parent_doc or get_doc_in_request()
    if not post_doc and is_a_markdown_file(post_url) and parent_doc and isinstance(parent_doc, dict):
        # attempt 2: treat the URL as a path relative to the parent doc's folder
        filename = post_url
        if "/post/" in filename:
            # keep only the part after the first "/post/" segment
            filename = filename.split("/post/", 1)[-1]
        parent_post_doc_path = get_path_from_record(parent_doc)
        if parent_post_doc_path:
            post_doc_parent = os.path.split(parent_post_doc_path)[0]
            if post_doc_parent:
                abs_path = "%s/%s" % (post_doc_parent.strip("/"), filename.strip("/"))
                post_doc = d.get_doc_by_path(abs_path)
                if current_data_root and not post_doc:
                    # add wiki_root (the data root) as a prefix and try to match again
                    abs_path = "%s/%s" % (current_data_root, filename.strip("/"))
                    post_doc = d.get_doc_by_path(abs_path)
    if not post_doc:
        # attempt 3: try to hit via keyword search
        bucket = get_bucket_in_request_context()
        post_name = (get_get_var(url_body, "name") or "").strip()
        if post_name:
            if "." in post_name:
                # drop the file extension from the name
                post_name = os.path.splitext(post_name)[0]
            post_doc = get_one_post_by_es(bucket, keywords=post_name, under=current_data_root)
        if not post_doc and is_a_markdown_file(post_url):
            # search by the bare filename directly
            just_post_file_name = get_just_name(post_url)
            if just_post_file_name != post_name:  # avoid repeating the identical query
                post_doc = get_one_post_by_es(bucket, keywords=just_post_file_name, under=current_data_root)
    return post_doc
def should_md_doc_hit_folder_compiler(self):
    """Return True only when self.filepath is an existing markdown file
    whose bare name is exactly 'index'."""
    path = self.filepath
    # all preconditions must hold: non-empty path, real file, markdown type
    if not (path and os.path.isfile(path) and is_a_markdown_file(path)):
        return False
    return get_just_name(path) == 'index'
def get_file_timestamp(relative_path=None, metadata=None, abs_filepath=None, utc_offset=None):
    """Best-effort timestamp for a post.

    Sources tried in order:
      1. file mtime, when ``abs_filepath`` is given without ``metadata``;
      2. the ``date`` field inside ``metadata`` (datetime or string);
      3. a ``YYYY-M-D H-M[-S]`` pattern in the filename;
      4. a ``YYYY-M-D`` pattern anywhere in ``relative_path`` (Jekyll style);
      5. the current time, when everything else fails or raises.

    :param relative_path: path relative to the data root.
    :param metadata: dict-like post metadata; a missing/odd value raising
        TypeError/ValueError falls back to the current time.
    :param abs_filepath: absolute path on disk, used only for mtime.
    :param utc_offset: timezone offset forwarded to ``utc_date_parse``.
    """
    # Mainly used to obtain the post's date info.
    # relative_path is the path relative to the root.
    if abs_filepath and not metadata:
        if os.path.isfile(abs_filepath):
            return os.path.getmtime(abs_filepath)
        else:
            return time.time()
    name_from_path = get_just_name(relative_path)
    try:
        metadata_date = metadata.get('date')
        if isinstance(metadata_date, datetime.datetime):
            # convert to str form first, so the final value is a UTC timestamp
            date_s = metadata_date.strftime('%Y-%m-%d %H:%M:%S')
        else:
            # read from metadata first; once formatted, the time offset can be adjusted automatically
            date_s = smart_str(get_meta_value(metadata=metadata, key='date', default=''))
        if date_s:
            date_s = date_s.strip()
        if date_s and re.match('\d{4}\.\d+\.\d+$', date_s):
            # dotted dates like 2018.3.19 are converted to xxxx-xx-xx
            date_s = date_s.replace('.', '-')
        if not date_s:
            # compatible with '2012-12-12 12-12' or '2012-12-12 12-12-12' filename formats
            if re.match(r'^\d+-\d+-\d+ \d+-\d+(-\d+)?$', name_from_path):
                part1, part2 = name_from_path.split(' ', 1)
                try:
                    s = '%s %s' % (part1, part2.replace('-', ':'))
                    date = utc_date_parse(s, utc_offset=utc_offset)
                    # NOTE(review): returns the parsed value directly here, unlike the
                    # date_to_timestamp path below — confirm utc_date_parse's return type.
                    return date
                except:
                    pass
            # extract a 2012-1?2-1?2 date pattern from the path
            date_search = re.search('/?([123]\d{3}-\d{1,2}-\d{1,2})[^/]*', relative_path)
            if date_search:
                # can be taken from the file path, for Jekyll compatibility
                date_s = date_search.groups()[0]
        date = utc_date_parse(date_s, utc_offset=utc_offset)
    except (ValueError, TypeError):
        # unparsable or missing date -> "now"
        return time.time()
    timestamp = date_to_timestamp(date)
    if not timestamp:
        timestamp = time.time()
    return timestamp
def show_wiki_nodes_as_sub_site():
    """Serve the "/wiki_nodes" sub-site: the wiki link-graph view.

    Returns None (falling through to other handlers) unless the request
    path matches ``wiki_nodes``, ``__wiki.json`` enables the feature, and
    a ``wiki_root`` is configured.  With ``?type=data`` it responds with
    the JSON node/link graph (each node annotated with a ``url``);
    otherwise it renders the wiki-nodes page template.
    """
    bucket = get_bucket_in_request_context()
    if not bucket:
        return
    request_path = get_request_path().strip("/")
    if not re.match("wiki_nodes(/|$)", request_path):
        return
    wiki_configs = get_json_content_by_path(bucket, "__wiki.json", force_dict=True)
    # feature flag, on by default
    enable_wiki_nodes = auto_type(wiki_configs.get("enable_wiki_nodes", True))
    if not enable_wiki_nodes:
        return
    wiki_root = smart_unicode(wiki_configs.get("wiki_root", ""))
    if not wiki_root:
        return
    wiki_root = wiki_root.strip("/")
    wiki_title = wiki_configs.get("wiki_title") or get_just_name(wiki_root, for_folder=True)
    path = request.values.get("path", "").strip("/")
    if request.values.get("type") == "data":
        # return json data for the graph
        wiki_root = wiki_root.lower()
        under = "%s/%s" % (wiki_root, path)
        posts_info = get_bucket_posts_info(bucket)
        data = filter_and_get_posts_link_points_info(posts_info, under=under)
        nodes = data.get("nodes")
        if nodes:
            # attach a browsable url to every node
            for node in nodes:
                node_id = node.get("id")
                if node_id and isinstance(node_id, string_types):
                    if node_id.startswith("#"):
                        # tag node -> tag page url
                        tag = node_id.lstrip("#")
                        url = "/wiki/tag/%s" % tag
                        node["url"] = url
                    else:
                        # path node -> post page url (relative to wiki_root)
                        relative_path = get_relative_path(node_id.strip("/"), wiki_root, return_name_if_fail=False)
                        if relative_path:
                            url = "/wiki/post/%s" % relative_path
                            node["url"] = url
        return force_response(data)
    else:
        return render_api_template("builtin_theme_wiki_nodes.jade", wiki_title=wiki_title)
def just_name(self):
    """Bare name of this doc's path: prefer abs_filepath, fall back to
    relative_path when abs_filepath is unset/empty."""
    source_path = self.abs_filepath or self.relative_path
    return get_just_name(source_path)
def show_albums_as_sub_site():
    """Serve the "/album" sub-site: album folders at the root url,
    images inside a specific album otherwise.

    Returns None (falls through) when there is no bucket, the path does
    not match ``album``, the path looks like a direct image URL, or no
    ``albums_root`` is configured.  Renders the waterfall album template.
    """
    bucket = get_bucket_in_request_context()
    if not bucket:
        return
    request_path = get_request_path().strip("/")
    if not re.match("album(/|$)", request_path):
        return
    if "." in request_path and guess_type(
            request_path, default_type="").startswith("image/"):
        # probably a direct image address; don't let the whole album route intercept it
        return
    site_configs = get_bucket_site_configs(bucket)
    albums_root = smart_unicode(site_configs.get("albums_root", ""))
    if not albums_root:
        return
    albums_root = albums_root.strip("/")  # todo: allow setting "/" directly?
    albums_home_sort = site_configs.get("albums_home_sort", "-date")
    album_items_sort = site_configs.get("album_items_sort", "-date")
    page_title = site_configs.get("albums_title") or get_just_name(
        albums_root, for_folder=True)
    if re.match("album/?$", request_path):
        # album home -> list folders under the root
        doc_type = "folder"
        doc_sort = albums_home_sort
        under = albums_root
    else:
        # a specific album -> list its images
        doc_type = "image"
        doc_sort = album_items_sort
        under = "%s/%s" % (albums_root, request_path.split("/", 1)[-1].strip("/"))
        folder_doc = get_record_by_path(bucket=bucket, path=under)
        if folder_doc:
            page_title = folder_doc.get("title") or get_just_name(
                folder_doc.get("path"), for_folder=True)
        else:
            page_title = get_just_name(under, for_folder=True)
    if doc_sort not in ["date", "-date"]:
        # only date ordering is supported; normalize anything else
        doc_sort = "-date"
    limit = 15  # todo: make this configurable?
    doc_level = 1
    # min_images_count = 1
    docs = Data.get_data(path=under, type=doc_type, limit=limit, level=doc_level,
                         sort=doc_sort, pager_name='album_docs_pager', exclude='default')
    if doc_type == "folder":
        # build the /album/<relative> url for each album folder
        for doc in docs:
            doc_path = get_path_from_record(doc, is_lower=True)
            relative_path = doc_path.replace(albums_root.lower(), "", 1).strip("/")
            doc["album_url"] = "/album/%s" % relative_path
    return render_api_template(
        "builtin_theme_album_waterfall.jade",
        docs=docs,
        page_title=page_title,
    )
def filter_and_get_posts_link_points_info(posts_info, under=""):
    """Build a node/link graph of posts from tags and inter-post links.

    :param posts_info: bucket-level info dict; reads ``links.links``
        (source path -> linked paths) and ``tags.tags``
        (tag -> tagged paths) from it.
    :param under: optional path prefix; paths outside it are filtered out.
    :return: ``{"nodes": [...], "links": [...]}`` where each node is a
        dict with ``id``/``name``/``group`` (and ``val`` = reference
        count), and each link is ``{"source": ..., "target": ...}``.
        Tag nodes use ``#tag`` ids and group 1; path nodes start in
        group 2 and are regrouped by top-level folder at the end.
    """
    under = under.strip("/").strip().lower()
    links_info = get_value_from_data(posts_info, "links.links")
    if not isinstance(links_info, dict):
        links_info = {}
    tags_info = get_value_from_data(posts_info, "tags.tags")
    if not isinstance(tags_info, dict):
        tags_info = {}
    output_nodes = []
    output_nodes_map = {}  # id -> node dict, to dedupe and for later count updates
    output_links = []
    filepath_counter = {}  # path -> number of references (tag hits + link hits)
    filepath_group_info = {}  # hit parent, +5
    # pass 1: tag -> path edges
    for tag, tagged_paths in tags_info.items():
        valid_count = 0
        tag_node_id = "#%s" % tag
        for path in tagged_paths:
            if under and not is_sub_path(path, under):
                continue
            else:
                filepath_counter[path] = filepath_counter.get(path, 0) + 1
                valid_count += 1
                if path not in output_nodes_map:
                    path_node = dict(id=path, name=get_just_name(path), group=2)
                    output_nodes.append(path_node)
                    output_nodes_map[path] = path_node
                # create node-link
                if tag_node_id != path:
                    output_links.append(dict(source=tag_node_id, target=path))
        if not valid_count:
            # tag has no surviving paths -> no tag node
            continue
        tag_node = dict(
            id=tag_node_id,
            name=tag,
            val=valid_count,
            group=1,
        )
        output_nodes.append(tag_node)
        output_nodes_map[tag_node_id] = tag_node
    # pass 2: post -> post link edges
    for source_path, linked_paths in links_info.items():
        if under and not is_sub_path(source_path, under):
            continue
        valid_count = 0
        for path in linked_paths:
            if under and not is_sub_path(path, under):
                continue
            else:
                filepath_counter[path] = filepath_counter.get(path, 0) + 1
                valid_count += 1
                if path not in output_nodes_map:
                    path_node = dict(id=path, name=get_just_name(path), group=2)
                    output_nodes.append(path_node)
                    output_nodes_map[path] = path_node
                # create node-link
                if source_path != path:
                    output_links.append(dict(source=source_path, target=path))
        if not valid_count:
            continue
        if source_path not in output_nodes_map:
            path_node = dict(id=source_path, name=get_just_name(source_path), group=2)
            output_nodes.append(path_node)
            output_nodes_map[source_path] = path_node
    # update path nodes count
    for path, count in filepath_counter.items():
        path_node = output_nodes_map.get(path)
        if path_node:
            path_node["val"] = count
    # regroup path nodes by their first-level parent folder (group ids start at 5)
    for node in output_nodes:
        node_id = node.get("id")
        if node_id.startswith("#"):
            # tag nodes keep group 1
            continue
        relative_path = get_relative_path(node_id.lower(), under.lower(), return_name_if_fail=False)
        if relative_path:
            level1_parent = relative_path.split("/")[0]
            if level1_parent not in filepath_group_info:
                group_id = len(filepath_group_info) + 5
                filepath_group_info[level1_parent] = group_id
            else:
                group_id = filepath_group_info[level1_parent]
            node["group"] = group_id
    output = {
        "nodes": output_nodes,
        "links": output_links,
    }
    return output
def show_wiki_as_sub_site():
    """Serve the "/wiki" sub-site and dispatch to its page types:
    index, tag listing, search, category listing, and post detail.

    Returns None (so other handlers can run) unless the request path
    starts with ``wiki`` and ``__wiki.json`` defines a ``wiki_root``.
    Each branch renders a knowbase jade template; missing tag/category/
    post targets abort with 404.
    """
    bucket = get_bucket_in_request_context()
    if not bucket:
        return
    request_path = get_request_path().strip("/")
    if not re.match("wiki(/|$)", request_path):
        return
    wiki_configs = get_json_content_by_path(bucket, "__wiki.json", force_dict=True)
    wiki_root = smart_unicode(wiki_configs.get("wiki_root", ""))
    if not wiki_root:
        return
    set_data_root_in_request(wiki_root)  # set data_root to request
    wiki_root = wiki_root.strip("/")
    wiki_title = wiki_configs.get("wiki_title") or get_just_name(wiki_root, for_folder=True)
    wiki_root = wiki_root.lower()
    # common template kwargs shared by every branch
    kwargs = dict(wiki_root=wiki_root, wiki_title=wiki_title, wiki_configs=wiki_configs)
    if re.match("wiki/?$", request_path):
        # index page: show configured categories, or top-level folders by default
        docs = []
        user_categories = wiki_configs.get("categories")
        if not isinstance(user_categories, (list, tuple)):
            user_categories = []
        for user_category in user_categories:
            if not isinstance(user_category, dict):
                continue
            category_path = user_category.get("path")
            summary = smart_unicode(user_category.get("summary") or "")
            icon = smart_unicode(user_category.get("icon") or "")
            doc = get_record_by_path(bucket=bucket, path=category_path)
            if not doc:
                # retry with wiki_root prefixed
                category_path = "%s/%s" % (wiki_root, category_path.strip("/"))
                doc = get_record_by_path(bucket=bucket, path=category_path)
            if not doc:
                continue
            doc_type = get_type_from_record(doc)
            if doc_type not in ["post", "folder"]:
                continue
            # config values win over the doc's own metadata
            doc["icon"] = icon or get_value_from_data(doc, "metadata.icon")
            doc["summary"] = summary or get_value_from_data(doc, "metadata.summary")
            docs.append(doc)
        if not docs:
            # by default, list first-level folders under the wiki root
            docs = Data.get_data(type='folder', level=1, limit=50, with_page=False, path=wiki_root)
        # build urls; keep only docs that yield a relative wiki url
        index_docs = []
        for doc in docs:
            wiki_url = get_wiki_url_for_doc(wiki_root, doc)
            if not wiki_url:
                continue
            doc["wiki_url"] = wiki_url
            index_docs.append(doc)
        return render_api_template("builtin_theme_knowbase_index.jade", docs=index_docs, **kwargs)
    elif re.match("wiki/tag/", request_path):
        # tag page: posts carrying the tag, newest first
        current_tag = get_offset_path(request_path, 2)
        if not current_tag:
            abort(404, "no tag?")
        docs = get_records_by_tag(bucket, current_tag, sort_by="-date")
        for doc in docs:
            doc["wiki_url"] = get_wiki_url_for_doc(wiki_root, doc)
        return render_api_template("builtin_theme_knowbase_tag.jade", current_tag=current_tag, docs=docs, **kwargs)
    elif re.search("wiki/search(/|$)", request_path):
        # search page: full-text search restricted to the wiki root
        keywords = request.values.get("s")
        data_namespace = get_data_namespace()
        docs = data_namespace.get_data(bucket=bucket, keywords=keywords, pager_name="wiki",
                                       path=wiki_root, sort_by='-date', min_limit=8)
        for doc in docs:
            doc["wiki_url"] = get_wiki_url_for_doc(wiki_root, doc)
        return render_api_template("builtin_theme_knowbase_search.jade", docs=docs, **kwargs)
    elif re.match("wiki/category/", request_path):
        # category page: posts under one folder
        category_path = get_offset_path(request_path, 2).lower()
        wiki_nodes_url = "/wiki_nodes?path=%s" % category_path
        category_path = "%s/%s" % (wiki_root, category_path)
        folder_doc = get_record_by_path(bucket, category_path)
        enable_wiki_nodes = auto_type(wiki_configs.get("enable_wiki_nodes", True))
        if not enable_wiki_nodes:
            # graph view disabled -> hide the link
            wiki_nodes_url = ""
        if not folder_doc or get_type_from_record(folder_doc) != "folder":
            abort(404, "no category found")
        else:
            category = Category(folder_doc)
            docs = auto_pg(bucket=bucket, data_type="post", pager_name="wiki", path=category.path,
                           ignore_marked_id=True, prefix_to_ignore='_', sort_by='-date', min_limit=8)
            for doc in docs:
                doc["wiki_url"] = get_wiki_url_for_doc(wiki_root, doc)
            return render_api_template("builtin_theme_knowbase_category.jade", category=category,
                                       docs=docs, wiki_nodes_url=wiki_nodes_url, **kwargs)
    elif re.match("wiki/post/", request_path):
        # detail page: a single post under the wiki root
        doc_path = get_offset_path(request_path, 2)
        doc_path = "%s/%s" % (wiki_root, doc_path)
        doc = get_record_by_path(bucket, doc_path)
        if not doc:
            abort(404, "no doc found")
        else:
            return render_api_template("builtin_theme_knowbase_post.jade", doc=doc, **kwargs)