def get_tags_info_under_path(bucket, under):
    """Return {tag: [path, ...]} for the bucket's tags info, keeping only
    paths located under `under` (all paths when `under` is falsy).

    Tags whose every path is filtered out do not appear in the result.
    """
    if not bucket:
        return {}
    raw_info = get_tags_info(bucket)  # tags.tags
    info = {}
    for tag, tagged_paths in raw_info.items():
        kept_paths = [p for p in tagged_paths if not under or is_sub_path(p, under)]
        if kept_paths:
            info.setdefault(tag, []).extend(kept_paths)
    return info
def __init__(self, server_node, root, filepath, private_key=None, should_encrypt_file=False, is_dir=False,
             is_deleted=False, ipfs_key=None, version=None, auto_clean_bucket=True,
             relative_path=None, real_relative_path=None, raw_content=None, files_info=None):
    # Plain attribute capture.
    self.server_node = server_node
    self.root = root
    self.filepath = filepath
    self.private_key = private_key
    self.should_encrypt_file = should_encrypt_file
    self.is_dir = is_dir
    self.is_deleted = is_deleted
    self.ipfs_key = ipfs_key
    self.version = version
    self.auto_clean_bucket = auto_clean_bucket
    self.files_info = files_info
    # Mainly used when compiling Markdown documents, for FarBox Page.
    self.real_relative_path = real_relative_path
    # filepath is used to obtain the file content; if raw_content is given, raw_content wins.
    self._raw_content = raw_content
    # If relative_path is not specified, it has to be derived from root & filepath.
    if relative_path:
        self.relative_path = relative_path
    elif is_sub_path(self.filepath, parent_path=self.root):
        self.relative_path = get_relative_path(self.filepath, root=self.root)
    else:
        self.relative_path = ''
    self.lower_relative_path = self.relative_path.lower()
def after_synced(filepath, root, app_name, **extra_data):
    """Record a completed sync of `filepath` (under `root`) into the local sync db,
    then recursively make sure each ancestor folder is recorded too.

    Silently ignores paths that do not exist or that are not under `root`.
    """
    filepath = same_slash(filepath)
    if not os.path.exists(filepath):
        return  # ignore
    if not is_sub_path(filepath, root):
        return  # ignore
    data_path = get_sync_data_filepath(filepath, root, app_name)
    sync_data = {
        'filepath': get_relative_path(filepath, root),
        'synced_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        'md5': md5_for_file(filepath),
        'is_dir': os.path.isdir(filepath),
        'is_relative': True,
    }
    sync_data.update(extra_data)
    with open(data_path, 'w') as f:
        f.write(json.dumps(sync_data))
    # Store the parent folders into the local sync db, walking upwards
    # until we hit a folder that already has a sync record.
    parent_folder = os.path.dirname(filepath)
    parent_data_path = get_sync_data_filepath(parent_folder, root, app_name)
    if not os.path.isfile(parent_data_path):
        after_synced(parent_folder, root, app_name)
def get_parents(self, root=None, includes_root=False):
    """Return Category objects for every ancestor directory of self.path,
    ordered from the top level downwards.

    When `root` is given, ancestors outside of it are skipped, except that
    `root` itself is kept when `includes_root` is true.
    """
    if not self.raw:
        return []
    ancestor_parts = self.path.split('/')[:-1][:50]  # at most 50 levels deep
    ancestor_paths = ['/'.join(ancestor_parts[:i + 1]) for i in range(len(ancestor_parts))]
    ancestor_paths.reverse()  # just reverse it for human
    parents = []
    for ancestor_path in ancestor_paths:
        if root and not is_sub_path(ancestor_path, root):
            # Outside of root: skip, unless this is root itself and it is wanted.
            if not (includes_root and is_same_path(root, ancestor_path)):
                continue
        category = Category(ancestor_path)
        if category:
            parents.append(category)
    parents.reverse()
    return parents
def filter_and_get_posts_link_points_info(posts_info, under=""):
    """Build a {"nodes": [...], "links": [...]} graph from a site's posts info.

    Nodes are tag nodes (id "#tag", group 1) and post-path nodes (group 2);
    links connect tags to their tagged posts and posts to the posts they link.
    When `under` is non-empty, only paths under it are considered.
    Post nodes later get a group id per top-level parent folder (starting at 5),
    and their `val` is the number of times they were hit.
    """
    under = under.strip("/").strip().lower()

    links_info = get_value_from_data(posts_info, "links.links")
    if not isinstance(links_info, dict):
        links_info = {}
    tags_info = get_value_from_data(posts_info, "tags.tags")
    if not isinstance(tags_info, dict):
        tags_info = {}

    output_nodes = []
    output_nodes_map = {}
    output_links = []
    filepath_counter = {}
    filepath_group_info = {}  # hit parent, +5

    # Tag -> post edges.
    for tag, tagged_paths in tags_info.items():
        tag_node_id = "#%s" % tag
        valid_count = 0
        for path in tagged_paths:
            if under and not is_sub_path(path, under):
                continue
            filepath_counter[path] = filepath_counter.get(path, 0) + 1
            valid_count += 1
            if path not in output_nodes_map:
                path_node = dict(id=path, name=get_just_name(path), group=2)
                output_nodes.append(path_node)
                output_nodes_map[path] = path_node
            # create node-link
            if tag_node_id != path:
                output_links.append(dict(source=tag_node_id, target=path))
        # Only materialize the tag node when at least one path survived the filter.
        if valid_count:
            tag_node = dict(id=tag_node_id, name=tag, val=valid_count, group=1)
            output_nodes.append(tag_node)
            output_nodes_map[tag_node_id] = tag_node

    # Post -> post edges.
    for source_path, linked_paths in links_info.items():
        if under and not is_sub_path(source_path, under):
            continue
        valid_count = 0
        for path in linked_paths:
            if under and not is_sub_path(path, under):
                continue
            filepath_counter[path] = filepath_counter.get(path, 0) + 1
            valid_count += 1
            if path not in output_nodes_map:
                path_node = dict(id=path, name=get_just_name(path), group=2)
                output_nodes.append(path_node)
                output_nodes_map[path] = path_node
            # create node-link
            if source_path != path:
                output_links.append(dict(source=source_path, target=path))
        # The source itself becomes a node once it produced at least one valid link target.
        if valid_count and source_path not in output_nodes_map:
            source_node = dict(id=source_path, name=get_just_name(source_path), group=2)
            output_nodes.append(source_node)
            output_nodes_map[source_path] = source_node

    # update path nodes count
    for path, count in filepath_counter.items():
        node = output_nodes_map.get(path)
        if node:
            node["val"] = count

    # Group post nodes by their first-level parent folder; group ids start at 5.
    for node in output_nodes:
        node_id = node.get("id")
        if node_id.startswith("#"):
            continue  # tag node, keeps group 1
        relative_path = get_relative_path(node_id.lower(), under.lower(), return_name_if_fail=False)
        if not relative_path:
            continue
        level1_parent = relative_path.split("/")[0]
        if level1_parent not in filepath_group_info:
            filepath_group_info[level1_parent] = len(filepath_group_info) + 5
        node["group"] = filepath_group_info[level1_parent]

    return {
        "nodes": output_nodes,
        "links": output_links,
    }