def sync_for_updated_files(self):
    # Ask the server for its files listing, then walk the local filesystem
    # and push every file that needs syncing. Returns True if at least one
    # file was actually synced.
    server_reply = send_message(
        node=self.server_node,
        private_key=self.private_key,
        action='show_files',
        message=''
    )
    # keep the raw reply around: sync_for_deleted_files reads it later
    self.files_info_on_server = server_reply

    lower_files = get_value_from_data(server_reply, 'message.lower_files') or {}
    lower_folders = get_value_from_data(server_reply, 'message.lower_folders') or {}
    # defend against malformed replies
    if not isinstance(lower_files, dict):
        lower_files = {}
    if not isinstance(lower_folders, (list, tuple)):
        lower_folders = []

    any_synced = False
    local_paths = sync_loop_local_filesystem(
        self.root,
        app_name=self.app_name_for_sync,
        extra_should_sync_func=self.should_sync_file_func
    )
    for local_path in local_paths:
        # note: sync_one_file must run for every path, so no short-circuit here
        if self.sync_one_file(local_path,
                              lower_files_info_on_server=lower_files,
                              lower_folders_info_on_server=lower_folders):
            any_synced = True
    return any_synced
def get_default_contacts_from_post(post):
    # Build a {author_name: from_address} contact dict from a post's metadata.
    # Returns an empty dict unless the doc is a post carrying both fields.
    if not isinstance(post, dict) or post.get('type') != 'post':
        return {}
    address = get_value_from_data(post, 'metadata.from')
    author = get_value_from_data(post, 'metadata.author')
    if address and author:
        return {author: address}
    return {}
def __filter(objects, attr=None, attr_value=None, opt=None, return_one=False, **kwargs):
    # Filter a list of objects by an attribute path (dot-notation resolved by
    # get_value_from_data).
    #   objects: list of dict-like objects to filter
    #   attr/attr_value: the attribute path and the value to compare against;
    #       may also be given as a single keyword argument instead
    #   opt: comparison operator: 'in', '>='/'=>', '<='/'=<', or None for equality
    #   return_one: return the first match (or None) instead of a list
    # Examples:
    # posts.filter_by_date('2013-12-14'.date, '>=')
    # posts.filter_by_tag('test', 'in')
    if not attr and not attr_value and kwargs:
        # NOTE: Python 2 only — dict.keys()[0] is not subscriptable on py3
        attr = kwargs.keys()[0]
        attr_value = kwargs.values()[0]
    if attr is None:
        return objects
    if len(objects) > 2000:
        # hard cap on the number of objects considered (max size limit)
        objects = objects[:2000]
    if isinstance(attr_value, (list, tuple)):
        # attr_value is a list: keep objects whose value is IN that list
        result = filter(
            lambda obj: get_value_from_data(obj, attr) in attr_value, objects)
    elif attr and isinstance(attr, (str, unicode)):
        filtered_objects = []
        for row in objects:
            value_got = get_value_from_data(row, attr)
            if opt == 'in' and isinstance(attr_value, (str, unicode, int, float)):
                # 'in' means: scalar attr_value is contained in a list-valued field
                if isinstance(value_got, (list, tuple)) and attr_value in value_got:
                    filtered_objects.append(row)
            elif opt in ['>=', '=>'] and value_got >= attr_value:
                filtered_objects.append(row)
            elif opt in ['<=', '=<'] and value_got <= attr_value:
                filtered_objects.append(row)
            elif not opt:
                if isinstance(
                        value_got,
                        (list, tuple)) and not isinstance(attr_value, (list, tuple)):
                    # list-valued field vs scalar: membership test,
                    # like filter(posts, 'tag', 'test')
                    if attr_value in value_got:
                        filtered_objects.append(row)
                else:
                    if type(attr_value) == bool:
                        # special handling for boolean: compare truthiness
                        if attr_value == bool(value_got):
                            filtered_objects.append(row)
                    elif attr_value == value_got:
                        filtered_objects.append(row)
        result = filtered_objects
    else:
        # attr is not a usable string and attr_value is not a list: nothing matches
        result = []
    if return_one:
        if result:
            return result[0]
        else:
            return None
    else:
        return result
def seo(self, keywords=None, description=None):
    # Emit the SEO meta tags (keywords / description) for the current page.
    # Runs at most once per request: h.headers also triggers it, so a page
    # that wants custom SEO must call seo() before that.
    if getattr(request, 'seo_header_set_already', False):
        return ''

    site_configs = get_site_configs()

    # precedence: explicit arg > doc metadata > site config
    keywords = keywords \
        or get_value_from_data(request, 'doc.metadata.keywords') \
        or get_value_from_data(site_configs, 'keywords')
    if isinstance(keywords, (list, tuple)):
        # keywords given as a list: join into a comma-separated string
        keywords = ', '.join([smart_unicode(one) for one in keywords])

    description = description \
        or get_value_from_data(request, 'doc.metadata.description') \
        or get_value_from_data(site_configs, 'description')

    html = self.set_metas(keywords=keywords, description=description)
    request.seo_header_set_already = True  # mark done for this request
    return html
def get_nav(self, meta_doc=None, items=None, as_items=False, load_front_sources=True, toggle_menu=False, **scss_vars):
    # Resolve the site navigation items and render them.
    # Priority: doc-declared nav > site-configured nav > designer-passed items
    # > auto-generated nav. With as_items=True the raw list is returned so a
    # template can post-process it; otherwise HTML is rendered.
    if meta_doc is None:
        meta_doc = get_doc_in_request()
    if meta_doc and get_value_from_data(meta_doc, 'metadata.disable_nav'):
        # the contextual doc explicitly disabled the nav
        return ''

    nav_items = get_nav_items_from_site_configs(self.bucket)  # user-configured
    doc_nav_items = get_nav_items_from_doc(meta_doc)
    if doc_nav_items:
        # nav declared on the current doc wins
        nav_items = doc_nav_items
    if not nav_items:
        # `items` is usually passed by the designer as a default nav;
        # failing that, fall back to the auto-generated one
        nav_items = deal_nav_items(items) or get_auto_nav_items(self.bucket)

    if as_items:
        return nav_items
    return Html.get_nav(nav_items,
                        load_front_sources=load_front_sources,
                        toggle_menu=toggle_menu,
                        **scss_vars)
def get_site_config(fields, type_required=None, default_value=None):
    # Look up the first usable value among `fields` in the current bucket's
    # site configs.
    #   fields: a field name or a list of candidate field names
    #   type_required: a type (or list of types) the value must match;
    #       without it the value is passed through auto_type()
    #   default_value: returned when nothing matches (else None)
    if isinstance(type_required, list):
        type_required = tuple(type_required)  # isinstance needs a tuple

    bucket = get_bucket_in_request_context()
    if not bucket:
        return default_value if default_value is not None else None

    site_configs = get_bucket_site_configs(bucket)
    if not isinstance(fields, (list, tuple)):
        fields = [fields]
    for field in fields:
        if not isinstance(field, string_types):
            continue
        field_value = get_value_from_data(site_configs, field)
        if field_value is None:
            continue
        if not type_required:
            return auto_type(field_value)
        if isinstance(field_value, type_required):
            return field_value
        # type mismatch: keep trying the remaining candidate fields
    return default_value if default_value is not None else None
def sort(objects, attr='position', ordered_attr=None, ordered_keys=None, match=False):
    # Sort `objects` by the attribute path `attr` (dot-notation resolved by
    # get_value_from_data). A leading '-' on attr means descending order.
    #   ordered_attr/ordered_keys: optionally pull the objects whose
    #       `ordered_attr` value appears in `ordered_keys` to the head of the
    #       result, in the order given by ordered_keys
    #   match: if True, return ONLY those head objects
    # Returns a new list; the input list is never mutated.
    # todo: there is no 'position' field anymore
    if attr.startswith('-'):
        reverse = True
        attr = attr.lstrip('-')
    else:
        reverse = False
    # (removed dead code: `if reverse == '-':` could never be true,
    #  reverse is always a bool at this point)
    if attr:
        new_objects = sorted(objects, key=lambda o: get_value_from_data(o, attr))
    else:
        # nothing to sort by; copy so that reverse()/remove() below never
        # mutate the caller's list in place
        new_objects = list(objects)
    if reverse:
        new_objects.reverse()
    if ordered_attr and ordered_keys and isinstance(
            ordered_attr, string_types) and isinstance(ordered_keys, (list, tuple)):
        # if an object has `ordered_attr` and its value is in `ordered_keys`,
        # it is moved to the head, following the order of ordered_keys
        ordered_keys = [k for k in ordered_keys if isinstance(k, string_types)]
        objects_map = {
            get_value_from_data(obj, ordered_attr): obj
            for obj in new_objects
        }
        head_objects = []
        for key in ordered_keys:
            key_matched_obj = objects_map.get(key)
            if key_matched_obj is not None:
                head_objects.append(key_matched_obj)
                try:
                    new_objects.remove(key_matched_obj)
                except ValueError:
                    # already removed (duplicate ordered_attr values)
                    pass
        if match:
            # only the explicitly ordered objects are wanted
            return head_objects
        return head_objects + new_objects
    return new_objects
def sync_for_deleted_files(self):
    # Handle files that were deleted locally: notify the server for each one,
    # drop its ipfs record, and finally prune ipfs_files entries for paths
    # that exist on the server but not locally.
    # Returns True if anything was synced/cleaned.
    synced = False
    filepaths_to_delete_data = sync_find_files_to_delete(
        self.root, app_name=self.app_name_for_sync, as_dict=True)
    for filepath_to_delete_data in filepaths_to_delete_data:
        filepath_to_delete = filepath_to_delete_data['filepath']
        is_dir = filepath_to_delete_data.get('is_dir', False)
        relative_path = get_relative_path(filepath_to_delete, root=self.root)
        # forget the ipfs record for this path (entry may be a dict or a bare hash)
        ipfs_to_delete = self.ipfs_files.pop(relative_path, None)
        if isinstance(ipfs_to_delete, dict):
            ipfs_hash_to_delete = ipfs_to_delete.get('hash')
        else:
            ipfs_hash_to_delete = ipfs_to_delete
        self.remove_file_from_ipfs(ipfs_hash_to_delete)
        # is_deleted=True, send md5 value as version
        md5_value = filepath_to_delete_data.get('md5')
        compiler_sync_worker = FarBoxSyncCompilerWorker(
            server_node=self.server_node,
            root=self.root,
            filepath=filepath_to_delete,
            is_deleted=True,
            is_dir=is_dir,
            private_key=self.private_key,
            should_encrypt_file=self.should_encrypt_file,
            ipfs_key=ipfs_hash_to_delete,
            version=md5_value,
            auto_clean_bucket=self.auto_clean_bucket,
            files_info=self.files_info)
        sync_status = compiler_sync_worker.sync()
        self.record_sync_log(filepath=filepath_to_delete,
                             sync_status=sync_status,
                             is_deleted=True)
        if sync_status and sync_status.get('code') == 200:
            synced = True
        # at last, mark status as synced
        after_sync_deleted(filepath_to_delete,
                           root=self.root,
                           app_name=self.app_name_for_sync)
    # files on server, but no in local side, clean the configs_for_files
    # should run after self.sync_for_updated_files, to get self.files_info_on_server
    files_info_on_server = get_value_from_data(self.files_info_on_server,
                                               'message.files') or {}
    for relative_path in files_info_on_server.keys():
        abs_filepath = join(self.root, relative_path)
        if not os.path.isfile(abs_filepath):
            self.ipfs_files.pop(relative_path, None)
            synced = True
    return synced
def _show_toc(self):
    # Whether the table of contents should be shown.
    # Only a post rendered as markdown (not a plain-text site) can have a TOC.
    # The post's own metadata.toc wins; otherwise fall back to the site's
    # show_post_toc setting.
    eligible = (self.is_post_content
                and self.parent
                and not self.site_is_plain_text_type)
    if not eligible:
        return False
    toc_setting = get_value_from_data(
        self.parent,
        'metadata.toc',
        default=self.site_configs.get('show_post_toc'))
    return is_on(toc_setting)
def comments_as_html(obj):
    # Render the comments area for a doc. A third-party comments script, when
    # configured, replaces the built-in system entirely; a doc can also opt
    # out of comments via metadata.comment.
    doc = obj
    site_configs = get_bucket_site_configs()
    hide_comments = not get_value_from_data(site_configs, 'comments', True)

    third_party_script = get_value_from_data(
        site_configs, 'third_party_comments_script') or ''
    third_party_script = smart_unicode(third_party_script.strip())
    if third_party_script:
        # a third-party script is set: it substitutes the built-in comments
        hide_comments = True

    if not hide_comments and get_value_from_data(
            doc, 'metadata.comment') in [False, 'no', 'No']:
        # the doc itself disallows comments
        hide_comments = True

    if hide_comments:
        # built-in comments suppressed; the third-party script (possibly
        # empty) is all that remains
        return third_party_script
    return render_api_template('comments.jade', doc=doc)
def get_comments(parent_doc, bucket=None, as_tree=None):
    # Fetch the comments attached to `parent_doc`.
    #   as_tree: None means auto — follow the site's comments_type setting
    #       ('tree' by default); otherwise a flat list is produced.
    bucket = bucket or get_bucket_in_request_context() or request.values.get('bucket')
    if not bucket:
        return []
    doc_path = to_doc_path(parent_doc)
    comments_doc = get_comments_record(bucket, doc_path)
    site_configs = get_bucket_site_configs(bucket)
    if not get_value_from_data(site_configs, "comments", default=True):
        # comments disabled site-wide
        return []
    if as_tree is None:
        # auto-detect from the site configuration
        configured_type = get_value_from_data(site_configs, 'comments_type') or 'tree'
        as_tree = configured_type in ['tree']
    utc_offset = to_float(parent_doc.get('_utc_offset'), 8)
    return get_comments_by_comments_doc(comments_doc,
                                        as_tree=as_tree,
                                        utc_offset=utc_offset)
def get_nav_items_from_doc(doc):
    # Read nav items declared in a doc's metadata: metadata.nav must be a
    # list whose elements are single-pair dicts of {name: url}.
    # Returns a list of dict(name=..., url=...).
    if not doc:
        return []
    declared_nav = get_value_from_data(doc, 'metadata.nav')
    if not declared_nav or not isinstance(declared_nav, (list, tuple)):
        return []
    items = []
    for entry in declared_nav:
        if not isinstance(entry, dict) or not entry:
            continue
        # take the first (only expected) pair of the dict — py2 .items()[0]
        name, url = entry.items()[0]
        if isinstance(name, basestring) and isinstance(url, basestring):
            items.append(dict(name=name, url=url))
    return items
def filter_records_for_bucket(records, fields):
    # Decode raw (record_id, raw_record) pairs from ssdb and optionally strip
    # each record down to the requested fields (dot-notation supported).
    # Records with a zero id or empty payload are skipped; every returned
    # record carries its '_id'.
    wanted_fields = fields if isinstance(fields, (list, tuple)) and fields else []
    output = []
    for record_id, raw_record in records:
        if record_id in zero_ids:
            continue
        record = ssdb_data_to_py_data(raw_record)
        if not record:
            continue
        if not isinstance(record, dict):
            # wrap scalar payloads so the result is uniformly dict-shaped
            record = dict(data=record)
        record['_id'] = record_id
        if wanted_fields:
            trimmed = {'_id': record_id}
            for field in wanted_fields:
                field_value = get_value_from_data(record, field)
                if field_value is not None:
                    trimmed[field] = field_value
            record = trimmed
        output.append(record)
    return output
def sync_one_file(self, filepath, lower_files_info_on_server=None, lower_folders_info_on_server=None, re_check=False, should_store_files_info=False):
    # Sync a single local path (file or directory) to the server, unless the
    # server already holds an identical copy.
    #   lower_files_info_on_server: {lowercased relative path: info dict/hash}
    #   lower_folders_info_on_server: list of lowercased folder paths on server
    #   re_check: re-run detect_should_sync (md5 included) before syncing
    #   should_store_files_info: persist files info after a successful sync
    # Returns True when the file was actually synced (server answered 200);
    # NOTE(review): the encryption-failure path returns None, not False.
    if re_check:
        should_sync = detect_should_sync(filepath=filepath, root=self.root,
                                         app_name=self.app_name_for_sync, check_md5=True,
                                         extra_should_sync_func=self.should_sync_file_func)
        if not should_sync:
            return False
    synced = False
    lower_files_info_on_server = lower_files_info_on_server or {}
    lower_folders_info_on_server = lower_folders_info_on_server or []
    is_file = os.path.isfile(filepath)
    relative_path = get_relative_path(filepath, root=self.root)
    file_size = os.path.getsize(filepath)
    file_real_size = file_size
    if self.should_encrypt_file and self.private_key and is_file:
        # encrypted_filepath is a temporary file
        encrypted_filepath = encrypt_file(filepath, encrypt_key=self.private_key)
        if not encrypted_filepath:
            return
        file_real_size = os.path.getsize(encrypted_filepath)
        ipfs_key = self.add_file_to_ipfs(encrypted_filepath)
        try:
            os.remove(encrypted_filepath)
        except:
            pass
    elif is_file:
        ipfs_key = self.add_file_to_ipfs(filepath)
    else:
        ipfs_key = None
    file_version = ipfs_key
    if not ipfs_key and os.path.isfile(filepath):
        # compatibility when ipfs is unavailable: use the file's md5 instead
        file_version = get_md5_for_file(filepath)
    # compare against the server's lower_files info; identical files are ignored
    lower_relative_path = to_unicode(relative_path.lower())
    should_ignore = False
    if file_version:
        remote_file_version = get_value_from_data(
            lower_files_info_on_server.get(lower_relative_path), 'version')
        if not remote_file_version:
            remote_file_version = get_value_from_data(
                lower_files_info_on_server.get(lower_relative_path), 'hash')
        if remote_file_version == file_version:
            #if settings.DEBUG:
            #    print('has same file on server already for %s' % relative_path)
            should_ignore = True
        # record local ipfs info whenever we have a version
        # NOTE(review): flattened source is ambiguous on nesting — this might
        # belong inside the version-match branch instead; confirm upstream
        self.ipfs_files[relative_path] = dict(hash=file_version,
                                              size=file_size,
                                              real_size=file_real_size)
    is_dir = os.path.isdir(filepath)
    if is_dir:
        if lower_relative_path in lower_folders_info_on_server:
            #if settings.DEBUG:
            #    print('has same folder on server already for %s' % relative_path)
            should_ignore = True
    if should_ignore:
        # mark ignored paths as synced too, so the next loop won't pick them up again
        after_synced(filepath, root=self.root, app_name=self.app_name_for_sync)
    else:
        sync_compiler_worker = FarBoxSyncCompilerWorker(
            server_node=self.server_node,
            root=self.root,
            filepath=filepath,
            is_deleted=False,
            is_dir=is_dir,
            private_key=self.private_key,
            should_encrypt_file=self.should_encrypt_file,
            ipfs_key=ipfs_key,
            auto_clean_bucket=self.auto_clean_bucket,
            files_info=self.files_info,
        )
        sync_status = sync_compiler_worker.sync()
        self.record_sync_log(filepath=filepath, sync_status=sync_status, is_deleted=False)
        if sync_status and sync_status.get('code') == 200:
            synced = True
            after_synced(filepath, root=self.root, app_name=self.app_name_for_sync)
            if settings.DEBUG:
                print("synced (to) %s" % filepath)
            if should_store_files_info:
                self.store_files_info()
        elif not sync_status:
            # no status returned: treated as a kind of ignore
            after_synced(filepath, root=self.root, app_name=self.app_name_for_sync)
    return synced
def to_form_fields_obj(data_obj, keys, formats=None, extra_handler_func=None):
    # Pre-process a dict into field descriptors editable through an HTML form,
    # generally to make json data editable.
    #   data_obj: the source data values are read from
    #   keys: list of key specs; a spec may carry a `placeholder`, a
    #       (parenthesized) extra-info section, and an @type suffix
    #   formats: per-key overrides merged on top of basic_form_formats
    #   extra_handler_func: optional hook (field_type, data) -> (field_type, data)
    # Returns a list of dicts with at least 'key', 'title', 'value', 'type'.
    field_objs_list = []
    formats = formats or {}
    if not isinstance(formats, dict):
        # must be a dict
        formats = {}
    if basic_form_formats:
        # overlay user formats on top of the defaults
        new_formats = basic_form_formats.copy()
        new_formats.update(formats)
        formats = new_formats
    if not isinstance(keys, (tuple, list)):
        return field_objs_list
    for key in keys:
        # first extract placeholder, backtick-quoted (boundaries easy to detect)
        # placeholder may also serve other purposes, e.g. the options of a select
        if isinstance(key, dict):
            # a dict was passed in directly; no special processing needed (rare)
            field_objs_list.append(key)
            continue
        key = smart_unicode(key)  # must be unicode
        placeholder = ''
        p_c = re.search('`(.*?)`', key)
        if p_c and 'form_keys=' not in key:
            # form_keys= does not process ``` since its children need that info
            placeholder = p_c.group(1)
            key = re.sub(r'`.*?`', '', key)
        key, extra_info = extract_extra_info(
            key)  # extra_info is extracted from the parentheses of a single key
        if 'placeholder' in extra_info:
            placeholder = extra_info['placeholder']
        field_type = extra_info.get('type') or ''  # default; may be empty for a single line
        if '@' in key:
            # the @type suffix has higher priority
            key, field_type = key.rsplit('@', 1)
        if not field_type and 'password' in key:
            field_type = 'password'
        # key_matched_data is effectively the formats entry
        key_matched_data = formats.get(key) or {}
        # handle the format first
        if not isinstance(key_matched_data, dict):
            key_matched_data = {}
        key_matched_data = key_matched_data.copy()  # copy to avoid unnecessary confusion
        key_matched_data['key'] = key
        field_type = field_type or key_matched_data.get(
            'type')  # no field_type defined earlier: take it from formats
        if placeholder:
            key_matched_data['placeholder'] = placeholder
        if '.' in key:
            key_title = key.split('.')[-1].replace('_', ' ').title()
        else:
            key_title = key.replace('_', ' ').title()
        if 'Id' in key_title:
            # 'Id' -> 'ID' as a standalone word
            key_title = re.sub(r'( |^)(Id)( |$)', '\g<1>ID\g<3>', key_title)
        key_matched_data['title'] = key_matched_data.get('title') or key_title
        # set value
        default_value = extra_info.get("default")  # the default value
        if default_value is None:
            default_value = key_matched_data.get('default', '')
        if default_value is None:
            default_value = ''
        value = get_value_from_data(data_obj, key, default=default_value)
        if default_value and isinstance(default_value, string_types) and value == '':
            # a value exists but is an empty string: use default_value instead
            value = default_value
        if value is None:
            value = ''
        key_matched_data['value'] = value
        if field_type == 'timezone':
            field_type = 'select'
        key_without_dot = key.split('.')[-1]
        if field_type == 'select' and key_without_dot in DEFAULT_OPTIONS:
            key_matched_data['options'] = DEFAULT_OPTIONS[key_without_dot]
        # type conversion
        if field_type == 'bool':
            # bool is converted to a select
            field_type = 'select'
            key_matched_data['options'] = [('yes', 'Yes'), ('no', 'No')]
            key_matched_data['value'] = 'yes' if key_matched_data.get(
                'value') else 'no'
        elif field_type == 'select':
            if placeholder and not key_matched_data.get('options'):
                # derive the options from the placeholder
                option_values = [
                    value.strip() for value in placeholder.split(',')
                    if value.strip()
                ]
                if not extra_info.get('index_value', True):
                    # value is not an index
                    key_matched_data['options'] = [(v, v.replace('_', " "))
                                                   for v in option_values]
                else:
                    # value is the index
                    # display_text@key
                    options = []
                    for i, v in enumerate(option_values):
                        if isinstance(v, (str, unicode)):
                            v = v.strip()
                        option = (i + 1, v)
                        # NOTE(review): nesting of the next three ifs is
                        # ambiguous in the flattened source; confirm upstream
                        if v.endswith('@'):
                            # means: displayed content is the value itself
                            v = v[:-1]  # remove @
                        if isinstance(v, string_types) and len(
                                v) < 100 and re.match(
                                    r'[.a-z0-9+_-]+$', v, flags=re.I):
                            option = (v, v)
                        if isinstance(v, string_types) and '@' in v:
                            display_text, k = v.split('@', 1)
                            option = (k.strip(), display_text.strip())
                        options.append(option)
                    key_matched_data['options'] = options
            elif not key_matched_data.get('options') and isinstance(
                    value, (list, tuple)):
                # derive options directly from a list-typed value
                options = []
                for row in value:
                    if isinstance(row, string_types):
                        options.append([row, row])
                key_matched_data['options'] = options
            # value may be numeric; convert to int / float first
            if isinstance(value, string_types):
                if re.match(r'\d+$', value):
                    value = int(value)
                    key_matched_data['value'] = value
                elif re.match(r'\d+\.\d+$', value):
                    value = to_float(value)
                    key_matched_data['value'] = value
        elif field_type == 'list':
            # list becomes text, so it can be rendered with a textarea
            key_matched_data[
                'key'] = key + '@list'  # so the backend converts the field back to a list
            field_type = 'text'  # text == textarea
            old_value = key_matched_data.get('value')
            if isinstance(old_value, (list, tuple)):
                value = '\n'.join(old_value)
                key_matched_data['value'] = value
        elif field_type == 'file':
            # use the placeholder content as the filepath hint
            key_matched_data[
                'placeholder'] = 'drag file here to upload/replace'
        # extra expansion, controlled by the caller's logic
        if extra_handler_func and hasattr(extra_handler_func, '__call__'):
            field_type, key_matched_data = extra_handler_func(
                field_type, key_matched_data)
        if extra_info:
            key_matched_data.update(extra_info)
        # fixed dom width / height, when configured
        for w_h_field in ['height', 'width']:
            w_h_value = key_matched_data.get(w_h_field)
            if w_h_value:
                w_h_value = smart_unicode(w_h_value)[:30].strip()
                if re.match('[\d.]+$', w_h_value):
                    w_h_value += 'px'
                key_matched_data[w_h_field] = w_h_value
        # some field_types like category / list end up as HTML-typed fields
        if field_type:
            key_matched_data['type'] = field_type
        else:
            key_matched_data['type'] = 'default'
        if key_matched_data.get('type') == 'list':
            # handle a list-typed value
            if isinstance(key_matched_data.get('value', None), (list, tuple)):
                key_matched_data['value'] = '\n'.join(
                    key_matched_data['value'])
        field_objs_list.append(key_matched_data)
    return field_objs_list
def show_wiki_as_sub_site():
    # Serve the /wiki sub-site for the current bucket, dispatching on the
    # request path: index, tag, search, category, and post-detail pages.
    # Returns a rendered template, aborts with 404, or returns None when the
    # request is not a wiki request / the wiki is not configured.
    bucket = get_bucket_in_request_context()
    if not bucket:
        return
    request_path = get_request_path().strip("/")
    if not re.match("wiki(/|$)", request_path):
        return
    wiki_configs = get_json_content_by_path(bucket, "__wiki.json", force_dict=True)
    wiki_root = smart_unicode(wiki_configs.get("wiki_root", ""))
    if not wiki_root:
        return
    set_data_root_in_request(wiki_root)  # set data_root to request
    wiki_root = wiki_root.strip("/")
    wiki_title = wiki_configs.get("wiki_title") or get_just_name(wiki_root, for_folder=True)
    wiki_root = wiki_root.lower()
    # common template kwargs for every wiki page
    kwargs = dict(wiki_root=wiki_root, wiki_title=wiki_title, wiki_configs=wiki_configs)
    if re.match("wiki/?$", request_path):
        # index
        docs = []
        user_categories = wiki_configs.get("categories")
        if not isinstance(user_categories, (list, tuple)):
            user_categories = []
        for user_category in user_categories:
            if not isinstance(user_category, dict):
                continue
            category_path = user_category.get("path")
            summary = smart_unicode(user_category.get("summary") or "")
            icon = smart_unicode(user_category.get("icon") or "")
            doc = get_record_by_path(bucket=bucket, path=category_path)
            if not doc:
                # retry with the path prefixed by the wiki root
                category_path = "%s/%s" % (wiki_root, category_path.strip("/"))
                doc = get_record_by_path(bucket=bucket, path=category_path)
            if not doc:
                continue
            doc_type = get_type_from_record(doc)
            if doc_type not in ["post", "folder"]:
                continue
            # user-configured icon/summary win over doc metadata
            doc["icon"] = icon or get_value_from_data(doc, "metadata.icon")
            doc["summary"] = summary or get_value_from_data(doc, "metadata.summary")
            docs.append(doc)
        if not docs:
            # by default: first-level folders under the wiki root
            docs = Data.get_data(type='folder', level=1, limit=50, with_page=False, path=wiki_root)
        # compute each doc's url, relative to the wiki root
        index_docs = []
        for doc in docs:
            wiki_url = get_wiki_url_for_doc(wiki_root, doc)
            if not wiki_url:
                continue
            doc["wiki_url"] = wiki_url
            index_docs.append(doc)
        return render_api_template("builtin_theme_knowbase_index.jade", docs=index_docs, **kwargs)
    elif re.match("wiki/tag/", request_path):
        # tag listing
        current_tag = get_offset_path(request_path, 2)
        if not current_tag:
            abort(404, "no tag?")
        docs = get_records_by_tag(bucket, current_tag, sort_by="-date")
        for doc in docs:
            doc["wiki_url"] = get_wiki_url_for_doc(wiki_root, doc)
        return render_api_template("builtin_theme_knowbase_tag.jade", current_tag=current_tag, docs=docs, **kwargs)
    elif re.search("wiki/search(/|$)", request_path):
        # search results
        keywords = request.values.get("s")
        data_namespace = get_data_namespace()
        docs = data_namespace.get_data(bucket=bucket, keywords=keywords, pager_name="wiki",
                                       path=wiki_root, sort_by='-date', min_limit=8)
        for doc in docs:
            doc["wiki_url"] = get_wiki_url_for_doc(wiki_root, doc)
        return render_api_template("builtin_theme_knowbase_search.jade", docs=docs, **kwargs)
    elif re.match("wiki/category/", request_path):
        # category
        category_path = get_offset_path(request_path, 2).lower()
        wiki_nodes_url = "/wiki_nodes?path=%s" % category_path
        category_path = "%s/%s" % (wiki_root, category_path)
        folder_doc = get_record_by_path(bucket, category_path)
        enable_wiki_nodes = auto_type(wiki_configs.get("enable_wiki_nodes", True))
        if not enable_wiki_nodes:
            # nodes graph disabled in config: hide the link
            wiki_nodes_url = ""
        if not folder_doc or get_type_from_record(folder_doc) != "folder":
            abort(404, "no category found")
        else:
            category = Category(folder_doc)
            docs = auto_pg(bucket=bucket, data_type="post", pager_name="wiki", path=category.path,
                           ignore_marked_id=True, prefix_to_ignore='_', sort_by='-date', min_limit=8)
            for doc in docs:
                doc["wiki_url"] = get_wiki_url_for_doc(wiki_root, doc)
            return render_api_template("builtin_theme_knowbase_category.jade", category=category,
                                       docs=docs, wiki_nodes_url=wiki_nodes_url, **kwargs)
    elif re.match("wiki/post/", request_path):
        # detail
        doc_path = get_offset_path(request_path, 2)
        doc_path = "%s/%s" % (wiki_root, doc_path)
        doc = get_record_by_path(bucket, doc_path)
        if not doc:
            abort(404, "no doc found")
        else:
            return render_api_template("builtin_theme_knowbase_post.jade", doc=doc, **kwargs)
def filter_and_get_posts_link_points_info(posts_info, under=""):
    # Build a graph (nodes + links) of posts connected by tags and by
    # post-to-post links, for a force-graph style visualization.
    #   posts_info: expected to carry 'links.links' ({source: [targets]}) and
    #       'tags.tags' ({tag: [paths]})
    #   under: optional path prefix; paths outside it are filtered out
    # Returns {"nodes": [...], "links": [...]}; tag nodes have ids like
    # "#tag" and group=1, path nodes group>=2.
    under = under.strip("/").strip().lower()
    links_info = get_value_from_data(posts_info, "links.links")
    if not isinstance(links_info, dict):
        links_info = {}
    tags_info = get_value_from_data(posts_info, "tags.tags")
    if not isinstance(tags_info, dict):
        tags_info = {}
    output_nodes = []
    output_nodes_map = {}  # id -> node dict, to dedupe and to patch later
    output_links = []
    filepath_counter = {}  # path -> times referenced (becomes the node's 'val')
    filepath_group_info = {}  # hit parent, +5
    # pass 1: tag -> path edges
    for tag, tagged_paths in tags_info.items():
        valid_count = 0
        tag_node_id = "#%s" % tag
        for path in tagged_paths:
            if under and not is_sub_path(path, under):
                continue
            else:
                filepath_counter[path] = filepath_counter.get(path, 0) + 1
                valid_count += 1
                if path not in output_nodes_map:
                    path_node = dict(id=path, name=get_just_name(path), group=2)
                    output_nodes.append(path_node)
                    output_nodes_map[path] = path_node
                # create node-link
                if tag_node_id != path:
                    output_links.append(dict(source=tag_node_id, target=path))
        if not valid_count:
            # tag has no surviving paths: omit the tag node entirely
            continue
        tag_node = dict(
            id=tag_node_id,
            name=tag,
            val=valid_count,
            group=1,
        )
        output_nodes.append(tag_node)
        output_nodes_map[tag_node_id] = tag_node
    # pass 2: post -> post link edges
    for source_path, linked_paths in links_info.items():
        if under and not is_sub_path(source_path, under):
            continue
        valid_count = 0
        for path in linked_paths:
            if under and not is_sub_path(path, under):
                continue
            else:
                filepath_counter[path] = filepath_counter.get(path, 0) + 1
                valid_count += 1
                if path not in output_nodes_map:
                    path_node = dict(id=path, name=get_just_name(path), group=2)
                    output_nodes.append(path_node)
                    output_nodes_map[path] = path_node
                # create node-link
                if source_path != path:
                    output_links.append(dict(source=source_path, target=path))
        if not valid_count:
            continue
        if source_path not in output_nodes_map:
            path_node = dict(id=source_path, name=get_just_name(source_path), group=2)
            output_nodes.append(path_node)
            output_nodes_map[source_path] = path_node
    # update path nodes count
    for path, count in filepath_counter.items():
        path_node = output_nodes_map.get(path)
        if path_node:
            path_node["val"] = count
    # assign a group per first-level parent folder (ids starting from 5)
    for node in output_nodes:
        node_id = node.get("id")
        if node_id.startswith("#"):
            # tag nodes keep group=1
            continue
        relative_path = get_relative_path(node_id.lower(), under.lower(), return_name_if_fail=False)
        if relative_path:
            level1_parent = relative_path.split("/")[0]
            if level1_parent not in filepath_group_info:
                group_id = len(filepath_group_info) + 5
                filepath_group_info[level1_parent] = group_id
            else:
                group_id = filepath_group_info[level1_parent]
            node["group"] = group_id
    output = {
        "nodes": output_nodes,
        "links": output_links,
    }
    return output
def group(objects, attr, *args, **kwargs):
    # Group `objects` by an attribute path and return a sorted list of
    # [group_name, members] pairs.
    #   attr: attribute path; '-' prefix reverses the group order;
    #       'parent:sub1+sub2' means compound sub-attributes under `parent`
    #   kwargs: reverse=bool; group_sorts=[names...] to pin specific groups
    #       first, in the given order
    # NOTE: Python 2 only (`iteritems`, cmp-style list.sort).
    if attr.startswith('-'):
        reverse = True
        attr = attr.lstrip('-')
    else:
        reverse = kwargs.pop('reverse', False)
    group_sorts = kwargs.pop('group_sorts', None) or []
    key = smart_str(attr)
    parts = key.split(':', 1)
    attr = parts[0]
    sub_attrs = [row.strip() for row in parts[1].split('+')
                 ] if len(parts) == 2 else []  # ':' marks sub-attrs, split by '+'
    # same key in both: grouped stores the objects, grouped_sort stores the sort key
    grouped = {}
    grouped_sort = {}
    def add_source(group_name):
        # add the source object to a group
        # (closes over the loop variables `obj` and `sort_key` below)
        grouped.setdefault(group_name, []).append(obj)  # then append to this group
        if group_name not in grouped_sort:
            # group ordering; sort_key is sortable, usually reduced to an integer
            grouped_sort[group_name] = sort_key
    for obj in objects:
        if not sub_attrs:
            # no compound sub-attrs: a direct attribute,
            # e.g. group(posts, 'date') or group(posts, 'date.year')
            sort_key = sort_value = get_value_from_data(
                obj, attr)  # not necessarily a basestring
            if type(sort_value) in [list, tuple]:
                # currently the tags case:
                # a list sort_key puts the object into every listed group
                for group_name in sort_value:
                    add_source(group_name)
            else:
                add_source(group_name=sort_value)
        else:
            # most likely a compound attribute
            parent = get_value_from_data(obj, attr)
            sort_values = []
            for sub_attr in sub_attrs:
                sort_values.append(get_value_from_data(parent, sub_attr))
            sort_values = tuple([value for value in sort_values if value])
            if len(sort_values) == 1:
                sort_values = sort_values[0]
            elif not sort_values:
                sort_values = ''  # avoid returning a value like ()
            sort_key = _get_sort_key(sort_values, per=100)
            add_source(group_name=sort_values)  # use the values tuple as group_name
    # (group_name, sort_key) pairs ordered by sort_key
    grouped_sort = sorted(grouped_sort.iteritems(),
                          key=operator.itemgetter(1),
                          reverse=reverse)
    # the group names, now in their final order
    sorted_group_names = [group_name for group_name, sort_key in grouped_sort]
    # build the final result
    to_return = []
    sorted_groups = []
    unsorted_groups = []
    for group_name in sorted_group_names:
        one_group = [group_name, grouped.get(group_name)]
        to_return.append(one_group)
        if group_sorts:
            if group_name in group_sorts:
                sorted_groups.append(one_group)
            else:
                unsorted_groups.append(one_group)
    if group_sorts:
        # pinned groups first, ordered as listed in group_sorts (py2 cmp sort)
        sorted_groups.sort(
            lambda x, y: group_sorts.index(x[0]) - group_sorts.index(y[0]))
        to_return = sorted_groups + unsorted_groups
    return to_return