Esempio n. 1
0
def get_doc_url_for_template_api(doc,
                                 url_prefix,
                                 url_root=None,
                                 hit_url_path=False):
    """Build the public URL for a doc record under `url_prefix`.

    When hit_url_path is True and the doc carries a `url_path`, that wins.
    Otherwise the doc's own path is used, made relative to `url_root` when
    possible.  Returns "" for any invalid input.

    NOTE: this differs from calling post.url directly — post.url effectively
    applies a dynamic url_prefix of its own.
    """
    if not (doc and isinstance(doc, dict)):
        return ""
    if not isinstance(url_prefix, string_types):
        return ""
    if url_root and not isinstance(url_root, string_types):
        return ""
    url_prefix = url_prefix.strip("/")
    doc_path = get_path_from_record(doc)
    if not doc_path:
        return ""
    url_path = ""
    if hit_url_path:
        url_path = smart_unicode(doc.get("url_path") or "").strip("/")
    if url_path:
        return "/%s/%s" % (url_prefix, url_path)
    if url_root and isinstance(url_root, string_types):
        relative_path = get_relative_path(doc_path.lower(),
                                          url_root.lower(),
                                          return_name_if_fail=False)
        if relative_path:
            return "/%s/%s" % (url_prefix, relative_path)
    # fallback: doc path as-is under the prefix
    return "/%s/%s" % (url_prefix, doc_path)
Esempio n. 2
0
    def sync_for_deleted_files(self):
        """Push local deletions to the server and prune stale ipfs records.

        Pass 1: every file/dir reported by sync_find_files_to_delete has its
        ipfs record popped, is synced to the server with is_deleted=True
        (its md5 is sent as the version), and on HTTP code 200 is finally
        marked as handled via after_sync_deleted.

        Pass 2: entries the server still lists but which no longer exist as
        local files are removed from self.ipfs_files.

        Returns:
            bool: True if anything was synced or pruned in either pass.
        """
        # handle files that have been deleted locally
        synced = False
        filepaths_to_delete_data = sync_find_files_to_delete(
            self.root, app_name=self.app_name_for_sync, as_dict=True)
        for filepath_to_delete_data in filepaths_to_delete_data:
            filepath_to_delete = filepath_to_delete_data['filepath']
            is_dir = filepath_to_delete_data.get('is_dir', False)
            relative_path = get_relative_path(filepath_to_delete,
                                              root=self.root)
            # the ipfs record may be either a dict (with a 'hash' key) or
            # the raw hash value itself
            ipfs_to_delete = self.ipfs_files.pop(relative_path, None)
            if isinstance(ipfs_to_delete, dict):
                ipfs_hash_to_delete = ipfs_to_delete.get('hash')
            else:
                ipfs_hash_to_delete = ipfs_to_delete
            self.remove_file_from_ipfs(ipfs_hash_to_delete)

            # is_deleted=True, send md5 value as version
            md5_value = filepath_to_delete_data.get('md5')

            compiler_sync_worker = FarBoxSyncCompilerWorker(
                server_node=self.server_node,
                root=self.root,
                filepath=filepath_to_delete,
                is_deleted=True,
                is_dir=is_dir,
                private_key=self.private_key,
                should_encrypt_file=self.should_encrypt_file,
                ipfs_key=ipfs_hash_to_delete,
                version=md5_value,
                auto_clean_bucket=self.auto_clean_bucket,
                files_info=self.files_info)
            sync_status = compiler_sync_worker.sync()
            self.record_sync_log(filepath=filepath_to_delete,
                                 sync_status=sync_status,
                                 is_deleted=True)
            if sync_status and sync_status.get('code') == 200:
                synced = True
                # at last, mark status as synced
                after_sync_deleted(filepath_to_delete,
                                   root=self.root,
                                   app_name=self.app_name_for_sync)

        # files on server, but not in local side, clean the configs_for_files
        # should run after self.sync_for_updated_files, to get self.files_info_on_server
        files_info_on_server = get_value_from_data(self.files_info_on_server,
                                                   'message.files') or {}
        for relative_path in files_info_on_server.keys():
            abs_filepath = join(self.root, relative_path)
            if not os.path.isfile(abs_filepath):
                self.ipfs_files.pop(relative_path, None)
                synced = True

        return synced
Esempio n. 3
0
 def get_url(self, prefix, root=None):
     """Return "/<prefix>/<path>" for this object.

     When `root` is a usable string and self.path lies under it, the path
     relative to `root` is used; otherwise the full self.path is kept.
     """
     prefix = prefix.strip("/")
     if root and isinstance(root, string_types):
         relative_path = get_relative_path(self.path.lower(),
                                           root.lower(),
                                           return_name_if_fail=False)
         if relative_path:
             return "/%s/%s" % (prefix, relative_path)
     return "/%s/%s" % (prefix, self.path)
Esempio n. 4
0
def get_wiki_url_for_doc(wiki_root, doc):
    """Return the /wiki/... URL for a doc record located under `wiki_root`.

    Post records map to /wiki/post/<relative_path>, everything else to
    /wiki/category/<relative_path>.  Returns "" for invalid inputs or when
    the doc does not live under wiki_root.
    """
    if not isinstance(wiki_root, string_types) or not isinstance(doc, dict):
        return ""
    wiki_root = wiki_root.strip("/")
    doc_type = get_type_from_record(doc)
    doc_path = get_path_from_record(doc)
    if not doc_path:
        # records without a path cannot be mapped; previously this crashed
        # on doc_path.lower() (get_doc_url_for_template_api guards the same way)
        return ""
    relative_path = get_relative_path(doc_path.lower().strip("/"), wiki_root, return_name_if_fail=False)
    if not relative_path:
        return ""
    if doc_type == "post":
        return "/wiki/post/%s" % relative_path
    else:
        return "/wiki/category/%s" % relative_path
Esempio n. 5
0
def get_template_info(template_dir):
    """Collect a template directory into an info dict.

    Keys are lower-cased relative paths mapped to file contents (unicode).
    Extra keys:
      '_readme' - compiled markdown content + metadata of a readme.* file
      '_route'  - routing info derived from the collected templates
    Hidden/unreal paths and extensions outside `allowed_exts` are skipped.
    Resources with a matching entry in template_resource_compilers are also
    stored under the compiled extension.
    """
    info = {}
    template_dir = template_dir.strip().rstrip('/')
    if not os.path.isdir(template_dir):
        return info  # ignore
    filepaths = get_all_sub_files(template_dir,
                                  accept_func=os.path.isfile,
                                  max_tried_times=1000)
    for filepath in filepaths:
        relative_path = get_relative_path(
            filepath, root=template_dir).lower()  # lower case
        if not os.path.isfile(filepath):
            continue
        if not is_real(filepath) or is_a_hidden_path(relative_path):
            continue
        if relative_path.startswith('readme.') and is_a_markdown_file(
                relative_path):  # info carried on the template's readme
            with open(filepath, 'rb') as f:
                raw_markdown_content = smart_unicode(f.read())
            compiled_markdown_content = compile_markdown(raw_markdown_content)
            compiled_markdown_content_meta = compiled_markdown_content.metadata
            readme_info = dict(content=compiled_markdown_content,
                               metadata=compiled_markdown_content_meta
                               )  # raw_content=raw_markdown_content,
            info['_readme'] = readme_info
        else:
            path_without_ext, ext = os.path.splitext(relative_path)
            ext = ext.strip('.').lower()
            if ext not in allowed_exts:
                continue
            with open(filepath, 'rb') as f:
                raw_content = f.read()
            raw_content = smart_unicode(raw_content)  # to unicode
            info[relative_path] = raw_content
            matched_compiler = template_resource_compilers.get(ext)
            if matched_compiler:
                new_ext, compile_func = matched_compiler
                try:
                    compiled_content = compile_func(raw_content)
                    new_key = path_without_ext + '.' + new_ext.strip('.')
                    info[new_key] = compiled_content
                except Exception as e:
                    # NOTE(review): e.message is Python 2 only; under Python 3
                    # this is always None and compile errors are silently dropped
                    error_message = getattr(e, 'message', None)
                    if error_message:
                        try:
                            print('%s error: %s' %
                                  (relative_path, error_message))
                        except:
                            pass
    info['_route'] = get_templates_route_info(info)
    return info
Esempio n. 6
0
    def __init__(self,
                 server_node,
                 root,
                 filepath,
                 private_key=None,
                 should_encrypt_file=False,
                 is_dir=False,
                 is_deleted=False,
                 ipfs_key=None,
                 version=None,
                 auto_clean_bucket=True,
                 relative_path=None,
                 real_relative_path=None,
                 raw_content=None,
                 files_info=None):
        """Store the parameters for one file-sync job against `server_node`.

        `filepath` is the file under `root` to sync.  `relative_path` is
        derived from root & filepath when not given explicitly; it becomes ''
        when filepath is not under root.  `raw_content`, when provided, takes
        precedence over reading `filepath` for the file content.
        """
        self.files_info = files_info

        self.server_node = server_node
        self.root = root
        self.filepath = filepath
        self.private_key = private_key
        self.should_encrypt_file = should_encrypt_file
        self.is_dir = is_dir
        self.is_deleted = is_deleted
        self.ipfs_key = ipfs_key
        self.version = version
        self.auto_clean_bucket = auto_clean_bucket

        # mainly used when compiling Markdown documents for FarBox Page
        self.real_relative_path = real_relative_path

        # if relative_path is not specified, it must be derived from
        # root & filepath.
        # filepath is used to obtain the file content; when raw_content is
        # specified, raw_content takes precedence.
        self._raw_content = raw_content
        if relative_path:
            self.relative_path = relative_path
        else:
            if not is_sub_path(self.filepath, parent_path=self.root):
                self.relative_path = ''
            else:
                self.relative_path = get_relative_path(self.filepath,
                                                       root=self.root)

        self.lower_relative_path = self.relative_path.lower()
def upload_static_files_to_cdn(static_files_root,
                               cdn_static_prefix,
                               secret_id,
                               secret_key,
                               bucket,
                               region,
                               force_update=False):
    """Upload every eligible file under `static_files_root` to the qcloud
    bucket, keyed under `cdn_static_prefix`.

    Hidden paths and Python source/bytecode files are skipped.  Unless
    force_update is set, a file already on qcloud whose ETag equals the local
    md5 is left untouched.  Each uploaded filepath is printed.
    """
    for filepath in get_all_sub_files(static_files_root):
        if is_a_hidden_path(filepath):
            continue
        if os.path.splitext(filepath)[-1] in [".py", ".pyc", ".pyd"]:
            continue
        relative_path = get_relative_path(filepath, static_files_root)
        cdn_path = "/%s/%s" % (cdn_static_prefix.strip("/"),
                               relative_path.strip("/"))
        qcloud_kwargs = dict(secret_id=secret_id,
                             secret_key=secret_key,
                             bucket=bucket,
                             region=region)
        if not force_update and has_file_on_qcloud(cdn_path, **qcloud_kwargs):
            qcloud_file_meta = get_file_meta_on_qcloud(cdn_path,
                                                       **qcloud_kwargs)
            if isinstance(qcloud_file_meta, dict) and qcloud_file_meta:
                # ETag may come back wrapped in quotes
                q_version = qcloud_file_meta.get("ETag",
                                                 "").strip("'").strip('"')
                if q_version and get_md5_for_file(filepath) == q_version:
                    continue  # identical content already on the CDN
        with open(filepath, "rb") as f:
            upload_file_obj_to_qcloud(file_obj=f,
                                      url_path=cdn_path,
                                      content_type=guess_type(filepath),
                                      **qcloud_kwargs)
        print(filepath)
Esempio n. 8
0
def show_wiki_nodes_as_sub_site():
    """Serve the /wiki_nodes sub-site for the current request's bucket.

    Returns None (letting other handlers run) unless the request path matches
    wiki_nodes*, wiki nodes are enabled in __wiki.json, and a wiki_root is
    configured.  With ?type=data the posts link-graph JSON is returned, with
    node urls filled in (/wiki/tag/... for tag nodes, /wiki/post/... for path
    nodes under wiki_root); otherwise the wiki-nodes template is rendered.
    """
    bucket = get_bucket_in_request_context()
    if not bucket:
        return
    request_path = get_request_path().strip("/")
    if not re.match("wiki_nodes(/|$)", request_path):
        return
    wiki_configs = get_json_content_by_path(bucket, "__wiki.json", force_dict=True)
    enable_wiki_nodes = auto_type(wiki_configs.get("enable_wiki_nodes", True))
    if not enable_wiki_nodes:
        return
    wiki_root = smart_unicode(wiki_configs.get("wiki_root", ""))
    if not wiki_root:
        return
    wiki_root = wiki_root.strip("/")
    wiki_title = wiki_configs.get("wiki_title") or get_just_name(wiki_root, for_folder=True)
    path = request.values.get("path", "").strip("/")
    if request.values.get("type") == "data":
        # return json data
        wiki_root = wiki_root.lower()
        under = "%s/%s" % (wiki_root, path)
        posts_info = get_bucket_posts_info(bucket)
        data = filter_and_get_posts_link_points_info(posts_info, under=under)
        nodes = data.get("nodes")
        if nodes:
            for node in nodes:
                node_id = node.get("id")
                if node_id and isinstance(node_id, string_types):
                    if node_id.startswith("#"):
                        # "#tag" ids map to tag pages
                        tag = node_id.lstrip("#")
                        url = "/wiki/tag/%s" % tag
                        node["url"] = url
                    else:
                        # path ids map to post pages, relative to wiki_root
                        relative_path = get_relative_path(node_id.strip("/"), wiki_root, return_name_if_fail=False)
                        if relative_path:
                            url = "/wiki/post/%s" % relative_path
                            node["url"] =  url
        return force_response(data)
    else:
        return render_api_template("builtin_theme_wiki_nodes.jade", wiki_title=wiki_title)
Esempio n. 9
0
def get_static_resources_map():
    """Scan static_folder_path (up to 4 levels deep) and map resource names
    to absolute filepaths.

    Each file becomes reachable by its lower-cased filename, its name without
    extension, and its relative path; jquery plugins additionally get the
    name with the leading "jquery." stripped.  Source files (.py/.jade/
    .coffee/.scss/.less) and images (.jpg/.gif/.png) are excluded.
    """
    static_resources_map = {}
    raw_filepaths = glob.glob('%s/*'%static_folder_path) + glob.glob('%s/*/*'%static_folder_path) + \
                    glob.glob('%s/*/*/*'%static_folder_path) + glob.glob('%s/*/*/*/*'%static_folder_path)
    for filepath in raw_filepaths:
        if os.path.isdir(filepath):
            continue
        ext = os.path.splitext(filepath)[-1].lower()
        # bugfix: os.path.splitext keeps the leading dot, so the image
        # extensions previously listed as 'jpg'/'gif'/'png' never matched
        # and images were not excluded as intended
        if ext in [
                '.py', '.jade', '.coffee', '.scss', '.less', '.jpg', '.gif',
                '.png'
        ]:
            continue
        filename = os.path.split(filepath)[-1].lower()
        just_name = os.path.splitext(filename)[0]
        relative_path = get_relative_path(filepath, static_folder_path)
        names = [filename, just_name, relative_path]
        if just_name.startswith('jquery.'):
            names.append(just_name.replace('jquery.', '', 1))
        for name in names:
            static_resources_map[name] = filepath
    return static_resources_map
Esempio n. 10
0
def should_sync(filepath,
                root,
                app_name,
                check_md5=True,
                extra_should_sync_func=None):
    """Decide whether `filepath` under `root` needs syncing for `app_name`.

    Missing, hidden and non-real paths are never synced.  With check_md5, a
    file whose stored sync md5 equals its current md5 is considered already
    synced.  When given, `extra_should_sync_func` gets the final say if it
    returns a bool; it is tried as f(filepath, root) first and, on failure,
    as f(relative_path).  Defaults to True.
    """
    if (not os.path.exists(filepath) or is_a_hidden_path(filepath)
            or not is_real(filepath)):
        return False

    if check_md5:
        sync_data = get_sync_data(filepath, root, app_name)
        # an identical md5 means this file has been synced already
        if sync_data and sync_data.get('md5') == md5_for_file(filepath):
            return False

    # NOTE: size limits (e.g. a 100MB cap on some services) are deliberately
    # not enforced here
    if extra_should_sync_func:
        try:
            result = extra_should_sync_func(filepath, root)
            if isinstance(result, bool):
                return result
        except:
            try:
                result = extra_should_sync_func(
                    get_relative_path(filepath, root=root))
                if isinstance(result, bool):
                    return result
            except:
                pass

    return True
Esempio n. 11
0
    def sync_one_file(self, filepath, lower_files_info_on_server=None, lower_folders_info_on_server=None,
                      re_check=False, should_store_files_info=False):
        """Sync a single local file or folder to the server node.

        Optionally re-checks whether the path still needs syncing, skips
        paths whose version/hash already matches the server-side info, and
        records version bookkeeping in self.ipfs_files.

        Returns:
            bool: True only when the server acknowledged with code 200
            (may also return None early when encryption fails).
        """
        if re_check:
            should_sync = detect_should_sync(filepath=filepath, root=self.root,
                                             app_name=self.app_name_for_sync, check_md5=True,
                                             extra_should_sync_func=self.should_sync_file_func)
            if not should_sync:
                return False
        synced = False
        lower_files_info_on_server = lower_files_info_on_server or {}
        lower_folders_info_on_server = lower_folders_info_on_server or []
        is_file = os.path.isfile(filepath)
        relative_path = get_relative_path(filepath, root=self.root)
        file_size = os.path.getsize(filepath)
        file_real_size = file_size
        if self.should_encrypt_file and self.private_key and is_file:
            # encrypted_filepath is a temporary file
            encrypted_filepath = encrypt_file(filepath, encrypt_key=self.private_key)
            if not encrypted_filepath:
                return
            file_real_size = os.path.getsize(encrypted_filepath)
            ipfs_key = self.add_file_to_ipfs(encrypted_filepath)
            try:
                os.remove(encrypted_filepath)
            except:
                pass
        elif is_file:
            ipfs_key = self.add_file_to_ipfs(filepath)
        else:
            ipfs_key = None

        file_version = ipfs_key
        if not ipfs_key and os.path.isfile(filepath):
            # compatibility for setups without ipfs: use the file's md5 instead
            file_version = get_md5_for_file(filepath)

        # compare against the lower_files info on the server; if the file is
        # identical, ignore it
        lower_relative_path = to_unicode(relative_path.lower())
        should_ignore = False
        if file_version:
            remote_file_version = get_value_from_data(lower_files_info_on_server.get(lower_relative_path), 'version')
            if not remote_file_version:
                remote_file_version = get_value_from_data(lower_files_info_on_server.get(lower_relative_path), 'hash')
            if remote_file_version == file_version:
                #if settings.DEBUG:
                #    print('has same file on server already for %s' % relative_path)
                should_ignore = True
            self.ipfs_files[relative_path] = dict(hash=file_version, size=file_size, real_size=file_real_size)

        is_dir = os.path.isdir(filepath)
        if is_dir:
            if lower_relative_path in lower_folders_info_on_server:
                #if settings.DEBUG:
                #   print('has same folder on server already for %s' % relative_path)
                should_ignore = True
        if should_ignore:
            # save ignored paths as synced, so the next loop won't pick them up again
            after_synced(filepath, root=self.root, app_name=self.app_name_for_sync)
        else:
            sync_compiler_worker = FarBoxSyncCompilerWorker(
                server_node=self.server_node,
                root=self.root,
                filepath=filepath,
                is_deleted=False,
                is_dir=is_dir,
                private_key=self.private_key,
                should_encrypt_file=self.should_encrypt_file,
                ipfs_key = ipfs_key,
                auto_clean_bucket=self.auto_clean_bucket,

                files_info=self.files_info,
            )
            sync_status = sync_compiler_worker.sync()
            self.record_sync_log(filepath=filepath, sync_status=sync_status, is_deleted=False)
            if sync_status and sync_status.get('code') == 200:
                synced = True
                after_synced(filepath, root=self.root, app_name=self.app_name_for_sync)

                if settings.DEBUG:
                    print("synced (to) %s" % filepath)

                if should_store_files_info:
                    self.store_files_info()
            elif not sync_status:
                # no status returned: treat it as a form of ignore
                after_synced(filepath, root=self.root, app_name=self.app_name_for_sync)

        return synced
Esempio n. 12
0
    def load(cls, *resource, **kwargs):
        """Render <script>/<link> HTML tags for the given static resource(s).

        `resource` may be one or more strings; 'a b c' is split into a list,
        and shortcut names are resolved via lazy_load_map and the local
        static resources map.  Each resource is loaded at most once per page
        unless force=True; a ?version query-arg is appended for cache
        busting.  Returns the HTML snippet, or '' when nothing should be
        emitted.
        """
        if getattr(request, 'disable_load_func', False):  # the load function is disabled
            return ''

        force_load = kwargs.pop('force', False)

        if not resource:
            return ''

        if len(resource) == 1:
            resource = resource[0]
            if resource in lazy_load_map:  # shortcut alias
                resource = lazy_load_map[resource]
            if ' ' in resource:  # handle load('a b c')
                resource = resource.split(' ')

        # resource may be a list or a single string;
        # check the type up front
        if isinstance(resource, (list, tuple)):
            result = ''
            for child_resource in resource:
                if isinstance(child_resource, string_types):
                    result += cls.load(child_resource)
            return result
        elif not isinstance(resource, string_types):
            return ''

        # everything below handles a single resource, so de-duplication works

        # handle smart scss
        if kwargs.pop("scss", False):
            scss_compiled_url = get_smart_scss_url(resource, **kwargs)
            resource = scss_compiled_url

        # the same resource may only be loaded once within a page
        if is_resource_in_loads_in_page_already(resource) and not force_load:
            # ignore, already loaded once on this page
            return ""

        if not isinstance(resource, string_types):
            return resource

        # like h.load('jquery')
        if '.' not in resource and resource in web_static_resources_map:
            match_local_filepath = web_static_resources_map[resource]
            #match_filename = os.path.split(match_local_filepath)[-1].lower()
            relative_path = get_relative_path(match_local_filepath,
                                              root=static_folder_path)
            resource = '/__%s' % relative_path

        # adjust the url's relative location
        raw_resource = resource
        resource = auto_bucket_url_path(resource)

        # append ?version for cache busting
        if '?' not in resource:
            if resource.startswith('/__'):
                resource = '%s?version=%s' % (resource, STATIC_FILE_VERSION)
            elif '/fb_static/' in resource:
                resource = '%s?version=%s' % (resource, STATIC_FILE_VERSION)
            elif raw_resource.startswith('/template/'):
                template_pages_configs = get_pages_configs()
                template_version = template_pages_configs.get('mtime')
                if template_version:
                    resource = '%s?version=%s' % (resource, template_version)

        resource_path = resource
        if '?' in resource:
            resource_path = resource.split('?')[0]
        ext = os.path.splitext(resource_path)[1].lower()
        if not ext:
            ext = '.%s' % (resource.split('?')[0].split('/')[-1]
                           )  # e.g. http://fonts.useso.com/css?family=Lato:300

        if static_files_url and resource.startswith("/fb_static/"):
            static_relative_path = resource.replace("/fb_static/", "", 1)
            static_url = "%s/%s" % (static_files_url, static_relative_path)
            resource = static_url

        if ext in [
                '.js', '.coffee'
        ] or ext.startswith('.js?') or resource.split('?')[0].endswith('js'):
            content = '<script type="text/javascript" src="%s"></script>' % resource
        elif ext in [
                '.css', '.less', '.scss', '.sass'
        ] or ext.startswith('.css?') or ext.endswith('?format=css'):
            content = '<link href="%s" type="text/css" rel="stylesheet"/>' % resource
        else:
            content = ''

        return content
Esempio n. 13
0
def filter_and_get_posts_link_points_info(posts_info, under=""):
    """Build a force-graph {nodes, links} structure from posts_info.

    Tag -> post edges come from posts_info 'tags.tags'; post -> post edges
    from 'links.links'.  When `under` is set, only paths inside that folder
    are kept.  Node groups: 1 = tag, 2 = path; path nodes later get a group
    per first-level parent folder (numbered from 5).  A node's `val` is how
    often it was referenced.
    """
    under = under.strip("/").strip().lower()
    links_info = get_value_from_data(posts_info, "links.links")
    if not isinstance(links_info, dict): links_info = {}
    tags_info = get_value_from_data(posts_info, "tags.tags")
    if not isinstance(tags_info, dict): tags_info = {}

    output_nodes = []
    output_nodes_map = {}
    output_links = []
    filepath_counter = {}
    filepath_group_info = {}  # hit parent, +5

    # tag -> tagged paths: emit path nodes and tag->path links
    for tag, tagged_paths in tags_info.items():
        valid_count = 0
        tag_node_id = "#%s" % tag
        for path in tagged_paths:
            if under and not is_sub_path(path, under):
                continue
            else:
                filepath_counter[path] = filepath_counter.get(path, 0) + 1
                valid_count += 1
                if path not in output_nodes_map:
                    path_node = dict(id=path,
                                     name=get_just_name(path),
                                     group=2)
                    output_nodes.append(path_node)
                    output_nodes_map[path] = path_node
                # create node-link
                if tag_node_id != path:
                    output_links.append(dict(source=tag_node_id, target=path))
        if not valid_count:
            continue
        # only emit the tag node if it has at least one valid path
        tag_node = dict(
            id=tag_node_id,
            name=tag,
            val=valid_count,
            group=1,
        )
        output_nodes.append(tag_node)
        output_nodes_map[tag_node_id] = tag_node

    # source path -> linked paths: emit path nodes and path->path links
    for source_path, linked_paths in links_info.items():
        if under and not is_sub_path(source_path, under):
            continue
        valid_count = 0
        for path in linked_paths:
            if under and not is_sub_path(path, under):
                continue
            else:
                filepath_counter[path] = filepath_counter.get(path, 0) + 1
                valid_count += 1
                if path not in output_nodes_map:
                    path_node = dict(id=path,
                                     name=get_just_name(path),
                                     group=2)
                    output_nodes.append(path_node)
                    output_nodes_map[path] = path_node
                # create node-link
                if source_path != path:
                    output_links.append(dict(source=source_path, target=path))
        if not valid_count:
            continue
        if source_path not in output_nodes_map:
            path_node = dict(id=source_path,
                             name=get_just_name(source_path),
                             group=2)
            output_nodes.append(path_node)
            output_nodes_map[source_path] = path_node

    # update path nodes count
    for path, count in filepath_counter.items():
        path_node = output_nodes_map.get(path)
        if path_node:
            path_node["val"] = count

    # assign a group per first-level parent folder under `under`
    for node in output_nodes:
        node_id = node.get("id")
        if node_id.startswith("#"):
            continue
        relative_path = get_relative_path(node_id.lower(),
                                          under.lower(),
                                          return_name_if_fail=False)
        if relative_path:
            level1_parent = relative_path.split("/")[0]
            if level1_parent not in filepath_group_info:
                group_id = len(filepath_group_info) + 5
                filepath_group_info[level1_parent] = group_id
            else:
                group_id = filepath_group_info[level1_parent]
            node["group"] = group_id

    output = {
        "nodes": output_nodes,
        "links": output_links,
    }

    return output