Example #1
def load_theme_from_template_folder_for_bucket(bucket, prefix="template"):
    if not has_bucket(bucket):
        return
    info = {}
    prefix = prefix.strip().strip("/")
    template_paths = get_paths_under(bucket, under=prefix)
    for _relative_path in template_paths:
        relative_path = _relative_path.replace("%s/"%prefix, "", 1)
        raw_content = get_raw_content_by_path(bucket, _relative_path)
        if not raw_content:
            continue
        path_without_ext, ext = os.path.splitext(relative_path)
        ext = ext.lower().strip('.')
        if ext not in allowed_exts:
            continue
        raw_content = smart_unicode(raw_content)  # to unicode
        info[relative_path] = raw_content
        matched_compiler = server_side_template_resource_compilers.get(ext)
        if matched_compiler:
            new_ext, compile_func = matched_compiler
            try:
                compiled_content = compile_func(raw_content)
                new_key = path_without_ext + '.' + new_ext.strip('.')
                info[new_key] = compiled_content
            except Exception:
                pass  # skip resources that fail to compile
    info["_route"] = get_templates_route_info(info)

    set_bucket_configs(bucket, info, config_type='pages')

    return info
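
Note: the loop above unpacks each compiler registry entry into (new_ext, compile_func). A minimal sketch of that assumed shape, with a purely hypothetical SCSS entry (the real registry and compile functions live elsewhere in FarBox):

def compile_scss_to_css(raw_content):
    # hypothetical placeholder; a real compiler would turn SCSS source into CSS
    return raw_content

# lowercase source extension (without the dot) -> (compiled extension, compile function)
server_side_template_resource_compilers = {
    "scss": ("css", compile_scss_to_css),
}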
Example #2
def sync_file_by_web_request():
    relative_path = (request.values.get('path')
                     or request.values.get('relative_path') or '').strip()
    relative_path = relative_path.lstrip('/')
    real_relative_path = request.values.get("real_path",
                                            "").strip().lstrip("/")
    content = get_file_content_in_request() or request.values.get(
        'raw_content') or request.values.get('content')
    is_dir = request.values.get('is_dir') == 'true'
    is_deleted = request.values.get('is_deleted') == 'true'
    bucket = get_logined_bucket()
    should_check_login = True
    if not bucket:
        bucket = get_logined_bucket_by_token()  # by api token
        if bucket and request.values.get(
                "action") == "check":  # 仅仅是校验当前的 token 是否正确了
            return jsonify(dict(status='ok'))
        should_check_login = False
    if not relative_path:
        error_info = 'set path first'
    elif not bucket:
        error_info = 'no bucket matched'
    elif should_check_login and not is_bucket_login(bucket=bucket):
        error_info = 'bucket is not login'
    elif is_deleted and is_dir and get_paths_under(bucket=bucket,
                                                   under=relative_path):
        error_info = 'a non-empty folder is not allowed to delete on web file manager'
    elif content and len(content) > MAX_FILE_SIZE:
        error_info = "max file size is %s" % MAX_FILE_SIZE
    else:
        # handle .configs/sorts.json -> "orders" configs
        content_handled = False
        error_info = ""
        if relative_path in [".configs/sorts.json", "configs/sorts.json"]:
            try:
                raw_sorts_data = json.loads(content)
                if isinstance(raw_sorts_data,
                              dict) and "__positions" in raw_sorts_data:
                    sorts_data = raw_sorts_data.get("__positions")
                    if isinstance(sorts_data, dict):
                        set_bucket_configs(bucket,
                                           configs=sorts_data,
                                           config_type="orders")
                        content_handled = True
            except Exception:
                pass  # ignore malformed sorts.json content
        if not content_handled:
            error_info = sync_file_by_server_side(
                bucket=bucket,
                relative_path=relative_path,
                content=content,
                is_dir=is_dir,
                is_deleted=is_deleted,
                real_relative_path=real_relative_path)
    if not error_info:
        return jsonify(dict(status='ok'))
    else:
        return json_if_error(400, dict(status='failed', message=error_info))
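
Note: a minimal sketch of the JSON payload the .configs/sorts.json branch above expects; the file paths and position values are hypothetical, and only the inner "__positions" dict is stored as the bucket's "orders" configs:

import json

sorts_payload = {
    "__positions": {
        "notes/first-post.md": 1,
        "notes/second-post.md": 2,
    }
}
content = json.dumps(sorts_payload)  # what the request's content field would carry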
Example #3
File: handler.py Project: zhiiker/FarBox
def set_bucket_theme(self):
    # set the theme directly from one of the system's builtin themes
    theme_key = self.raw_json_data.get('theme') or self.raw_json_data.get(
        'theme_key')
    theme_content = themes.get(theme_key) or ''
    if not theme_content or not isinstance(theme_content, dict):
        return json_with_status_code(404, 'can not find the theme')
    else:
        if '_theme_key' not in theme_content:
            theme_content['_theme_key'] = theme_key
        set_bucket_configs(self.bucket, theme_content, config_type='pages')
        return json_with_status_code(200, 'ok')
Example #4
File: handler.py Project: zhiiker/FarBox
def update_bucket_config(self):
    config_type = self.action.replace('config_', '').strip()
    configs = self.raw_json_data
    updated = set_bucket_configs(self.bucket,
                                 configs,
                                 config_type=config_type)
    if config_type in ['files', 'file']:
        # the files info is not set by the client
        return json_with_status_code(200, 'ok')
    if WEBSOCKET and config_type == 'pages' and configs.get(
            '__changed_filepaths'):
        # if websocket is enabled, push a notification so templates can be refreshed in real time
        changed_filepaths = configs.get('__changed_filepaths')
        message_to_push = dict(changed_filepaths=changed_filepaths,
                               date=time.time())
        push_message_to_bucket(bucket=self.bucket, message=message_to_push)
    if not updated:
        return json_with_status_code(
            400, 'configs format error or no bucket matched')
    else:
        # ipfs-related logic removed for now @2021-2-4
        # from farbox_bucket.ipfs.server.ipfs_bucket import mark_bucket_to_sync_ipfs
        #if config_type == 'files':
        # todo: is this handling appropriate??
        #mark_bucket_to_sync_ipfs(self.bucket)
        return json_with_status_code(200, 'ok')
Example #5
File: bucket.py Project: zhiiker/FarBox
def apply_builtin_theme(self, theme_key, return_result=False):
    theme_key = theme_key.lower().strip()  # keys are always lowercase
    theme_content = builtin_themes.get(theme_key) or ""
    if not self.logined_bucket:
        if return_result:
            return "not-login"
    if not theme_content or not isinstance(theme_content, dict):
        if return_result:
            return "not-found"
    else:
        if '_theme_key' not in theme_content:
            theme_content['_theme_key'] = theme_key
        set_bucket_configs(self.logined_bucket,
                           theme_content,
                           config_type='pages')
        if return_result:
            return "ok"
    return ""
Example #6
def do_set_bucket_pages_configs_by_web_api(bucket, remote_url, timeout=3):
    if not has_bucket(bucket):
        return
    if not isinstance(remote_url, string_types):
        return
    if "://" not in remote_url:
        remote_url = "http://" + remote_url
    try:
        response = requests.get(remote_url, timeout=timeout)
        raw_pages_configs = response.json()
        if not isinstance(raw_pages_configs, dict):
            return
        if not raw_pages_configs.get("_route"):
            return
        raw_pages_configs["can_copy"] = False
        set_bucket_configs(bucket, raw_pages_configs, config_type="pages")
        return True
    except Exception:
        pass  # network or JSON errors: treated as failure (implicitly returns None)
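
Note: the remote URL above must return a JSON dict that already carries a "_route" key, otherwise the import is silently skipped. An illustrative minimum response body (the template content and route value are placeholders):

remote_pages_configs = {
    "index.html": "<html><body>{{ posts }}</body></html>",
    "_route": {},  # in practice produced by get_templates_route_info (see Example #1)
}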
Example #7
def init_server_status_bucket():
    if 'utc_offset' not in os.environ:
        # utc_offset is used for human-readable output when collecting server status info
        utc_offset = get_env('utc_offset')
        if utc_offset is None:
            utc_offset = 8
        try:
            utc_offset = str(utc_offset)
        except Exception:
            utc_offset = '8'
        os.environ['utc_offset'] = utc_offset

    configs = get_server_status_bucket_configs()
    bucket = configs['bucket']
    public_key = configs['public_key']
    if has_bucket(bucket):
        return

    create_bucket_by_public_key(public_key)
    set_bucket_configs(bucket,
                       config_type='pages',
                       configs=bucket_web_template)
Example #8
def update_bucket_files_info(bucket, last_record_id=None):
    files_info = get_files_info(bucket)
    set_bucket_configs(bucket, configs=files_info, config_type='files')
    if last_record_id:
        set_bucket_last_record_id_computed(bucket, last_record_id)
    return files_info
Example #9
def create_record(bucket,
                  record_data,
                  avoid_repeated=True,
                  auto_id=True,
                  file_content=None,
                  return_record=False):
    # make sure the bucket is correct before creating the record
    # if this function returns data, that data is the error_info
    # avoid_repeated: avoid creating a record whose body is identical to the latest record's
    error_info = get_record_data_error_info(record_data)
    if error_info:
        return error_info

    py_record_data = ssdb_data_to_py_data(record_data)
    byte_record_data = py_data_to_ssdb_data(record_data)

    if auto_id:
        object_id = str(ObjectId())
        if '_id' not in py_record_data and isinstance(
                py_record_data, dict):  # auto-fill _id into the record data if necessary
            py_record_data['_id'] = object_id
    else:
        object_id = py_record_data.get('_id') or py_record_data.get('id')
        avoid_repeated = False  # an explicitly specified id skips the repeated-record check
        if not object_id:
            return 'auto_id disabled, should pass id in the record data'

    if avoid_repeated:  # avoid duplicating the latest record
        record_md5 = to_md5(byte_record_data)
        if not allowed_to_create_record_in_bucket(bucket, record_md5):
            error_info = 'current data is repeated to latest record @%s' % bucket
            if isinstance(py_record_data, dict):
                path_in_record = py_record_data.get('path')
                if path_in_record:
                    error_info += smart_unicode(', the path is %s' %
                                                path_in_record)
            return error_info
        else:
            update_bucket_last_record_md5(bucket, record_md5)

    # runs when '_auto_clean_bucket' is in record_data and is `True`
    # for a delete, remove the record directly (break); otherwise it is a full update, i.e. the new record replaces the old one
    auto_clean_status = auto_clean_record_before_handle_path_related_record(
        bucket, py_record_data)
    if auto_clean_status == 'break':
        return

    # store pre_object_id
    # get the previous object's id and, if the current data is a dict, store it in the _pre_id field
    pre_object_id = get_bucket_max_id(bucket)
    if pre_object_id:
        if isinstance(py_record_data, dict):
            py_record_data['_pre_id'] = pre_object_id

    # store the record and update the bucket's max_id
    # record ids are randomly generated and essentially never repeat, so ignore_if_exists=False skips an existence check
    hset(bucket, object_id, py_record_data, ignore_if_exists=False)

    after_record_created(bucket,
                         py_record_data,
                         object_id=object_id,
                         should_update_bucket_max_id=True)

    # update the buckets registry to mark that this bucket was just updated
    set_bucket_into_buckets(bucket)
    if py_record_data.get("path"):
        # path-related records need last_record_id stored separately because of deletes; otherwise hget_max alone would suffice
        set_bucket_last_record_id(bucket, object_id)

    if file_content and not py_record_data.get("raw_content"):
        # file content was provided and the record has no raw_content field, so store the file itself
        storage.accept_upload_file_from_client(
            bucket, py_record_data, get_raw_content_func=file_content)

    if py_record_data.get("path") == "settings.json" and py_record_data.get(
            "raw_content"):
        try:
            site_settings = json_loads(py_record_data.get("raw_content"))
            if isinstance(site_settings, dict):
                set_bucket_configs(bucket, site_settings, config_type="site")
        except Exception:
            pass  # ignore invalid settings.json content

    if return_record:
        return py_record_data
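
Note: a minimal, hypothetical call to create_record; the bucket name is made up, and the settings.json path exercises the branch above that also refreshes the "site" configs:

import json

record_data = {
    "path": "settings.json",
    "raw_content": json.dumps({"title": "My Site"}),
}
error_info = create_record("demo-bucket", record_data)
if error_info:
    print("create_record failed: %s" % error_info)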
Example #10
def update_server_status_bucket_template():
    bucket = get_server_status_bucket()
    if bucket and has_bucket(bucket):
        set_bucket_configs(bucket,
                           config_type='pages',
                           configs=bucket_web_template)
Example #11
def update_post_tags_words_info(bucket, record_data):
    path = get_path_from_record(record_data)
    if not path:
        return
    lower_path = path.lower().lstrip('/')
    if not is_a_markdown_file(path):
        return
    if lower_path.startswith('_nav/'):
        return
    posts_info = get_bucket_posts_info(bucket) or {}
    # data init
    bucket_text_words = to_int(posts_info.get('text_words') or 0,
                               default_if_fail=0)

    # prepare tags info
    tags_info = posts_info.setdefault(
        'tags',
        {})  # {'paths':{path:[tag1,tag2]} ,  'tags': {'tag':[path1, path2]} }
    tags_info_tags = tags_info.setdefault('tags', {})
    tags_info_paths = tags_info.setdefault('paths', {})

    # prepare links info
    links_info = posts_info.setdefault(
        "links", {})  # {'paths': {path: [linked_path1, linked_path2]},
    #                    'links': {linked_path: [back_path1, back_path2]}}
    links_info_links = links_info.setdefault("links", {})
    links_info_paths = links_info.setdefault("paths", {})

    words_info = posts_info.setdefault('words', {})  # {'path': text_words}

    is_deleted = record_data.get('is_deleted', False)
    post_status = record_data.get('status') or 'public'
    post_tags = record_data.get('tags') or []
    if not isinstance(post_tags, (list, tuple)):
        post_tags = []

    post_doc_links, wiki_tags = get_linked_docs_from_markdown_content(
        path,
        record_data.get("raw_content"),
        md_link_abs_check_func=partial(has_record_by_path, bucket))
    if not isinstance(post_doc_links, (list, tuple)):
        post_doc_links = []

    text_words = to_int(record_data.get('text_words'), default_if_fail=0)

    # if this path already has words info, subtract it first to avoid double counting
    old_text_words = to_int(words_info.get(lower_path), default_if_fail=0)
    if old_text_words:
        bucket_text_words -= old_text_words

    # deleted or draft-like statuses are excluded from the stats; remove their related info
    if is_deleted or post_status in ['draft', 'private']:
        words_info.pop(lower_path, None)

        # handle delete tags
        old_tags = tags_info_paths.get(lower_path)
        if not isinstance(old_tags, (list, tuple)):
            old_tags = []
        old_tags = [smart_unicode(tag) for tag in old_tags]
        tags_info_paths.pop(lower_path, None)
        for tag in old_tags:
            tags_info_tags_for_tag_paths = tags_info_tags.setdefault(tag, [])
            if lower_path in tags_info_tags_for_tag_paths:
                tags_info_tags_for_tag_paths.remove(lower_path)
                if not tags_info_tags_for_tag_paths:  # the tag has no paths left
                    tags_info_tags.pop(tag, None)

        # handle delete links
        old_links = links_info_paths.get(lower_path)
        if not isinstance(old_links, (list, tuple)):
            old_links = []
        old_links = [smart_unicode(link) for link in old_links]
        links_info_paths.pop(lower_path, None)
        for link in old_links:
            links_info_link_back_paths = links_info_links.setdefault(link, [])
            if lower_path in links_info_link_back_paths:
                links_info_link_back_paths.remove(lower_path)
                if not links_info_link_back_paths:  # the link has no back paths left
                    links_info_links.pop(link, None)

    else:
        bucket_text_words += text_words
        words_info[lower_path] = text_words

        # handle tags
        if post_tags:
            tags_info_paths[lower_path] = post_tags
        for tag in post_tags:
            tags_info_tags_for_tag_paths = tags_info_tags.setdefault(tag, [])
            if lower_path not in tags_info_tags_for_tag_paths:
                tags_info_tags_for_tag_paths.append(lower_path)
        empty_tags = []
        for tag, paths_tagged in tags_info_tags.items():
            if not paths_tagged:
                empty_tags.append(tag)
                continue
            if not isinstance(paths_tagged, list):
                continue
            if lower_path in paths_tagged and tag not in post_tags:
                paths_tagged.remove(lower_path)
            if not paths_tagged:
                empty_tags.append(tag)
        for empty_tag in empty_tags:
            tags_info_tags.pop(empty_tag, None)

        # handle links
        if post_doc_links:
            links_info_paths[lower_path] = post_doc_links
        for link in post_doc_links:
            links_info_link_back_paths = links_info_links.setdefault(link, [])
            if lower_path not in links_info_link_back_paths:
                links_info_link_back_paths.append(lower_path)
        empty_links = []
        for link, paths_linked in links_info_links.items():
            if not paths_linked:
                empty_links.append(link)
                continue
            if not isinstance(paths_linked, list):
                continue
            if lower_path in paths_linked and link not in post_doc_links:
                paths_linked.remove(lower_path)
            if not paths_linked:
                empty_links.append(link)
        for empty_link in empty_links:
            links_info_links.pop(empty_link, None)

    if bucket_text_words < 0:
        bucket_text_words = 0

    posts_info['text_words'] = bucket_text_words

    set_bucket_configs(bucket, configs=posts_info, config_type='posts')
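
Note: the rough shape of the posts_info configs written above, pieced together from the comments in the function; all paths and values are illustrative:

posts_info_example = {
    "text_words": 1200,                      # total words across public posts
    "words": {"notes/first-post.md": 300},   # per-path word counts
    "tags": {
        "paths": {"notes/first-post.md": ["python", "farbox"]},  # path -> its tags
        "tags": {"python": ["notes/first-post.md"]},             # tag -> paths tagged with it
    },
    "links": {
        "paths": {"notes/first-post.md": ["notes/second-post.md"]},  # path -> docs it links to
        "links": {"notes/second-post.md": ["notes/first-post.md"]},  # linked doc -> back paths
    },
}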