def show_domains():
    """Admin-only endpoint: page through the '_domain' hash and return it as JSON.

    Aborts 404 unless the request is from a logged-in admin bucket.
    The 'cursor' query param is the key to resume scanning from.
    """
    if not get_logined_admin_bucket():
        abort(404)
    page_size = to_per_page(1000, request.values.get('per_page'), max_per_page=10000)
    start_key = request.values.get('cursor') or ''
    domain_docs = hscan('_domain', key_start=start_key, limit=page_size)
    return jsonify(domain_docs)
def list_bucket_paths(bucket):
    """Return the path records of *bucket* as JSON, paginated by cursor.

    Aborts 404 when the caller's token is not allowed to view this
    bucket's info.
    """
    if not allowed_to_display_some_bucket_info(bucket):
        return abort(404, "token not valid")
    path_bucket = get_bucket_name_for_path(bucket)
    resume_from = request.values.get('cursor')
    page_size = to_per_page(200, request.values.get('per_page'), max_per_page=1000)
    path_records = hscan(path_bucket, key_start=resume_from, limit=page_size)
    return jsonify(path_records)
def show_bucket_configs(bucket, configs_type):
    """Return the configs of the given type for *bucket* as JSON.

    'order' / 'sort' / 'sorts' are normalized to 'orders'; 'files' triggers
    an auto-update of the bucket's files info before returning it.
    Aborts 404 when the caller's token is not allowed to view this bucket.
    """
    if configs_type in ("order", "sort", "sorts"):
        configs_type = "orders"
    if not allowed_to_display_some_bucket_info(bucket):
        return abort(404, "token not valid")
    if configs_type == "files":
        configs = auto_update_bucket_and_get_files_info(bucket)
    else:
        configs = get_bucket_configs(bucket, configs_type) or {}
    return jsonify(configs)
def show_buckets():
    """List buckets (reverse score scan on 'buckets') as JSON.

    The cursor is the bucket's last-updated timestamp: record creation
    touches its bucket, so buckets come back ordered by most recent update.
    """
    per_page = to_per_page(1000, request.values.get('per_page'), max_per_page=10000)
    try:
        cursor = int(request.values.get('cursor') or '')
    except (TypeError, ValueError):
        # missing or non-numeric cursor -> start the scan from the top
        # (was a bare `except:`, which also swallowed KeyboardInterrupt etc.)
        cursor = ''
    buckets_result = zrscan('buckets', score_start=cursor, limit=per_page)
    return jsonify(buckets_result)
def show_namespaces():
    """List bucket names (namespaces) as JSON; cursor is the previous bucket name.

    Only names passing is_valid_bucket_name are returned.
    """
    page_size = to_per_page(1000, request.values.get('per_page'), max_per_page=10000)
    start_name = request.values.get('cursor') or ''
    raw_names = hlist(name_start=start_name, limit=page_size)
    valid_names = [name for name in raw_names if is_valid_bucket_name(name)]
    return jsonify(valid_names)
def show_bucket_records_for_web_request(bucket=None, default_records_per_page=100,
                                        includes_zero_ids=True, cursor=None, per_page=None):
    """Return a bucket's records as a JSON response, or abort with an error.

    NOTE(review): a later definition in this file re-declares this same name
    (with a server-sync token path) and shadows this one at import time.
    """
    bucket = bucket or get_logined_bucket_by_token()  # by api token
    if not bucket:
        abort(404, "no bucket matched")
    set_bucket_in_request_context(bucket)
    pre_record_id = cursor or request.values.get('cursor')
    if not includes_zero_ids and not pre_record_id:
        # excluding zero ids is equivalent to pre_record_id = zero_id_for_finder
        pre_record_id = zero_id_for_finder
    per_page = per_page or to_per_page(default_records_per_page,
                                       request.values.get('per_page'), max_per_page=1000)
    records = get_records_for_bucket(bucket, start_record_id=pre_record_id, limit=per_page)
    return jsonify(records)
def show_bucket_records_for_web_request(bucket=None, default_records_per_page=100,
                                        includes_zero_ids=True, cursor=None, per_page=None):
    """Return a bucket's records as a JSON response, or abort with an error.

    Note: when a valid *bucket* is passed in directly, no auth check is done.
    """
    bucket = bucket or get_logined_bucket_by_token()  # by api token
    if not bucket:
        # server-side sync: a request carrying the matching server_sync_token
        # may resolve the bucket without a logged-in token
        server_sync_token = request.values.get("server_sync_token", "")
        if server_sync_token and server_sync_token == get_env("server_sync_token"):
            bucket = get_pending_bucket_bucket_in_request()
    if not bucket:
        abort(404, "no bucket matched")
    set_bucket_in_request_context(bucket)
    pre_record_id = cursor or request.values.get('cursor')
    if not includes_zero_ids and not pre_record_id:
        # excluding zero ids is equivalent to pre_record_id = zero_id_for_finder
        pre_record_id = zero_id_for_finder
    per_page = per_page or to_per_page(default_records_per_page,
                                       request.values.get('per_page'), max_per_page=1000)
    records = get_records_for_bucket(bucket, start_record_id=pre_record_id, limit=per_page)
    return jsonify(records)
def show_buckets_should_be_synced():
    """Return, as JSON, the buckets that need syncing over from the remote side."""
    return jsonify(get_buckets_to_sync_from_remote())
def show_node_status():
    """Return the current node's status as JSON."""
    return jsonify(get_current_node_status())