Example #1
0
def get_invitations(limit=100, start_code=None):
    """Return invitation records from the `_bucket_invite` hash, newest first.

    Scans up to `limit` raw ssdb rows starting after `start_code`, then
    converts them to plain Python records (same conversion as Record rows).
    """
    raw = hscan(
        "_bucket_invite",
        key_start=start_code,
        limit=limit,
        reverse_scan=True,
    )
    return to_py_records_from_raw_ssdb_records(raw)
Example #2
0
def show_domains():
    """Admin-only endpoint: dump raw `_domain` docs as JSON, cursor-paged."""
    # 404 rather than 401/403 so the endpoint stays hidden from non-admins
    if not get_logined_admin_bucket():
        abort(404)
    page_size = to_per_page(1000,
                            request.values.get('per_page'),
                            max_per_page=10000)
    start_key = request.values.get('cursor') or ''
    docs = hscan('_domain', key_start=start_key, limit=page_size)
    return jsonify(docs)
Example #3
0
def list_bucket_paths(bucket):
    """Return the raw path records of `bucket` as JSON, cursor-paged."""
    if not allowed_to_display_some_bucket_info(bucket):
        return abort(404, "token not valid")
    cursor = request.values.get('cursor')
    page_size = to_per_page(200,
                            request.values.get('per_page'),
                            max_per_page=1000)
    result = hscan(get_bucket_name_for_path(bucket),
                   key_start=cursor,
                   limit=page_size)
    return jsonify(result)
Example #4
0
def basic_get_buckets_to_sync(namespace, limit=1000):
    """Return the JSON-decoded bucket-data dicts stored under `namespace`.

    The namespace is normalized to start with '_' (storage convention for
    internal hashes). Rows whose value is not valid JSON are skipped.
    Returns [] when the scan yields nothing.
    """
    if not namespace.startswith('_'):
        namespace = '_' + namespace
    result = hscan(namespace, limit=limit) or []
    buckets_data = []
    for bucket, bucket_data in result:
        # was a bare `except:` — that also swallowed KeyboardInterrupt /
        # SystemExit; only JSON/type failures should skip the row
        try:
            buckets_data.append(json.loads(bucket_data))
        except (ValueError, TypeError):
            continue
    return buckets_data
def get_system_timed_records():
    """Return human-readable timing lines from `_system_recorder`, newest first.

    Each row key looks like `<ms_timestamp>_<action_name>`; the value is the
    seconds the action cost. Rows with malformed keys are skipped.
    """
    raw_records = hscan('_system_recorder', limit=1000, reverse_scan=True)
    records = []
    for record_id, seconds_cost in raw_records:
        if '_' not in record_id:
            continue
        timestamp, action_name = record_id.split('_', 1)
        try:
            timestamp = int(timestamp)/1000.  # key stores milliseconds
        except (ValueError, TypeError):
            # was a bare `except:`; only a non-numeric prefix should skip
            continue
        date = datetime.datetime.utcfromtimestamp(timestamp)
        date = date.strftime('%Y-%m-%d %H:%M:%S')
        record = '%s UTC %s costs %s' % (date, action_name, seconds_cost)
        records.append(record)
    return records
Example #6
0
def get_paths_and_ids_under(bucket, under="", max_limit=10000):
    """List [path, record_id] pairs whose path is under the `under` prefix.

    An empty `under` scans the whole path bucket. Otherwise the scan range
    is ['<under>/', '<under>0'): '0' is the ASCII character right after
    '/', so the half-open range covers exactly the keys with that folder
    prefix and nothing else.
    """
    prefix = (under or "").lower().strip('/')
    if prefix:
        key_start = prefix + '/'
        key_end = prefix + '0'
    else:
        key_start = ''
        key_end = ''
    rows = hscan(get_bucket_name_for_path(bucket),
                 key_start=key_start,
                 key_end=key_end,
                 limit=max_limit)
    return [[smart_unicode(path), just_get_record_id(raw_id)]
            for path, raw_id in rows]
Example #7
0
def get_records_for_bucket(bucket,
                           start_record_id=None,
                           end_record_id=None,
                           limit=1000,
                           includes_start_record_id=False,
                           reverse_scan=False,
                           raw=False):
    """Scan `bucket` for records between the two record ids.

    `start_record_id` is an exclusive cursor: callers normally pass the last
    id of the previous page, and that id must not reappear in this page.
    Used in two places — the list-bucket URL handler, and the template
    engine (which usually sets a start id so meaningless zero-id records
    are not shown). When `includes_start_record_id` is true, the start
    record itself is fetched separately and prepended to the result.
    Unless `raw`, rows are converted to plain Python records.
    """
    records = hscan(bucket,
                    key_start=start_record_id,
                    key_end=end_record_id,
                    limit=limit,
                    reverse_scan=reverse_scan)
    if start_record_id and includes_start_record_id:
        head = hget(bucket, start_record_id)
        if head:
            records.insert(0, [start_record_id, head])
    if raw:
        return records
    return to_py_records_from_raw_ssdb_records(records)
Example #8
0
def get_files_info(bucket):
    """Build a summary dict of all file/folder paths stored for `bucket`.

    Returns a dict with:
      files / folders        — path -> {record_id, size, version}
      lower_files            — same as files, lowercased keys
      lower_folders          — list of lowercased folder paths (incl. implied parents)
      lower_folders_count    — folder -> counts of images/posts under it
                               ('images_count'/'posts_count' = recursive totals,
                                '_images_count'/'_posts_count' = direct children only)
      date                   — build timestamp (epoch seconds)
    """
    data = {}
    path_bucket = get_bucket_name_for_path(bucket)
    data['files'] = {}
    data['folders'] = {}
    data['lower_files'] = {}
    data['lower_folders'] = []  # not a dict
    lower_folders = []
    lower_folders_count = {}
    # scan every path row; 20000 caps very large buckets — TODO confirm limit
    records = hscan(path_bucket, key_start='', limit=20000)
    for filepath, filepath_data_string in records:
        # leading underscore marks internal/hidden paths; skip them
        if filepath.startswith('_'):
            continue
        lower_filepath = filepath.strip().lower()
        # prepare raw data starts
        # value is a CSV triple: record_id,size,version
        raw_filepath_data = filepath_data_string.split(',')
        if len(raw_filepath_data) != 3:
            continue
        filepath_data_keys = ['record_id', 'size', 'version']
        filepath_data = dict(zip(filepath_data_keys, raw_filepath_data))
        filepath_data['size'] = to_int(filepath_data['size'], default_if_fail=0)
        # version == 'folder' is the sentinel for directory entries
        if filepath_data.get('version') == 'folder':
            #is_dir = True
            is_image = False
            is_markdown = False
            data['folders'][filepath] = filepath_data
            if lower_filepath not in lower_folders:
                lower_folders.append(lower_filepath)
        else:
            #is_dir = False
            # prepare raw data ends
            is_image = is_a_image_file(filepath)
            is_markdown = is_a_markdown_file(filepath)
            data['files'][filepath] = filepath_data
            data['lower_files'][filepath.strip().lower()] = filepath_data
        # walk the parent chain so every ancestor folder is registered/counted
        lower_folder_path = os.path.split(filepath.strip().lower())[0]
        if lower_folder_path:
            parts = lower_folder_path.split('/')
            parts_length = len(parts)
            # paths nested deeper than 10 levels are ignored entirely
            if parts_length > 10:
                continue
            for i in range(parts_length):
                one_lower_folder_path = '/'.join(parts[:i + 1])
                last_path_part = one_lower_folder_path.split('/')[-1]
                # underscore-prefixed ancestor: skip it (deeper ancestors still processed)
                if last_path_part.startswith('_'):
                    continue
                if one_lower_folder_path not in lower_folders:
                    lower_folders.append(one_lower_folder_path)
                if one_lower_folder_path:
                    # recursive counts: every ancestor gets +1 for this file
                    images_count_plus = 1 if is_image else 0
                    posts_count_plus = 1 if is_markdown else 0
                    # direct counts (_-prefixed keys): only the immediate parent folder
                    _images_count_plus = 1 if images_count_plus and lower_folder_path == one_lower_folder_path else 0
                    _posts_count_plus = 1 if posts_count_plus and lower_folder_path == one_lower_folder_path else 0
                    matched_count = lower_folders_count.setdefault(one_lower_folder_path, {})
                    matched_count['images_count'] = matched_count.get('images_count', 0) + images_count_plus
                    matched_count['posts_count'] = matched_count.get('posts_count', 0) + posts_count_plus
                    matched_count['_images_count'] = matched_count.get('_images_count', 0) + _images_count_plus
                    matched_count['_posts_count'] = matched_count.get('_posts_count', 0) + _posts_count_plus
    data['lower_folders'] = lower_folders
    data['lower_folders_count'] = lower_folders_count

    data['date'] = time.time()
    return data