Example #1
def check_user_share_quota(username, repo, users=[], groups=[]):
    """Check whether user has enough quota when share repo to users/groups.
    """
    if not users and not groups:
        return True

    if not seaserv.CALC_SHARE_USAGE:
        return True

    check_pass = False
    quota = seafile_api.get_user_quota(username)
    self_usage = seafile_api.get_user_self_usage(username)
    current_share_usage = seafile_api.get_user_share_usage(username)

    share_usage = 0
    if users:
        share_usage += seafile_api.get_repo_size(repo.id) * (len(users))

    if groups:
        grp_members = []
        for group in groups:
            grp_members += [
                e.user_name for e in seaserv.get_group_members(group.id)
            ]
        grp_members = set(grp_members)
        share_usage += seafile_api.get_repo_size(
            repo.id) * (len(grp_members) - 1)
    if share_usage + self_usage + current_share_usage < quota:
        check_pass = True

    return check_pass
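
A minimal usage sketch (not part of Seahub itself): gate an actual share call on the quota check. share_repo_to_user_checked is a hypothetical wrapper; seafile_api.share_repo is the standard call for sharing a library to a user.

from seaserv import seafile_api

def share_repo_to_user_checked(username, repo, to_user, permission='rw'):
    # Hypothetical wrapper: refuse the share when it would push the owner over quota.
    if not check_user_share_quota(username, repo, users=[to_user]):
        raise ValueError('Not enough quota to share library %s' % repo.id)
    seafile_api.share_repo(repo.id, username, to_user, permission)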
Example #2
def check_user_share_quota(username, repo, users=[], groups=[]):
    """Check whether user has enough quota when share repo to users/groups.
    """
    if not users and not groups:
        return True

    if not seaserv.CALC_SHARE_USAGE:
        return True

    check_pass = False
    quota = seafile_api.get_user_quota(username)
    self_usage = seafile_api.get_user_self_usage(username)
    current_share_usage = seafile_api.get_user_share_usage(username)

    share_usage = 0
    if users:
        share_usage += seafile_api.get_repo_size(repo.id) * (len(users))

    if groups:
        grp_members = []
        for group in groups:
            grp_members += [e.user_name for e in seaserv.get_group_members(group.id)]
        grp_members = set(grp_members)
        share_usage += seafile_api.get_repo_size(repo.id) * (len(grp_members) - 1)
    if share_usage + self_usage + current_share_usage < quota:
        check_pass = True

    return check_pass
Example #3
def _wait_repo_size_recompute(repo, size, maxretry=30):
    reposize = seafilerpc.get_repo_size(repo.id)
    retry = 0
    while reposize != size:
        if retry >= maxretry:
            assert False, 'repo size not recomputed in %s seconds' % maxretry
        retry += 1
        print('computed = %s, expected = %s' % (reposize, size))
        time.sleep(1)
        reposize = seafilerpc.get_repo_size(repo.id)
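
A minimal sketch of how this helper is typically used in a test, assuming a fixture-provided repo; upload_test_file is a hypothetical stand-in for whatever the test actually uploads.

def test_repo_size_recomputed_after_upload(repo):
    uploaded_bytes = upload_test_file(repo.id)  # hypothetical helper returning the uploaded size
    _wait_repo_size_recompute(repo, uploaded_bytes)
    assert seafilerpc.get_repo_size(repo.id) == uploaded_bytes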
Example #4
def _wait_repo_size_recompute(repo, size, maxretry=30):
    reposize = seafilerpc.get_repo_size(repo.id)
    retry = 0
    while reposize != size:
        if retry >= maxretry:
            assert False, 'repo size not recomputed in %s seconds' % maxretry
        retry += 1
        print('computed = %s, expected = %s' % (reposize, size))
        time.sleep(1)
        reposize = seafilerpc.get_repo_size(repo.id)
Example #5
def check_user_workspace_quota(workspace):
    """
    Check whether the workspace is within its asset quota.
    """
    # if the workspace is a group workspace and not an org workspace, no check is needed,
    # because regular users are not allowed to create groups but org users can
    if '@seafile_group' in workspace.owner and workspace.org_id == -1:
        return True
    if workspace.org_id != -1:  # org workspace: check whether the total size of all the org's workspaces is within quota
        org_role = OrgSettings.objects.filter(org_id=workspace.org_id).first()
        org_role = org_role.role if org_role else ORG_DEFAULT
        quota = get_enabled_role_permissions_by_role(org_role).get(
            'role_asset_quota', '')
        quota = get_quota_from_string(quota) if quota else quota
        if quota:
            asset_size = cache.get(
                normalize_cache_key(str(workspace.org_id),
                                    ASSET_SIZE_CACHE_PREFIX))
            if not asset_size:
                repo_ids = Workspaces.objects.filter(
                    org_id=workspace.org_id).values_list('repo_id', flat=True)
                asset_size = 0
                for repo_id in repo_ids:
                    asset_size += seafile_api.get_repo_size(repo_id)
                cache.set(
                    normalize_cache_key(str(workspace.org_id),
                                        ASSET_SIZE_CACHE_PREFIX), asset_size,
                    ASSET_SIZE_CACHE_TIMEOUT)
            if int(asset_size) > quota:
                return False
    else:  # check user's workspace size
        user = ccnet_api.get_emailuser_with_import(workspace.owner)
        if not user:
            return False
        quota = get_enabled_role_permissions_by_role(user.role).get(
            'role_asset_quota', '')
        quota = get_quota_from_string(quota) if quota else quota
        if quota and seafile_api.get_repo_size(workspace.repo_id) > quota:
            return False
    return True
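
A minimal sketch of how a caller might gate an upload on this check; save_asset and QuotaExceeded are hypothetical names, not part of the project's API.

class QuotaExceeded(Exception):
    pass

def save_asset(workspace, file_obj):
    # Hypothetical guard: refuse new assets once the workspace is over its quota.
    if not check_user_workspace_quota(workspace):
        raise QuotaExceeded('workspace %s is over its asset quota' % workspace.id)
    # ... store file_obj into the workspace's library ...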
Example #6
File: repo.py Project: genba/seahub
def get_repo_size(repo_id):
    return seafile_api.get_repo_size(repo_id)
Example #7
def get_repo_size(repo_id):
    return seafile_api.get_repo_size(repo_id)
Example #8
def test_merge_virtual_repo(repo):
    api.post_dir(repo.id, '/dir1', 'subdir1', USER)
    api.post_dir(repo.id, '/dir2', 'subdir2', USER)
    v_repo_id = api.share_subdir_to_user(repo.id, '/dir1', USER, USER2, 'rw')

    create_test_file()
    params = {'ret-json': '1'}
    obj_id = '{"parent_dir":"/"}'
    create_test_dir(repo, 'test')

    #test upload file to virtual repo root dir.
    token = api.get_fileserver_access_token(v_repo_id, obj_id, 'upload', USER2,
                                            False)
    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token
    m = MultipartEncoder(
        fields={
            'parent_dir': '/',
            'file': (file_name, open(file_path, 'rb'),
                     'application/octet-stream')
        })
    response = requests.post(upload_url_base,
                             params=params,
                             data=m,
                             headers={'Content-Type': m.content_type})
    assert_upload_response(response, False, False)

    time.sleep(1.5)
    repo_size = api.get_repo_size(v_repo_id)
    assert repo_size == 0

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    #test resumable upload file to virtual repo root dir
    parent_dir = '/'
    headers = {
        'Content-Range':
        'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),
                               str(total_size)),
        'Content-Disposition':
        'attachment; filename=\"{}\"'.format(resumable_file_name)
    }
    response = request_resumable_upload(chunked_part1_path, headers,
                                        upload_url_base, parent_dir, False)
    assert_resumable_upload_response(response, v_repo_id, resumable_file_name,
                                     False)

    time.sleep(1.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == 0
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    headers = {
        'Content-Range':
        'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
                                str(total_size - 1), str(total_size)),
        'Content-Disposition':
        'attachment; filename=\"{}\"'.format(resumable_file_name)
    }
    response = request_resumable_upload(chunked_part2_path, headers,
                                        upload_url_base, parent_dir, False)
    assert_resumable_upload_response(response, v_repo_id, resumable_file_name,
                                     True)

    time.sleep(2.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == total_size
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == total_size

    #test update file to virtual repo.
    write_file(file_path, file_content)
    token = api.get_fileserver_access_token(v_repo_id, obj_id, 'update', USER2,
                                            False)
    update_url_base = 'http://127.0.0.1:8082/update-api/' + token
    m = MultipartEncoder(
        fields={
            'target_file': '/' + file_name,
            'file': (file_name, open(file_path, 'rb'),
                     'application/octet-stream')
        })
    response = requests.post(update_url_base,
                             data=m,
                             headers={'Content-Type': m.content_type})
    assert_update_response(response, False)

    time.sleep(1.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == total_size + file_size
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == total_size + file_size

    api.del_file(v_repo_id, '/', file_name, USER2)

    time.sleep(1.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == total_size
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == total_size

    api.del_file(v_repo_id, '/', resumable_file_name, USER2)

    time.sleep(1.5)
    v_repo_size = api.get_repo_size(v_repo_id)
    assert v_repo_size == 0
    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    api.del_file(repo.id, '/dir1', 'subdir1', USER)
    api.del_file(repo.id, '/dir2', 'subdir2', USER)
    assert api.unshare_subdir_for_user(repo.id, '/dir1', USER, USER2) == 0
    del_local_files()
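
The uploads above all repeat the same multipart pattern; a small helper distilled from it (upload_one_file is a hypothetical name, not part of the test suite):

import requests
from requests_toolbelt import MultipartEncoder

def upload_one_file(upload_url, parent_dir, file_name, file_path):
    # Post a single file to a fileserver upload-api URL and return the response.
    m = MultipartEncoder(fields={
        'parent_dir': parent_dir,
        'file': (file_name, open(file_path, 'rb'), 'application/octet-stream'),
    })
    return requests.post(upload_url, params={'ret-json': '1'},
                         data=m, headers={'Content-Type': m.content_type})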
Example #9
def test_api(repo):
    create_test_file()
    params = {'ret-json':'1'}
    obj_id = '{"parent_dir":"/"}'
    create_test_dir(repo,'test')
    #test upload file to test dir instead of root dir.
    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)
    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/test',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert response.status_code == 403

    #test upload file to root dir.
    params = {'ret-json':'1'}
    token = api.get_fileserver_access_token(repo.id, obj_id, 'upload', USER, False)
    upload_url_base = 'http://127.0.0.1:8082/upload-api/' + token
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_upload_response(response, False, False)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    #test upload file to test dir instead of root dir when file already exists and replace is set.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/test',
                    'replace': '1',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert response.status_code == 403

    #test upload file to root dir when file already exists and replace is set.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/',
                    'replace': '1',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_upload_response(response, True, True)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    #test upload file to test dir instead of root dir when file already exists and replace is unset.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/test',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert response.status_code == 403

    #test upload file to root dir when file already exists and replace is unset.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_upload_response(response, False, True)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    #test upload the file to subdir whose parent is test.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/test',
                    'relative_path': 'subdir',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert response.status_code == 403

    #test upload the file to subdir.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/',
                    'relative_path': 'subdir',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_upload_response(response, False, False)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    #test upload the file to subdir whose parent is test when file already exists and replace is set.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/test',
                    'relative_path': 'subdir',
                    'replace': '1',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert response.status_code == 403

    #test upload the file to subdir when file already exists and replace is set.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/',
                    'relative_path': 'subdir',
                    'replace': '1',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_upload_response(response, True, True)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    #test upload the file to subdir whose parent is test dir when file already exists and replace is unset.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/test',
                    'relative_path': 'subdir',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert response.status_code == 403

    #test upload the file to subdir when file already exists and replace is unset.
    params = {'ret-json':'1'}
    m = MultipartEncoder(
            fields={
                    'parent_dir': '/',
                    'relative_path': 'subdir',
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(upload_url_base, params = params,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_upload_response(response, False, True)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    #test resumable upload file to test
    parent_dir = '/test'
    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),
                                                      str(total_size)),
               'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)}
    response = request_resumable_upload(chunked_part1_path, headers, upload_url_base, parent_dir, False)
    assert_resumable_upload_response(response, repo.id,
                                     resumable_test_file_name, False)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
                                                       str(total_size - 1),
                                                       str(total_size)),
               'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)}
    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False)
    assert response.status_code == 403

    #test resumable upload file to root dir
    parent_dir = '/'
    headers = {'Content-Range':'bytes 0-{}/{}'.format(str(len(chunked_part1_content) - 1),
                                                      str(total_size)),
               'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)}
    response = request_resumable_upload(chunked_part1_path,headers, upload_url_base,parent_dir, False)
    assert_resumable_upload_response(response, repo.id,
                                     resumable_file_name, False)

    repo_size = api.get_repo_size(repo.id)
    assert repo_size == 0

    headers = {'Content-Range':'bytes {}-{}/{}'.format(str(len(chunked_part1_content)),
                                                       str(total_size - 1),
                                                       str(total_size)),
               'Content-Disposition':'attachment; filename=\"{}\"'.format(resumable_file_name)}
    response = request_resumable_upload(chunked_part2_path, headers, upload_url_base, parent_dir, False)
    assert_resumable_upload_response(response, repo.id,
                                     resumable_file_name, True)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == total_size

    #test update file.
    write_file(file_path, file_content)
    token = api.get_fileserver_access_token(repo.id, obj_id, 'update', USER, False)
    update_url_base = 'http://127.0.0.1:8082/update-api/' + token
    m = MultipartEncoder(
            fields={
                    'target_file': '/' + file_name,
                    'file': (file_name, open(file_path, 'rb'), 'application/octet-stream')
            })
    response = requests.post(update_url_base,
                             data = m, headers = {'Content-Type': m.content_type})
    assert_update_response(response, False)

    time.sleep(1.5)
    repo_size = api.get_repo_size(repo.id)
    assert repo_size == total_size + file_size

    time.sleep(1)
    del_repo_files(repo.id)
    del_local_files()
Example #10
def test_file_property_and_dir_listing():

    t_repo_version = 1
    t_repo_id = api.create_repo('test_file_property_and_dir_listing',
                                '',
                                USER,
                                passwd=None)

    create_the_file()

    api.post_file(t_repo_id, file_path, '/', file_name, USER)
    api.post_dir(t_repo_id, '/', dir_name, USER)
    api.post_file(t_repo_id, file_path, '/' + dir_name, file_name, USER)

    #test is_valid_filename
    t_valid_file_name = 'valid_filename'
    t_invalid_file_name = '/invalid_filename'
    assert api.is_valid_filename(t_repo_id, t_valid_file_name)
    assert api.is_valid_filename(t_repo_id, t_invalid_file_name) == 0

    #test get_file_id_by_path
    t_file_id = api.get_file_id_by_path(t_repo_id, '/test.txt')
    assert t_file_id

    #test get_dir_id_by_path
    t_dir_id = api.get_dir_id_by_path(t_repo_id, '/test_dir')
    assert t_dir_id

    #test get_file_size
    t_file_size = len(file_content)
    assert t_file_size == api.get_file_size(t_repo_id, t_repo_version,
                                            t_file_id)

    #test get_dir_size
    t_dir_size = len(file_content)
    assert t_dir_size == api.get_dir_size(t_repo_id, t_repo_version, t_dir_id)

    #test get_file_count_info_by_path
    t_file_count_info = api.get_file_count_info_by_path(t_repo_id, '/')
    assert t_file_count_info.file_count == 2
    assert t_file_count_info.dir_count == 1
    assert t_file_count_info.size == t_file_size + t_dir_size

    #test get_file_id_by_commit_and_path
    t_file_id_tmp = t_file_id
    t_repo = api.get_repo(t_repo_id)
    assert t_repo
    t_commit_id = t_repo.head_cmmt_id
    t_file_id = api.get_file_id_by_commit_and_path(t_repo_id, t_commit_id,
                                                   '/test.txt')

    assert t_file_id == t_file_id_tmp

    #test get_dirent_by_path
    std_file_mode = 0o100000 | 0o644
    t_dirent_obj = api.get_dirent_by_path(t_repo_id, '/test.txt')
    assert t_dirent_obj
    assert t_dirent_obj.obj_id == t_file_id
    assert t_dirent_obj.obj_name == 'test.txt'
    assert t_dirent_obj.mode == std_file_mode
    assert t_dirent_obj.version == t_repo_version
    assert t_dirent_obj.size == t_file_size
    assert t_dirent_obj.modifier == USER

    #test list_file_by_file_id
    t_block_list = api.list_file_by_file_id(t_repo_id, t_file_id)
    assert t_block_list

    #test list_blocks_by_file_id
    t_block_list = api.list_blocks_by_file_id(t_repo_id, t_file_id)
    assert t_block_list

    #test list_dir_by_dir_id
    t_dir_list = api.list_dir_by_dir_id(t_repo_id, t_dir_id)
    assert len(t_dir_list) == 1

    #test list_dir_by_path
    t_dir_list = api.list_dir_by_path(t_repo_id, '/test_dir')
    assert len(t_dir_list) == 1

    #test get_dir_id_by_commit_and_path
    t_dir_id = api.get_dir_id_by_commit_and_path(t_repo_id, t_commit_id,
                                                 '/test_dir')
    assert t_dir_id

    #test list_dir_by_commit_and_path
    t_dir_list = api.list_dir_by_commit_and_path(t_repo_id, t_commit_id,
                                                 '/test_dir')
    assert len(t_dir_list) == 1

    #test list_dir_with_perm
    t_dir_list = api.list_dir_with_perm(t_repo_id, '/test_dir', t_dir_id, USER)
    assert len(t_dir_list) == 1

    #test mkdir_with_parents
    api.mkdir_with_parents(t_repo_id, '/test_dir', 'test_subdir', USER)
    t_dir_id = api.get_dir_id_by_path(t_repo_id, '/test_dir/test_subdir')
    assert t_dir_id

    #test get_total_storage
    t_total_size = api.get_total_storage()
    t_repo_size = api.get_repo_size(t_repo_id)
    assert t_total_size == t_repo_size

    #get_total_file_number
    time.sleep(1)
    assert api.get_total_file_number() == 2

    api.remove_repo(t_repo_id)
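
The std_file_mode constant above is simply S_IFREG (0o100000) combined with 0644 permission bits; inside the test, the same assertion can be written with the stat module instead of the magic number (a sketch reusing t_dirent_obj):

    import stat

    # equivalent to: t_dirent_obj.mode == 0o100000 | 0o644
    assert stat.S_ISREG(t_dirent_obj.mode)
    assert stat.S_IMODE(t_dirent_obj.mode) == 0o644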
Example #11
    def _build_task(self, repo_id, owner, storage_path):
        """
        Archiving task builder
        """
        at = KeeperArchivingTask(repo_id, owner, storage_path)
        at.status = 'BUILD_TASK'
        # check repo exists
        try:
            at._repo = seafile_api.get_repo(repo_id)
        except Exception as e:
            _set_error(at, MSG_ADD_TASK,
                       'Cannot get library {}: {}'.format(repo_id, e))
            return at

        # check owner
        if not _is_repo_owner(repo_id, owner):
            _set_error(at, MSG_WRONG_OWNER,
                       'Wrong owner of library {}: {}'.format(repo_id, owner))
            return at
        at.owner = owner

        # check repo snapshot is already archived
        commit = get_commit(at._repo)
        if self._db_oper.is_snapshot_archived(repo_id, commit.commit_id):
            _set_error(
                at, MSG_SNAPSHOT_ALREADY_ARCHIVED,
                'Snapshot {} of library {} is already archived'.format(
                    commit.commit_id, repo_id))
            return at
        at._commit = commit

        # check version
        max_ver = self._db_oper.get_max_archive_version(repo_id, owner)
        if max_ver is None:
            _set_error(
                at, MSG_ADD_TASK,
                'Cannot get max version of archive for library {}: {}'.format(
                    repo_id, owner))
            return at
        owner_quota = self._db_oper.get_quota(
            repo_id, owner) or self.archives_per_library
        if max_ver >= owner_quota:
            _set_error(
                at, MSG_MAX_NUMBER_ARCHIVES_REACHED,
                'Max number of archives {} for library {} and owner {} is reached'
                .format(max_ver, repo_id, owner))
            return at
        elif max_ver == -1:
            at.version = 1
        else:
            at.version = max_ver + 1

        # get current repo size
        try:
            repo_size = seafile_api.get_repo_size(repo_id)
        except Exception as e:
            _set_error(at, MSG_ADD_TASK,
                       'Cannot get library size {}: {}'.format(repo_id, e))
            return at

        # check max repo size
        # TODO: check units
        if repo_size > self.archive_max_size:
            _set_error(
                at, MSG_LIBRARY_TOO_BIG,
                'Size of library {} is too big to be archived: {}.'.format(
                    repo_id, repo_size))
            return at

        return at
Example #12
    def check_repo_archiving_status(self, repo_id, owner, action):
        """TODO:
        """
        if repo_id is None:
            return {'status': 'ERROR', 'error': 'No repo_id is defined.'}

        if owner is None:
            return {'status': 'ERROR', 'error': 'No owner is defined.'}

        if action is None:
            return {'status': 'ERROR', 'error': 'No action is defined.'}

        resp = {'repo_id': repo_id, 'owner': owner, 'action': action}
        try:
            repo = None
            try:
                repo = seafile_api.get_repo(repo_id)
            except Exception:
                pass
            if repo is None:
                resp.update({
                    'status': 'ERROR',
                    'error': MSG_CANNOT_GET_REPO,
                })
                return resp

            if not _is_repo_owner(repo_id, owner):
                resp.update({'status': 'ERROR', 'error': MSG_WRONG_OWNER})
                return resp

            ####### is_snapshot_archived
            if action == 'is_snapshot_archived':

                # get root commit_id
                commit_id = get_commit(repo).commit_id
                is_archived = self._db_oper.is_snapshot_archived(
                    repo_id, commit_id)
                if is_archived is None:
                    resp.update({'status': 'ERROR', 'error': MSG_DB_ERROR})
                    return resp
                resp.update({
                    'is_snapshot_archived':
                    'true' if is_archived else 'false',
                })
                return resp

            ####### get_quota
            if action == 'get_quota':
                # get current version
                curr_ver = self._db_oper.get_max_archive_version(
                    repo_id, owner)
                if curr_ver is None:
                    resp.update({'status': 'ERROR', 'error': MSG_DB_ERROR})
                    return resp
                curr_ver = 0 if curr_ver == -1 else curr_ver
                # get quota from db or from config
                quota = self._db_oper.get_quota(
                    repo_id, owner) or self.archives_per_library
                resp.update({
                    'curr_ver': curr_ver,
                    'remains': quota - curr_ver
                })
                return resp

            ####### max_repo_size
            if action == 'is_repo_too_big':
                repo_size = seafile_api.get_repo_size(repo_id)
                resp.update({
                    'is_repo_too_big':
                    'true' if repo_size > self.archive_max_size else 'false',
                })
                return resp

            ##### no matching action found
            return {'status': 'ERROR', 'error': 'Unknown action: ' + action}

        except Exception as e:
            logger.error(ACTION_ERROR_MSG[action][0].format(repo_id, owner, e))
            resp.update({
                'status': 'ERROR',
                'error': ACTION_ERROR_MSG[action][1]
            })
            return resp
Example #13
new_repo_id = seafile_api.create_repo(name=origin_repo.name,
                                      desc=origin_repo.desc,
                                      username=username,
                                      passwd=None)

#Copy stuff from the old library to the new library
# dirents = seafile_api.list_dir_by_path(origin_repo_id, '/')
# for e in dirents:
#     print "copying: " + e.obj_name
#     obj_name = e.obj_name
#     seafile_api.copy_file(origin_repo_id, '/', obj_name, new_repo_id, '/',
#                           obj_name, username, 0, 1)

print "*" * 60
print "OK, verifying..."
print "Origin library(%s): %d files. New Library(%s): %d files." % (
    origin_repo_id[:8], count_files_recursive(origin_repo_id), new_repo_id[:8],
    count_files_recursive(new_repo_id))
print "*" * 60
# print seafile_api.get_repo(repo_id)
print seafile_api.get_repo_size(repo_id)

# import ccnet
# import seafile
# pool = ccnet.ClientPool(DEFAULT_CONF_DIR)
# ccnet_rpc = ccnet.CcnetRpcClient(pool)
# seafile_rpc = seafile.RpcClient(pool, req_pool=False)
# repos = seafile_rpc.get_repo_list(-1, -1)
# print "Name\tID\tPath"
# for repo in repos:
#     print repo.name, repo.id, repo.worktree
Example #14
#Create a new library, set name, desc and owner
new_repo_id = seafile_api.create_repo(name=origin_repo.name,
                                      desc=origin_repo.desc,
                                      username=username, passwd=None)

#Copy stuff from the old library to the new library
# dirents = seafile_api.list_dir_by_path(origin_repo_id, '/')
# for e in dirents:
#     print "copying: " + e.obj_name
#     obj_name = e.obj_name
#     seafile_api.copy_file(origin_repo_id, '/', obj_name, new_repo_id, '/',
#                           obj_name, username, 0, 1)

print "*" * 60
print "OK, verifying..."
print "Origin library(%s): %d files. New Library(%s): %d files." % (
    origin_repo_id[:8], count_files_recursive(origin_repo_id),
    new_repo_id[:8], count_files_recursive(new_repo_id))
print "*" * 60
# print seafile_api.get_repo(repo_id)
print seafile_api.get_repo_size(repo_id)

# import ccnet
# import seafile
# pool = ccnet.ClientPool(DEFAULT_CONF_DIR)
# ccnet_rpc = ccnet.CcnetRpcClient(pool)
# seafile_rpc = seafile.RpcClient(pool, req_pool=False)
# repos = seafile_rpc.get_repo_list(-1, -1)
# print "Name\tID\tPath"
# for repo in repos:
#     print repo.name, repo.id, repo.worktree
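
count_files_recursive is referenced by these scripts but not shown; a minimal sketch of what such a helper could look like, assuming the standard seafile_api.list_dir_by_path call (the name comes from the snippet, the body is only an illustration):

import posixpath
import stat

from seaserv import seafile_api

def count_files_recursive(repo_id, path='/'):
    # Walk the library tree and count regular files under path.
    count = 0
    for dirent in seafile_api.list_dir_by_path(repo_id, path):
        if stat.S_ISDIR(dirent.mode):
            count += count_files_recursive(repo_id, posixpath.join(path, dirent.obj_name))
        else:
            count += 1
    return count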