def list_shared_links(request):
    """List shared links, and remove invalid links (file/dir was deleted or moved).
    """
    username = request.user.username

    fileshares = FileShare.objects.filter(username=username)
    p_fileshares = []           # personal file shares
    for fs in fileshares:
        if is_personal_repo(fs.repo_id):  # only list links in personal repos
            r = seafile_api.get_repo(fs.repo_id)
            if not r:
                fs.delete()
                continue

            if fs.s_type == 'f':
                # file share link: drop it if the file no longer exists
                if seafile_api.get_file_id_by_path(r.id, fs.path) is None:
                    fs.delete()
                    continue
                fs.filename = os.path.basename(fs.path)
                fs.shared_link = gen_file_share_link(fs.token)
            else:
                # directory share link: drop it if the directory no longer exists
                if seafile_api.get_dir_id_by_path(r.id, fs.path) is None:
                    fs.delete()
                    continue
                fs.filename = os.path.basename(fs.path.rstrip('/'))
                fs.shared_link = gen_dir_share_link(fs.token)
            fs.repo = r
            p_fileshares.append(fs)

    return render_to_response('repo/shared_links.html', {
            "fileshares": p_fileshares,
            }, context_instance=RequestContext(request))
def list_shared_links(request):
    """List shared links, and remove invalid links (file/dir was deleted or moved).
    """
    username = request.user.username

    # download links
    fileshares = FileShare.objects.filter(username=username)
    p_fileshares = []           # personal file shares
    for fs in fileshares:
        r = seafile_api.get_repo(fs.repo_id)
        if not r:
            fs.delete()
            continue

        if fs.s_type == 'f':
            # file share link: drop it if the file no longer exists
            if seafile_api.get_file_id_by_path(r.id, fs.path) is None:
                fs.delete()
                continue
            fs.filename = os.path.basename(fs.path)
            fs.shared_link = gen_file_share_link(fs.token)
        else:
            # directory share link: drop it if the directory no longer exists
            if seafile_api.get_dir_id_by_path(r.id, fs.path) is None:
                fs.delete()
                continue
            fs.filename = os.path.basename(fs.path.rstrip('/'))
            fs.shared_link = gen_dir_share_link(fs.token)
        fs.repo = r
        p_fileshares.append(fs)

    # upload links
    uploadlinks = UploadLinkShare.objects.filter(username=username)
    p_uploadlinks = []
    for link in uploadlinks:
        r = seafile_api.get_repo(link.repo_id)
        if not r:
            link.delete()
            continue
        if seafile_api.get_dir_id_by_path(r.id, link.path) is None:
            link.delete()
            continue

        link.dir_name = os.path.basename(link.path.rstrip('/'))
        link.shared_link = gen_shared_upload_link(link.token)
        link.repo = r
        p_uploadlinks.append(link)

    return render_to_response('share/links.html', {
            "fileshares": p_fileshares,
            "uploadlinks": p_uploadlinks,
            }, context_instance=RequestContext(request))
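# A minimal sketch (not part of seahub) of how the stale-link checks shared by
# the two loops above could be factored out. The helper name is hypothetical;
# it relies only on the seafile_api calls already used in this view.
def link_target_exists(repo, path, is_file):
    """Return True if the shared path still exists in the given repo."""
    if is_file:
        return seafile_api.get_file_id_by_path(repo.id, path) is not None
    return seafile_api.get_dir_id_by_path(repo.id, path) is not None
# With such a helper, each loop reduces to: fetch the repo, delete the link if
# the repo or its target path is gone, otherwise decorate the link with its
# display name and generated URL.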
def get_repo_dirents(request, repo, commit, path, offset=-1, limit=-1):
    """List repo dirents based on commit id and path. Use ``offset`` and
    ``limit`` to do pagination.

    Returns: A tuple of (file_list, dir_list, dirent_more)

    TODO: Some unrelated parts (file sharing, stars, modified info, etc.) need
    to be pulled out into separate functions.
    """
    dir_list = []
    file_list = []
    dirent_more = False
    if commit.root_id == EMPTY_SHA1:
        # empty repo, nothing to list
        return ([], [], False)
    else:
        try:
            dirs = seafile_api.list_dir_by_commit_and_path(
                commit.repo_id, commit.id, path, offset, limit)
            if not dirs:
                return ([], [], False)
        except SearpcError as e:
            logger.error(e)
            return ([], [], False)

        # if we got exactly ``limit`` entries, assume there may be more
        if limit != -1 and limit == len(dirs):
            dirent_more = True

        username = request.user.username
        starred_files = get_dir_starred_files(username, repo.id, path)
        fileshares = FileShare.objects.filter(repo_id=repo.id).filter(
            username=username)
        uploadlinks = UploadLinkShare.objects.filter(repo_id=repo.id).filter(
            username=username)

        view_dir_base = reverse("view_common_lib_dir", args=[repo.id, ''])
        dl_dir_base = reverse('repo_download_dir', args=[repo.id])
        file_history_base = reverse('file_revisions', args=[repo.id])

        for dirent in dirs:
            dirent.last_modified = dirent.mtime
            dirent.sharelink = ''
            dirent.uploadlink = ''
            if stat.S_ISDIR(dirent.props.mode):
                dpath = posixpath.join(path, dirent.obj_name)
                if dpath[-1] != '/':
                    dpath += '/'
                for share in fileshares:
                    if dpath == share.path:
                        dirent.sharelink = gen_dir_share_link(share.token)
                        dirent.sharetoken = share.token
                        break
                for link in uploadlinks:
                    if dpath == link.path:
                        dirent.uploadlink = gen_shared_upload_link(link.token)
                        dirent.uploadtoken = link.token
                        break
                p_dpath = posixpath.join(path, dirent.obj_name)
                dirent.view_link = view_dir_base + '?p=' + urlquote(p_dpath)
                dirent.dl_link = dl_dir_base + '?p=' + urlquote(p_dpath)
                dir_list.append(dirent)
            else:
                file_list.append(dirent)
                if repo.version == 0:
                    dirent.file_size = get_file_size(repo.store_id, repo.version, dirent.obj_id)
                else:
                    dirent.file_size = dirent.size
                dirent.starred = False
                fpath = posixpath.join(path, dirent.obj_name)
                p_fpath = posixpath.join(path, dirent.obj_name)
                dirent.view_link = reverse('view_lib_file', args=[repo.id, p_fpath])
                dirent.dl_link = get_file_download_link(repo.id, dirent.obj_id, p_fpath)
                dirent.history_link = file_history_base + '?p=' + urlquote(p_fpath)
                if fpath in starred_files:
                    dirent.starred = True
                for share in fileshares:
                    if fpath == share.path:
                        dirent.sharelink = gen_file_share_link(share.token)
                        dirent.sharetoken = share.token
                        break

        return (file_list, dir_list, dirent_more)
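# A minimal usage sketch (hypothetical caller, not part of the function above):
# page through a directory with the ``offset``/``limit`` parameters described
# in the docstring. ``dirent_more`` only signals that another page *may* exist;
# when the listing length is an exact multiple of ``limit`` the final extra
# call simply returns an empty page.
def iter_dir_pages(request, repo, commit, path, per_page=25):
    offset = 0
    while True:
        file_list, dir_list, more = get_repo_dirents(
            request, repo, commit, path, offset=offset, limit=per_page)
        yield file_list, dir_list
        if not more:
            break
        offset += per_page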
def view_file(request, repo_id):
    """
    Steps to view a file:
    1. Get repo id and file path.
    2. Check user's permission.
    3. Check whether this file can be viewed online.
    4.1 Get file content if the file is a text file.
    4.2 Prepare flash if the file is a document.
    4.3 Prepare or use pdfjs if the file is a PDF.
    4.4 Otherwise, return its raw path.
    """
    username = request.user.username
    # check arguments
    repo = get_repo(repo_id)
    if not repo:
        raise Http404

    path = request.GET.get('p', '/').rstrip('/')
    obj_id = get_file_id_by_path(repo_id, path)
    if not obj_id:
        return render_error(request, _(u'File does not exist'))

    # construct some variables
    u_filename = os.path.basename(path)
    current_commit = get_commits(repo_id, 0, 1)[0]

    # Check whether user has permission to view the file and get the raw path;
    # render an error page if permission is denied.
    raw_path, inner_path, user_perm = get_file_view_path_and_perm(
        request, repo_id, obj_id, path)
    if not user_perm:
        return render_permission_error(request, _(u'Unable to view file'))

    # check if the user is the owner or not, for 'private share'
    if is_org_context(request):
        repo_owner = seafile_api.get_org_repo_owner(repo.id)
        is_repo_owner = True if repo_owner == username else False
    else:
        is_repo_owner = seafile_api.is_repo_owner(username, repo.id)

    # get file type and extension
    filetype, fileext = get_file_type_and_ext(u_filename)

    img_prev = None
    img_next = None

    ret_dict = {
        'err': '',
        'file_content': '',
        'encoding': '',
        'file_enc': '',
        'file_encoding_list': [],
        'html_exists': False,
        'filetype': filetype,
    }

    fsize = get_file_size(repo.store_id, repo.version, obj_id)
    exceeds_limit, err_msg = file_size_exceeds_preview_limit(fsize, filetype)
    if exceeds_limit:
        ret_dict['err'] = err_msg
    else:
        # Choose a different approach for each type of file.
        if is_textual_file(file_type=filetype):
            handle_textual_file(request, filetype, inner_path, ret_dict)
            if filetype == MARKDOWN:
                c = ret_dict['file_content']
                ret_dict['file_content'] = convert_md_link(c, repo_id, username)
        elif filetype == DOCUMENT:
            handle_document(inner_path, obj_id, fileext, ret_dict)
        elif filetype == SPREADSHEET:
            handle_spreadsheet(inner_path, obj_id, fileext, ret_dict)
        elif filetype == OPENDOCUMENT:
            if fsize == 0:
                ret_dict['err'] = _(u'Invalid file format.')
        elif filetype == PDF:
            handle_pdf(inner_path, obj_id, fileext, ret_dict)
        elif filetype == IMAGE:
            # build a sorted list of sibling images for prev/next navigation
            parent_dir = os.path.dirname(path)
            dirs = seafile_api.list_dir_by_commit_and_path(
                current_commit.repo_id, current_commit.id, parent_dir)
            if not dirs:
                raise Http404

            img_list = []
            for dirent in dirs:
                if not stat.S_ISDIR(dirent.props.mode):
                    fltype, flext = get_file_type_and_ext(dirent.obj_name)
                    if fltype == 'Image':
                        img_list.append(dirent.obj_name)

            if len(img_list) > 1:
                img_list.sort(lambda x, y: cmp(x.lower(), y.lower()))
                cur_img_index = img_list.index(u_filename)
                if cur_img_index != 0:
                    img_prev = posixpath.join(parent_dir, img_list[cur_img_index - 1])
                if cur_img_index != len(img_list) - 1:
                    img_next = posixpath.join(parent_dir, img_list[cur_img_index + 1])
        else:
            pass

    # generate file path navigator
    zipped = gen_path_link(path, repo.name)

    # file shared link
    l = FileShare.objects.filter(repo_id=repo_id).filter(
        username=username).filter(path=path)
    fileshare = l[0] if len(l) > 0 else None
    http_or_https = request.is_secure() and 'https' or 'http'
    domain = RequestSite(request).domain
    if fileshare:
        file_shared_link = gen_file_share_link(fileshare.token)
    else:
        file_shared_link = ''

    for g in request.user.joined_groups:
        g.avatar = grp_avatar(g.id, 20)

    # List repo groups: get the groups this repo is shared to.
    if request.user.org:
        org_id = request.user.org.org_id
        repo_shared_groups = get_org_groups_by_repo(org_id, repo_id)
    else:
        repo_shared_groups = get_shared_groups_by_repo(repo_id)

    # Keep only the groups the user has joined.
    groups = [x for x in repo_shared_groups if is_group_user(x.id, username)]
    if len(groups) > 1:
        ctx = {}
        ctx['groups'] = groups
        repogrp_str = render_to_string("snippets/repo_group_list.html", ctx)
    else:
        repogrp_str = ''

    file_path_hash = hashlib.md5(urllib2.quote(path.encode('utf-8'))).hexdigest()[:12]

    # fetch file contributors and latest contributor
    contributors, last_modified, last_commit_id = \
        FileContributors.objects.get_file_contributors(
            repo_id, path.encode('utf-8'), file_path_hash, obj_id)
    latest_contributor = contributors[0] if contributors else None

    # check whether the file is starred
    is_starred = False
    org_id = -1
    if request.user.org:
        org_id = request.user.org.org_id
    is_starred = is_file_starred(username, repo.id, path.encode('utf-8'), org_id)

    template = 'view_file_%s.html' % ret_dict['filetype'].lower()

    return render_to_response(template, {
            'repo': repo,
            'is_repo_owner': is_repo_owner,
            'obj_id': obj_id,
            'filename': u_filename,
            'path': path,
            'zipped': zipped,
            'current_commit': current_commit,
            'fileext': fileext,
            'raw_path': raw_path,
            'fileshare': fileshare,
            'protocol': http_or_https,
            'domain': domain,
            'file_shared_link': file_shared_link,
            'err': ret_dict['err'],
            'file_content': ret_dict['file_content'],
            'file_enc': ret_dict['file_enc'],
            'encoding': ret_dict['encoding'],
            'file_encoding_list': ret_dict['file_encoding_list'],
            'html_exists': ret_dict['html_exists'],
            'html_detail': ret_dict.get('html_detail', {}),
            'filetype': ret_dict['filetype'],
            'groups': groups,
            'use_pdfjs': USE_PDFJS,
            'contributors': contributors,
            'latest_contributor': latest_contributor,
            'last_modified': last_modified,
            'last_commit_id': last_commit_id,
            'repo_group_str': repogrp_str,
            'is_starred': is_starred,
            'user_perm': user_perm,
            'img_prev': img_prev,
            'img_next': img_next,
            'highlight_keyword': settings.HIGHLIGHT_KEYWORD,
            }, context_instance=RequestContext(request))
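# A minimal sketch (hypothetical helper, not part of seahub) of the image
# prev/next navigation computed in the IMAGE branch above: sort the sibling
# image names case-insensitively, then take the neighbours of the current file.
def neighbour_images(img_names, current):
    ordered = sorted(img_names, key=lambda name: name.lower())
    idx = ordered.index(current)
    prev_name = ordered[idx - 1] if idx > 0 else None
    next_name = ordered[idx + 1] if idx < len(ordered) - 1 else None
    return prev_name, next_name
# e.g. neighbour_images(['B.png', 'a.jpg', 'c.gif'], 'B.png') -> ('a.jpg', 'c.gif')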
def list_shared_links(request):
    """List shared links, and remove invalid links (file/dir was deleted or moved).
    """
    username = request.user.username

    # download links
    fileshares = FileShare.objects.filter(username=username)
    fs_files, fs_dirs = [], []
    for fs in fileshares:
        r = seafile_api.get_repo(fs.repo_id)
        if not r:
            fs.delete()
            continue

        if fs.is_file_share_link():
            # file share link: drop it if the file no longer exists
            if seafile_api.get_file_id_by_path(r.id, fs.path) is None:
                fs.delete()
                continue
            fs.filename = os.path.basename(fs.path)
            fs.shared_link = gen_file_share_link(fs.token)
        else:
            # directory share link: drop it if the directory no longer exists
            if seafile_api.get_dir_id_by_path(r.id, fs.path) is None:
                fs.delete()
                continue
            if fs.path != '/':
                fs.filename = os.path.basename(fs.path.rstrip('/'))
            else:
                fs.filename = fs.path
            fs.shared_link = gen_dir_share_link(fs.token)
        fs.repo = r

        if fs.expire_date is not None and timezone.now() > fs.expire_date:
            fs.is_expired = True

        if fs.is_file_share_link():
            fs_files.append(fs)
        else:
            fs_dirs.append(fs)

    fs_files.sort(lambda x, y: cmp(x.filename, y.filename))
    fs_dirs.sort(lambda x, y: cmp(x.filename, y.filename))

    # upload links
    uploadlinks = UploadLinkShare.objects.filter(username=username)
    p_uploadlinks = []
    for link in uploadlinks:
        r = seafile_api.get_repo(link.repo_id)
        if not r:
            link.delete()
            continue
        if seafile_api.get_dir_id_by_path(r.id, link.path) is None:
            link.delete()
            continue

        if link.path != '/':
            link.dir_name = os.path.basename(link.path.rstrip('/'))
        else:
            link.dir_name = link.path
        link.shared_link = gen_shared_upload_link(link.token)
        link.repo = r
        p_uploadlinks.append(link)
    p_uploadlinks.sort(lambda x, y: cmp(x.dir_name, y.dir_name))

    return render_to_response('share/links.html', {
            "fileshares": fs_dirs + fs_files,
            "uploadlinks": p_uploadlinks,
            }, context_instance=RequestContext(request))
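# Note (sketch, not part of seahub): the cmp-style sorts above, e.g.
# fs_files.sort(lambda x, y: cmp(x.filename, y.filename)), only work on
# Python 2. A behaviour-equivalent, portable form uses a sort key instead;
# the helper name below is hypothetical.
def sort_shared_links(fs_files, fs_dirs, p_uploadlinks):
    fs_files.sort(key=lambda x: x.filename)
    fs_dirs.sort(key=lambda x: x.filename)
    p_uploadlinks.sort(key=lambda x: x.dir_name)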