def serialize_comment(comment, auth, anonymous=False):
    return {
        'id': comment._id,
        'author': {
            'id': privacy_info_handle(comment.user._id, anonymous),
            'url': privacy_info_handle(comment.user.url, anonymous),
            'name': privacy_info_handle(
                comment.user.fullname, anonymous, name=True
            ),
            'gravatarUrl': privacy_info_handle(
                gravatar(
                    comment.user, use_ssl=True,
                    size=settings.GRAVATAR_SIZE_DISCUSSION
                ),
                anonymous
            ),
        },
        'dateCreated': comment.date_created.isoformat(),
        'dateModified': comment.date_modified.isoformat(),
        'content': comment.content,
        'hasChildren': bool(getattr(comment, 'commented', [])),
        'canEdit': comment.user == auth.user,
        'modified': comment.modified,
        'isDeleted': comment.is_deleted,
        'isAbuse': auth.user and auth.user._id in comment.reports,
    }
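# For reference: a minimal sketch of the privacy_info_handle helper every
# serializer here relies on. This is an assumption inferred from the call
# sites, not the canonical implementation: when the viewer arrived via an
# anonymous view-only link it scrubs the value, substituting a generic label
# for names and an empty string for ids and urls; otherwise it passes the
# value through untouched.
def privacy_info_handle(info, anonymous, name=False):
    """Hide identifying info when the node is viewed anonymously (sketch)."""
    if anonymous:
        return 'A user' if name else ''
    return info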
def comment_discussion(auth, node, **kwargs):
    users = collect_discussion(node)
    anonymous = has_anonymous_link(node, auth)

    # Sort users by comment frequency
    # TODO: Allow sorting by recency, combination of frequency and recency
    sorted_users = sorted(
        users.keys(),
        key=lambda item: len(users[item]),
        reverse=True,
    )

    return {
        'discussion': [
            {
                'id': privacy_info_handle(user._id, anonymous),
                'url': privacy_info_handle(user.url, anonymous),
                'fullname': privacy_info_handle(user.fullname, anonymous, name=True),
                'isContributor': node.is_contributor(user),
                'gravatarUrl': privacy_info_handle(
                    gravatar(
                        user, use_ssl=True,
                        size=settings.GRAVATAR_SIZE_DISCUSSION,
                    ),
                    anonymous
                ),
            }
            for user in sorted_users
        ]
    }
def serialize(self, anonymous=False):
    return {
        'id': utils.privacy_info_handle(self._primary_key, anonymous),
        'fullname': utils.privacy_info_handle(self.fullname, anonymous, name=True),
        'registered': self.is_registered,
        'url': utils.privacy_info_handle(self.url, anonymous),
        'api_url': utils.privacy_info_handle(self.api_url, anonymous),
    }
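# Usage sketch for the serializer above, with a hypothetical registered user;
# anonymous=True scrubs the identifying fields but still reports registration
# status (output shape inferred from the privacy_info_handle sketch earlier):
#
#     user.serialize(anonymous=True)
#     # => {
#     #     'id': '',
#     #     'fullname': 'A user',
#     #     'registered': True,
#     #     'url': '',
#     #     'api_url': '',
#     # }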
def file_info(auth, fid, **kwargs):
    versions = []
    node = kwargs['node'] or kwargs['project']
    file_name = fid
    file_name_clean = urlsafe_filename(file_name)
    files_page_url = node.web_url_for('collect_file_trees')
    latest_download_url = None
    api_url = None
    anonymous = has_anonymous_link(node, auth)

    try:
        files_versions = node.files_versions[file_name_clean]
    except KeyError:
        raise HTTPError(http.NOT_FOUND)
    latest_version_number = get_latest_version_number(file_name_clean, node) + 1

    for idx, version in enumerate(list(reversed(files_versions))):
        node_file = NodeFile.load(version)
        number = len(files_versions) - idx
        unique, total = get_basic_counters('download:{}:{}:{}'.format(
            node._primary_key,
            file_name_clean,
            number,
        ))
        download_url = node_file.download_url(node)
        api_url = node_file.api_url(node)
        versions.append({
            'file_name': file_name,
            'download_url': download_url,
            'version_number': number,
            'display_number': number if idx > 0 else 'current',
            'modified_date': node_file.date_uploaded.strftime('%Y/%m/%d %I:%M %p'),
            'downloads': total if total else 0,
            'committer_name': privacy_info_handle(
                node_file.uploader.fullname, anonymous, name=True
            ),
            'committer_url': privacy_info_handle(node_file.uploader.url, anonymous),
        })
        if number == latest_version_number:
            latest_download_url = download_url

    return {
        'node_title': node.title,
        'file_name': file_name,
        'versions': versions,
        'registered': node.is_registration,
        'urls': {
            'api': api_url,
            'files': files_page_url,
            'latest': {
                'download': latest_download_url,
            },
        }
    }
def _get_wiki_versions(node, name, anonymous=False):
    key = to_mongo_key(name)

    # Skip if wiki_page doesn't exist; happens on new projects before
    # default "home" page is created
    if key not in node.wiki_pages_versions:
        return []

    versions = [
        NodeWikiPage.load(version_wiki_id)
        for version_wiki_id in node.wiki_pages_versions[key]
    ]

    return [
        {
            'version': version.version,
            'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
            'date': version.date.replace(microsecond=0),
            'compare_web_url': node.web_url_for('project_wiki_compare', wname=name, wver=version.version, _guid=True),
        }
        for version in reversed(versions)
    ]
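# Note on to_mongo_key (assumed behavior, inferred from usage): MongoDB
# forbids '.' and '$' inside dictionary keys, so the wiki page name is
# presumably escaped and lower-cased before being used as a key in
# node.wiki_pages_versions, which also makes page lookups case-insensitive.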
def dataverse_get_file_info(node_addon, auth, **kwargs):
    """API view that gets info for a file."""
    node = node_addon.owner

    file_id = kwargs.get('path')
    fail_if_unauthorized(node_addon, auth, file_id)
    fail_if_private(file_id)

    anonymous = has_anonymous_link(node, auth)

    download_url = node.web_url_for('dataverse_download_file', path=file_id)
    dataverse_url = 'http://{0}/dvn/dv/'.format(HOST) + node_addon.dataverse_alias
    study_url = 'http://dx.doi.org/' + node_addon.study_hdl
    delete_url = node.api_url_for('dataverse_delete_file', path=file_id)

    data = {
        'node': {
            'id': node._id,
            'title': node.title,
        },
        'filename': scrape_dataverse(file_id, name_only=True)[0],
        'dataverse': privacy_info_handle(node_addon.dataverse, anonymous),
        'study': privacy_info_handle(node_addon.study, anonymous),
        'urls': {
            'dataverse': privacy_info_handle(dataverse_url, anonymous),
            'study': privacy_info_handle(study_url, anonymous),
            'download': privacy_info_handle(download_url, anonymous),
            'delete': privacy_info_handle(delete_url, anonymous),
            'files': node.web_url_for('collect_file_trees'),
        }
    }

    return {'data': data}, httplib.OK
def _get_wiki_versions(node, name, anonymous=False):
    # Skip if wiki_page doesn't exist; happens on new projects before
    # default "home" page is created
    wiki_page = WikiPage.objects.get_for_node(node, name)
    if wiki_page:
        versions = wiki_page.get_versions()
    else:
        return []

    return [
        {
            'version': version.identifier,
            'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
            'date': '{} UTC'.format(version.created.replace(microsecond=0).isoformat().replace('T', ' ')),
        }
        for version in versions
    ]
def _get_wiki_versions(node, name, anonymous=False):
    key = to_mongo_key(name)

    # Skip if wiki_page doesn't exist; happens on new projects before
    # default "home" page is created
    if key not in node.wiki_pages_versions:
        return []

    versions = [
        NodeWikiPage.load(version_wiki_id)
        for version_wiki_id in node.wiki_pages_versions[key]
    ]

    return [
        {
            'version': version.version,
            'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
            'date': version.date.replace(microsecond=0).isoformat(),
        }
        for version in reversed(versions)
    ]
def _get_wiki_versions(node, name, anonymous=False):
    key = to_mongo_key(name)

    # Skip if wiki_page doesn't exist; happens on new projects before
    # default "home" page is created
    if key not in node.wiki_pages_versions:
        return []

    versions = [
        NodeWikiPage.load(version_wiki_id)
        for version_wiki_id in node.wiki_pages_versions[key]
    ]

    return [
        {
            'version': version.version,
            'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
            'date': '{} UTC'.format(version.date.replace(microsecond=0).isoformat().replace('T', ' ')),
        }
        for version in reversed(versions)
    ]
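# The _get_wiki_versions variants above serialize the version date three
# ways; for example, for a version saved at 2014-05-16 13:45:00.123456 UTC:
#
#     version.date.replace(microsecond=0)                -> datetime(2014, 5, 16, 13, 45)
#     ...replace(microsecond=0).isoformat()              -> '2014-05-16T13:45:00'
#     '{} UTC'.format(...isoformat().replace('T', ' '))  -> '2014-05-16 13:45:00 UTC'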
def figshare_view_file(*args, **kwargs):
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    node_settings = kwargs['node_addon']

    article_id = kwargs.get('aid') or None
    file_id = kwargs.get('fid') or None

    anonymous = has_anonymous_link(node, auth)

    if not article_id or not file_id:
        raise HTTPError(http.NOT_FOUND)

    connect = Figshare.from_settings(node_settings.user_settings)
    if node_settings.figshare_type == 'project':
        item = connect.project(node_settings, node_settings.figshare_id)
    else:
        item = connect.article(node_settings, node_settings.figshare_id)

    if article_id not in str(item):
        raise HTTPError(http.NOT_FOUND)

    article = connect.article(node_settings, article_id)

    found = False
    for f in article['items'][0]['files']:
        if f['id'] == int(file_id):
            found = f
            break
    if not found:
        raise HTTPError(http.NOT_FOUND)

    try:
        # If GUID has already been created, we won't redirect, and can check
        # whether the file exists below
        guid = FigShareGuidFile.find_one(
            Q('node', 'eq', node) &
            Q('article_id', 'eq', article_id) &
            Q('file_id', 'eq', file_id)
        )
    except:
        guid = FigShareGuidFile(node=node, article_id=article_id, file_id=file_id)
        guid.save()

    redirect_url = check_file_guid(guid)
    if redirect_url:
        return redirect(redirect_url)

    private = not (article['items'][0]['status'] == 'Public')

    figshare_url = 'http://figshare.com/'
    if private:
        figshare_url += 'preview/_preview/{0}'.format(
            article['items'][0]['article_id'])
    else:
        figshare_url += 'articles/{0}/{1}'.format(
            article['items'][0]['title'].replace(' ', '_'),
            article['items'][0]['article_id'])

    version_url = 'http://figshare.com/articles/{filename}/{file_id}'.format(
        filename=article['items'][0]['title'],
        file_id=article['items'][0]['article_id'])

    download_url = node.api_url + 'figshare/download/article/{aid}/file/{fid}'.format(
        aid=article_id, fid=file_id)
    render_url = node.api_url + 'figshare/render/article/{aid}/file/{fid}'.format(
        aid=article_id, fid=file_id)
    delete_url = node.api_url + 'figshare/article/{aid}/file/{fid}/'.format(
        aid=article_id, fid=file_id)

    filename = found['name']
    cache_file_name = get_cache_file(article_id, file_id)
    rendered = get_cache_content(node_settings, cache_file_name)
    if private:
        rendered = messages.FIGSHARE_VIEW_FILE_PRIVATE.format(
            url='http://figshare.com/')
    elif rendered is None:
        filename, size, filedata = connect.get_file(node_settings, found)
        if figshare_settings.MAX_RENDER_SIZE is not None and size > figshare_settings.MAX_RENDER_SIZE:
            rendered = messages.FIGSHARE_VIEW_FILE_OVERSIZED.format(
                url=found.get('download_url'))
        else:
            rendered = get_cache_content(
                node_settings,
                cache_file_name,
                start_render=True,
                remote_path=filename,
                file_content=filedata,
                download_url=download_url,
            )

    # categories = connect.categories()['items']  # TODO Cache this
    # categories = ''.join(
    #     ["<option value='{val}'>{label}</option>".format(val=i['id'], label=i['name']) for i in categories])

    rv = {
        'node': {
            'id': node._id,
            'title': node.title,
        },
        'file_name': filename,
        'rendered': rendered,
        'file_status': article['items'][0]['status'],
        'file_version': article['items'][0]['version'],
        'doi': 'http://dx.doi.org/10.6084/m9.figshare.{0}'.format(
            article['items'][0]['article_id']),
        'parent_type': 'fileset' if article['items'][0]['defined_type'] == 'fileset' else 'singlefile',
        'parent_id': article['items'][0]['article_id'],
        # 'figshare_categories': categories,
        'figshare_title': article['items'][0]['title'],
        'figshare_desc': article['items'][0]['description'],
        'render_url': render_url,
        'urls': {
            'render': render_url,
            'download': found.get('download_url'),
            'version': version_url,
            'figshare': privacy_info_handle(figshare_url, anonymous),
            'delete': delete_url,
            'files': node.web_url_for('collect_file_trees'),
        }
    }
    rv.update(_view_project(node, auth, primary=True))
    return rv
def figshare_view_file(*args, **kwargs):
    auth = kwargs['auth']
    node = kwargs['node'] or kwargs['project']
    node_settings = kwargs['node_addon']

    article_id = kwargs.get('aid') or None
    file_id = kwargs.get('fid') or None

    anonymous = has_anonymous_link(node, auth)

    if not article_id or not file_id:
        raise HTTPError(http.NOT_FOUND)

    connect = Figshare.from_settings(node_settings.user_settings)
    if node_settings.figshare_type == 'project':
        item = connect.project(node_settings, node_settings.figshare_id)
    else:
        item = connect.article(node_settings, node_settings.figshare_id)

    if article_id not in str(item):
        raise HTTPError(http.NOT_FOUND)

    article = connect.article(node_settings, article_id)

    found = False
    for f in article['items'][0]['files']:
        if f['id'] == int(file_id):
            found = f
            break
    if not found:
        raise HTTPError(http.NOT_FOUND)

    try:
        # If GUID has already been created, we won't redirect, and can check
        # whether the file exists below
        guid = FigShareGuidFile.find_one(
            Q('node', 'eq', node) &
            Q('article_id', 'eq', article_id) &
            Q('file_id', 'eq', file_id)
        )
    except:
        guid = FigShareGuidFile(node=node, article_id=article_id, file_id=file_id)
        guid.save()

    redirect_url = check_file_guid(guid)
    if redirect_url:
        return redirect(redirect_url)

    private = not (article['items'][0]['status'] == 'Public')

    figshare_url = 'http://figshare.com/'
    if private:
        figshare_url += 'preview/_preview/{0}'.format(
            article['items'][0]['article_id'])
    else:
        figshare_url += 'articles/{0}/{1}'.format(
            article['items'][0]['title'].replace(' ', '_'),
            article['items'][0]['article_id'])

    version_url = 'http://figshare.com/articles/{filename}/{file_id}'.format(
        filename=article['items'][0]['title'],
        file_id=article['items'][0]['article_id'])

    download_url = node.api_url + 'figshare/download/article/{aid}/file/{fid}'.format(
        aid=article_id, fid=file_id)
    render_url = node.api_url + 'figshare/render/article/{aid}/file/{fid}'.format(
        aid=article_id, fid=file_id)
    delete_url = node.api_url + 'figshare/article/{aid}/file/{fid}/'.format(
        aid=article_id, fid=file_id)

    filename = found['name']
    cache_file_name = get_cache_file(article_id, file_id)
    rendered = get_cache_content(node_settings, cache_file_name)
    if private:
        rendered = messages.FIGSHARE_VIEW_FILE_PRIVATE.format(
            url='http://figshare.com/')
    elif rendered is None:
        filename, size, filedata = connect.get_file(node_settings, found)
        if figshare_settings.MAX_RENDER_SIZE is not None and size > figshare_settings.MAX_RENDER_SIZE:
            rendered = messages.FIGSHARE_VIEW_FILE_OVERSIZED.format(
                url=found.get('download_url'))
        else:
            rendered = get_cache_content(
                node_settings,
                cache_file_name,
                start_render=True,
                remote_path=filename,
                file_content=filedata,
                download_url=download_url,
            )

    categories = connect.categories()['items']  # TODO Cache this
    categories = ''.join(
        ["<option value='{val}'>{label}</option>".format(val=i['id'], label=i['name'])
         for i in categories])

    rv = {
        'node': {
            'id': node._id,
            'title': node.title,
        },
        'file_name': filename,
        'rendered': rendered,
        'file_status': article['items'][0]['status'],
        'file_version': article['items'][0]['version'],
        'doi': 'http://dx.doi.org/10.6084/m9.figshare.{0}'.format(
            article['items'][0]['article_id']),
        'parent_type': 'fileset' if article['items'][0]['defined_type'] == 'fileset' else 'singlefile',
        'parent_id': article['items'][0]['article_id'],
        'figshare_categories': categories,
        'figshare_title': article['items'][0]['title'],
        'figshare_desc': article['items'][0]['description'],
        'urls': {
            'render': render_url,
            'download': found.get('download_url'),
            'version': version_url,
            'figshare': privacy_info_handle(figshare_url, anonymous),
            'delete': delete_url,
            'files': node.web_url_for('collect_file_trees'),
        }
    }
    rv.update(_view_project(node, auth, primary=True))
    return rv