def render_content(content):
    """Render *content* as markdown, honouring the site's allow-html setting."""
    html_allowed = p.toolkit.asbool(
        config.get('ckanext.pages.allow_html', False))
    try:
        return h.render_markdown(content, allow_html=html_allowed)
    except TypeError:
        # Older CKAN (< 2.3) has no allow_html keyword argument.
        return h.render_markdown(content)
def additional_fields(self, dataset_ref, dataset_dict):
    """Add extra schema.org triples for *dataset_ref* to the RDF graph.

    Adds identifier (the dataset URL), text (rendered remarks),
    description (rendered notes, replacing any existing one),
    sourceOrganization, author and optional spatialCoverage.
    """
    # identifier: fully qualified URL of the dataset page
    dataset_url = url_for('dataset_read', id=dataset_dict['name'], qualified=True)
    self.g.add((dataset_ref, SCHEMA.identifier, Literal(dataset_url)))
    # text: remarks field, rendered from markdown to HTML
    bemerkungen = render_markdown(dataset_dict.get('sszBemerkungen', ''))
    self.g.add((dataset_ref, SCHEMA.text, Literal(bemerkungen)))
    # description (render markdown); remove any existing description first
    notes = render_markdown(dataset_dict.get('notes', ''))
    self.g.remove((dataset_ref, SCHEMA.description, None))
    self.g.add((dataset_ref, SCHEMA.description, Literal(notes)))
    # sourceOrganization comes from the 'author' field
    author = dataset_dict.get('author', '')
    self.g.add((dataset_ref, SCHEMA.sourceOrganization, Literal(author)))
    # author comes from the 'data_publisher' field (note the deliberate swap)
    data_publisher = dataset_dict.get('data_publisher', '')
    self.g.add((dataset_ref, SCHEMA.author, Literal(data_publisher)))
    # spatialRelationship
    spatial = dataset_dict.get('spatialRelationship', '')
    if spatial:
        # add spatialRelationship as literal ("named location")
        self.g.add((dataset_ref, SCHEMA.spatialCoverage, Literal(spatial)))
def test_ampersand_in_links(self):
    """Ampersands in markdown links and auto-detected URLs are preserved."""
    cases = [
        (u'[link](/url?a=1&b=2)',
         u'<p><a href="/url?a=1&b=2">link</a></p>'),
        (u'http://example.com/page?a=1&b=2',
         u'<p><a href="http://example.com/page?a=1&b=2" target="_blank" rel="nofollow">http://example.com/page?a=1&b=2</a></p>'),
    ]
    for data, output in cases:
        eq_(h.render_markdown(data), output)
def render_package(cls, pkg, context):
    '''Prepares for rendering a package. Takes a Package object and formats
    it for the various context variables required to call render. Note that
    the actual calling of render('package/read') is left to the caller.'''
    c.pkg_notes_formatted = h.render_markdown(pkg.get('notes'))
    c.current_rating, c.num_ratings = ckan.rating.get_rating(context['package'])
    url = pkg.get('url', '')
    # Link to the dataset's homepage, or a placeholder when none is given.
    c.pkg_url_link = h.link_to(url, url, rel='foaf:homepage', target='_blank') \
        if url else _("No web page given")
    c.pkg_author_link = cls._person_email_link(
        name=pkg.get('author'),
        email=pkg.get('author_email'),
        fallback=_("Author not given"))
    c.pkg_maintainer_link = cls._person_email_link(
        name=pkg.get('maintainer'),
        email=pkg.get('maintainer_email'),
        fallback=_("Maintainer not given"))
    c.package_relationships = context['package'].get_relationships_printable()
    # Collect visible, non-deleted extras as (key, value) pairs sorted by key.
    c.pkg_extras = []
    for extra in sorted(pkg.get('extras', []), key=lambda x: x['key']):
        if extra.get('state') == 'deleted':
            continue
        k, v = extra['key'], extra['value']
        if k in g.package_hide_extras:
            continue
        if isinstance(v, (list, tuple)):
            v = ", ".join(map(unicode, v))
        c.pkg_extras.append((k, v))
        # dportoles: duplicate and format markdown for these two extras
        if k == 'Data Dictionary':
            c.pkg_dataDictionary_formatted = h.render_markdown(v)
        if k == 'Data Quality':
            c.pkg_dataQuality_formatted = h.render_markdown(v)
    # lorena Extras IAEST -- same treatment for the IAEST-specific extras
    c.pkg_extrasIAEST = []
    for extra in sorted(pkg.get('extrasIAEST', []), key=lambda x: x['key']):
        if extra.get('state') == 'deleted':
            continue
        k, v = extra['key'], extra['value']
        if k in g.package_hide_extras:
            continue
        if isinstance(v, (list, tuple)):
            v = ", ".join(map(unicode, v))
        c.pkg_extrasIAEST.append((k, v))
    if context.get('revision_id') or context.get('revision_date'):
        # request was for a specific revision id or date
        c.pkg_revision_id = c.pkg_dict[u'revision_id']
        c.pkg_revision_timestamp = c.pkg_dict[u'revision_timestamp']
        c.pkg_revision_not_latest = c.pkg_dict[u'revision_id'] != c.pkg.revision.id
def test_normal_link(self):
    """Bare URLs are auto-linked with nofollow and a _blank target."""
    urls = (
        "http://somelink/",
        "http://somelink.com/#anchor",
        "http://www.somelink.com/#anchor",
    )
    for url in urls:
        exp = '<a href="%s" target="_blank" rel="nofollow">%s</a>' % (url, url)
        out = h.render_markdown(url)
        assert exp in out, "\nGot: %s\nWanted: %s" % (out, exp)
def setup_template_variables(self, context, data_dict):
    """Populate the template context (c) for the package form/view:
    authorized groups, license options, sysadmin flag, rendered notes,
    revision info and the change-state permission."""
    from ckan.lib.helpers import render_markdown
    authz_fn = logic.get_action('group_list_authz')
    c.groups_authz = authz_fn(context, data_dict)
    data_dict.update({'available_only': True})
    c.groups_available = authz_fn(context, data_dict)
    # Empty first entry so the select box can show "no license".
    c.licenses = [('', '')] + base.model.Package.get_license_options()
    # CS: bad_spelling ignore 2 lines
    c.licences = c.licenses
    maintain.deprecate_context_item('licences', 'Use `c.licenses` instead')
    c.is_sysadmin = ckan.new_authz.is_sysadmin(c.user)
    if c.pkg:
        c.related_count = c.pkg.related_count
        c.pkg_notes_formatted = render_markdown(c.pkg.notes)
    if context.get('revision_id') or context.get('revision_date'):
        # Request was for a specific revision id or date.
        c.pkg_revision_id = c.pkg_dict[u'revision_id']
        c.pkg_revision_timestamp = c.pkg_dict[u'revision_timestamp']
        c.pkg_revision_not_latest = c.pkg_dict[u'revision_id'] != c.pkg.revision.id
    ## This is messy as auths take domain object not data_dict
    context_pkg = context.get('package', None)
    pkg = context_pkg or c.pkg
    if pkg:
        try:
            if not context_pkg:
                context['package'] = pkg
            logic.check_access('package_change_state', context)
            c.auth_for_change_state = True
        except logic.NotAuthorized:
            c.auth_for_change_state = False
def as_dict(self, ref_package_by='name', ref_group_by='name'): _dict = domain_object.DomainObject.as_dict(self) # Set 'license' in _dict to cater for old clients. # Todo: Remove from Version 2? _dict['license'] = self.license.title if self.license else _dict.get('license_id', '') _dict['isopen'] = self.isopen() tags = [tag.name for tag in self.get_tags()] tags.sort() # so it is determinable _dict['tags'] = tags groups = [getattr(group, ref_group_by) for group in self.get_groups()] groups.sort() _dict['groups'] = groups _dict['extras'] = dict([(key, value) for key, value in self.extras.items()]) _dict['ratings_average'] = self.get_average_rating() _dict['ratings_count'] = len(self.ratings) _dict['resources'] = [res.as_dict(core_columns_only=False) \ for res in self.resources] site_url = config.get('ckan.site_url', None) if site_url: _dict['ckan_url'] = '%s/dataset/%s' % (site_url, self.name) _dict['relationships'] = [rel.as_dict(self, ref_package_by=ref_package_by) for rel in self.get_relationships()] _dict['metadata_modified'] = self.metadata_modified.isoformat() \ if self.metadata_modified else None _dict['metadata_created'] = self.metadata_created.isoformat() \ if self.metadata_created else None import ckan.lib.helpers as h _dict['notes_rendered'] = h.render_markdown(self.notes) _dict['type'] = self.type or u'dataset' return _dict
def _setup_template_variables(self, user_dict):
    """Populate the template context (c) for the user dashboard:
    the user dict, rendered 'about' text and a paginated dataset list."""
    def search_url(params):
        # Build the dashboard-resources URL with the given query params.
        url = h.url_for(
            controller='ckanext.publicamundi.controllers.user:UserController',
            action='show_dashboard_resources')
        return url_with_params(url, params)

    def url_with_params(url, params):
        params = _encode_params(params)
        return url + u'?' + urlencode(params)

    def _encode_params(params):
        # urlencode needs byte strings under Python 2.
        return [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v))
                for k, v in params]

    def pager_url(q=None, page=None):
        # Keep all current params except 'page', then set the new page.
        params_nopage = [(k, v) for k, v in request.params.items()
                         if k != 'page']
        params = list(params_nopage)
        params.append(('page', page))
        return search_url(params)

    c.user_dict = user_dict
    c.is_myself = user_dict['name'] == c.user
    c.about_formatted = h.render_markdown(user_dict['about'])

    # Resources page items (configurable page size, default 10)
    _resources_page_items = int(config.get(
        'ckanext.publicamundi.dashboard.resources.num_page_items', 10))

    # datasets paging
    c.page = h.Page(
        collection=user_dict['datasets'],
        page=request.params.get('page', 1),
        url=pager_url,
        items_per_page=_resources_page_items
    )
def _add_to_pkg_dict(self, context, pkg_dict):
    '''Add key/values to pkg_dict and return it.

    Adds an image display URL, the number of datasets in the showcase,
    and the notes rendered as HTML. Non-showcase packages pass through
    unchanged.
    '''
    if pkg_dict['type'] != 'showcase':
        return pkg_dict

    # Add a display url for the Showcase image to the pkg dict so the
    # template has access to it.
    image_url = pkg_dict.get('image_url')
    pkg_dict[u'image_display_url'] = image_url
    if image_url and not image_url.startswith('http'):
        # Locally uploaded file: build a fully qualified static URL.
        pkg_dict[u'image_url'] = image_url
        pkg_dict[u'image_display_url'] = \
            h.url_for_static('uploads/{0}/{1}'
                             .format(DATASET_TYPE_NAME,
                                     pkg_dict.get('image_url')),
                             qualified=True)

    # Add dataset count.
    pkg_dict[u'num_datasets'] = len(
        toolkit.get_action('ckanext_showcase_package_list')(
            context, {'showcase_id': pkg_dict['id']}))

    # Rendered notes. Use .get() so a dict without a 'notes' key does not
    # raise KeyError; render_markdown treats None as empty.
    pkg_dict[u'showcase_notes_formatted'] = \
        h.render_markdown(pkg_dict.get('notes'))
    return pkg_dict
def as_dict(self, ref_package_by="name", ref_group_by="name"): _dict = domain_object.DomainObject.as_dict(self) # Set 'license' in _dict to cater for old clients. # Todo: Remove from Version 2? _dict["license"] = self.license.title if self.license else _dict.get("license_id", "") _dict["isopen"] = self.isopen() tags = [tag.name for tag in self.get_tags()] tags.sort() # so it is determinable _dict["tags"] = tags groups = [getattr(group, ref_group_by) for group in self.get_groups()] groups.sort() _dict["groups"] = groups _dict["extras"] = dict([(key, value) for key, value in self.extras.items()]) _dict["ratings_average"] = self.get_average_rating() _dict["ratings_count"] = len(self.ratings) _dict["resources"] = [res.as_dict(core_columns_only=False) for res in self.resources] site_url = config.get("ckan.site_url", None) if site_url: _dict["ckan_url"] = "%s/dataset/%s" % (site_url, self.name) _dict["relationships"] = [rel.as_dict(self, ref_package_by=ref_package_by) for rel in self.get_relationships()] _dict["metadata_modified"] = self.metadata_modified.isoformat() if self.metadata_modified else None _dict["metadata_created"] = self.metadata_created.isoformat() if self.metadata_created else None import ckan.lib.helpers as h _dict["notes_rendered"] = h.render_markdown(self.notes) _dict["type"] = self.type or u"dataset" # tracking import ckan.model as model tracking = model.TrackingSummary.get_for_package(self.id) _dict["tracking_summary"] = tracking return _dict
def __init__(self):
    """Load the schema description JSON and build the lookup tables for
    dataset/resource fields, choice lists and vocabularies."""
    with open(_JSON_NAME) as j:
        schema = json.load(j)

    # make markdown less noisy
    markdown_log = logging.getLogger('MARKDOWN')
    markdown_log.setLevel(logging.WARNING)

    self.intro = schema['intro']
    self.languages = schema['languages']
    self.dataset_sections = schema['dataset_sections']
    # Flatten the per-section field lists into one list.
    self.dataset_fields = []
    for s in self.dataset_sections:
        self.dataset_fields.extend(s['fields'])
    self.resource_fields = schema['resource_fields']
    self.dataset_field_by_id = dict((f['id'], f) for f in self.dataset_fields)
    # NOTE(review): assumes section index 1 is the metadata section --
    # confirm against the JSON schema file.
    self.metadata_field_by_id = dict(
        (f['id'], f) for f in self.dataset_sections[1]['fields'])
    self.resource_field_by_id = dict((f['id'], f) for f in self.resource_fields)

    # Map each vocabulary name to its field's choices, and record the
    # vocabulary name back on the field.
    self.vocabularies = {}
    for k, v in schema['vocabularies'].iteritems():
        self.vocabularies[k] = self.dataset_field_by_id[v]['choices']
        self.dataset_field_by_id[v]['vocabulary'] = k

    for f in self.dataset_fields + self.resource_fields:
        # Pre-render the per-language markdown descriptions to HTML.
        f['description_html'] = dict(
            (k, render_markdown(v)) for k, v in f['description'].items())
        if 'choices' not in f:
            continue
        # Index choices by every identifier they carry.
        f['choices_by_pilot_uuid'] = dict(
            (c['pilot_uuid'], c) for c in f['choices'] if 'pilot_uuid' in c)
        f['choices_by_key'] = dict(
            (c['key'], c) for c in f['choices'] if 'key' in c)
        f['choices_by_id'] = dict(
            (c['id'], c) for c in f['choices'] if 'id' in c)

    # Partition CKAN field ids into core ("existing") and extension
    # ("extra") fields for both packages and resources.
    self.all_package_fields = frozenset(
        ckan_id for ckan_id, ignore, field
        in self.dataset_field_iter(include_existing=True))
    self.extra_package_fields = frozenset(
        ckan_id for ckan_id, ignore, field
        in self.dataset_field_iter(include_existing=False))
    self.existing_package_fields = self.all_package_fields - self.extra_package_fields
    self.all_resource_fields = frozenset(
        ckan_id for ckan_id, ignore, field
        in self.resource_field_iter(include_existing=True))
    self.extra_resource_fields = frozenset(
        ckan_id for ckan_id, ignore, field
        in self.resource_field_iter(include_existing=False))
    self.existing_resource_fields = self.all_resource_fields - self.extra_resource_fields
def _setup_template_variables(self, context, data_dict):
    """Populate the template context (c) for the user page, localising
    each dataset's multilang fields into the active language."""
    c.is_sysadmin = authz.is_sysadmin(c.user)
    try:
        user_dict = get_action('user_show')(context, data_dict)
    except NotFound:
        abort(404, _('User not found'))
    except NotAuthorized:
        abort(401, _('Not authorized to see this page'))
    c.user_dict = user_dict

    lang = get_lang()[0]

    # MULTILANG - Localizing Datasets names and descriptions in search list
    for item in c.user_dict.get('datasets'):
        log.info(':::::::::::: Retrieving the corresponding localized title and abstract :::::::::::::::')
        q_results = model.Session.query(PackageMultilang).filter(
            PackageMultilang.package_id == item.get('id'),
            PackageMultilang.lang == lang).all()
        if q_results:
            # Overwrite each localised field on the dataset dict.
            for result in q_results:
                item[result.field] = result.text

    c.is_myself = user_dict['name'] == c.user
    c.about_formatted = h.render_markdown(user_dict['about'])
def _read(self, id, limit):
    # FIXME: copied and modified from GroupController to collect
    # sub organizations, create c.fields_grouped and hard-code
    # search facets
    ''' This is common code used by both read and bulk_process'''
    group_type = self._get_group_type(id.split('@')[0])
    context = {'model': model, 'session': model.Session,
               'user': c.user or c.author,
               'schema': self._db_to_form_schema(group_type=group_type),
               'for_view': True, 'extras_as_string': True}

    # Get the subgorgs of this org
    org_id = c.group_dict.get('id')
    q = c.q = request.params.get('q', '')
    # Quote each sub-organization id for the Solr query.
    suborgs = ['"' + org + '"' for org in get_suborgs(org_id)]
    if suborgs != []:
        # Search this org OR any of its sub-orgs.
        q += ' owner_org:("' + org_id + '" OR ' + ' OR '.join(suborgs) + ')'
    else:
        q += ' owner_org:"%s"' % org_id
    c.description_formatted = h.render_markdown(c.group_dict.get('description'))
    context['return_query'] = True

    try:
        page = int(request.params.get('page', 1))
    except ValueError, e:
        abort(400, ('"page" parameter must be an integer'))
def _read(self, id, limit):
    """ This is common code used by both read and bulk_process"""
    group_type = self._get_group_type(id.split("@")[0])
    context = {
        "model": model,
        "session": model.Session,
        "user": c.user or c.author,
        "schema": self._db_to_form_schema(group_type=group_type),
        "for_view": True,
        "extras_as_string": True,
    }

    q = c.q = request.params.get("q", "")
    # Search within group: restrict the query to this org or group.
    if c.group_dict.get("is_organization"):
        q += ' owner_org:"%s"' % c.group_dict.get("id")
    else:
        q += ' groups:"%s"' % c.group_dict.get("name")
    c.description_formatted = h.render_markdown(c.group_dict.get("description"))
    context["return_query"] = True

    # c.group_admins is used by CKAN's legacy (Genshi) templates only,
    # if we drop support for those then we can delete this line.
    c.group_admins = new_authz.get_group_or_org_admin_ids(c.group.id)

    try:
        page = int(request.params.get("page", 1))
    except ValueError, e:
        abort(400, ('"page" parameter must be an integer'))
def _read(self, id, limit):
    ''' This is common code used by both read and bulk_process'''
    group_type = self._get_group_type(id.split('@')[0])
    context = {'model': model, 'session': model.Session,
               'user': c.user or c.author,
               'schema': self._db_to_form_schema(group_type=group_type),
               'for_view': True, 'extras_as_string': True}

    q = c.q = request.params.get('q', '')
    # Search within group: restrict the query to this org or group.
    if c.group_dict.get('is_organization'):
        q += ' owner_org:"%s"' % c.group_dict.get('id')
    else:
        q += ' groups:"%s"' % c.group_dict.get('name')
    c.description_formatted = h.render_markdown(c.group_dict.get('description'))
    context['return_query'] = True

    # c.group_admins is used by CKAN's legacy (Genshi) templates only,
    # if we drop support for those then we can delete this line.
    c.group_admins = new_authz.get_group_or_org_admin_ids(c.group.id)

    try:
        page = int(request.params.get('page', 1))
    except ValueError, e:
        abort(400, ('"page" parameter must be an integer'))
def package_to_api(pkg, context):
    """Convert a Package domain object to the legacy API dict form.

    The shape depends on context['api_version']: v1 refers to related
    packages/groups by name, v2 by id.
    """
    api_version = context.get('api_version')
    assert api_version, 'No api_version supplied in context'
    dictized = package_dictize(pkg, context)
    dictized.pop("revision_timestamp")
    # Drop tags that belong to a vocabulary; only free tags are exposed.
    dictized["tags"] = [tag["name"] for tag in dictized["tags"] \
                        if not tag.get('vocabulary_id')]
    dictized["extras"] = dict((extra["key"], extra["value"])
                              for extra in dictized["extras"])
    dictized['license'] = pkg.license.title if pkg.license else None
    dictized['ratings_average'] = pkg.get_average_rating()
    dictized['ratings_count'] = len(pkg.ratings)
    dictized['notes_rendered'] = h.render_markdown(pkg.notes)
    site_url = config.get('ckan.site_url', None)
    if site_url:
        dictized['ckan_url'] = '%s/dataset/%s' % (site_url, pkg.name)
    for resource in dictized["resources"]:
        resource_dict_to_api(resource, pkg.id, context)

    def make_api_1(package_id):
        # v1 refers to packages by name.
        return pkg.get(package_id).name

    def make_api_2(package_id):
        # v2 refers to packages by id.
        return package_id

    if api_version == 1:
        api_fn = make_api_1
        dictized["groups"] = [group["name"] for group in dictized["groups"]]
        # FIXME why is this just for version 1?
        if pkg.resources:
            dictized['download_url'] = pkg.resources[0].url
    else:
        api_fn = make_api_2
        dictized["groups"] = [group["id"] for group in dictized["groups"]]

    subjects = dictized.pop("relationships_as_subject")
    objects = dictized.pop("relationships_as_object")
    relationships = []
    # Relationships where this package is the object are reported from
    # this package's point of view, so the type is reversed.
    for rel in objects:
        model = context['model']
        swap_types = model.PackageRelationship.forward_to_reverse_type
        type = swap_types(rel['type'])
        relationships.append({'subject': api_fn(rel['object_package_id']),
                              'type': type,
                              'object': api_fn(rel['subject_package_id']),
                              'comment': rel["comment"]})
    for rel in subjects:
        relationships.append({'subject': api_fn(rel['subject_package_id']),
                              'type': rel['type'],
                              'object': api_fn(rel['object_package_id']),
                              'comment': rel["comment"]})
    dictized['relationships'] = relationships
    return dictized
def test_tag_names_dont_match_non_space_whitespace(self):
    """Asserts that the only piece of whitespace matched in a tagname
    is a space"""
    output = '<p><a href="/tag/Bad">tag:Bad</a>'
    for ch in '\t\n\r\f\v':
        data = 'tag:Bad' + ch + 'space'
        result = h.render_markdown(data)
        assert output in result, '\nGot: %s\nWanted: %s' % (result, output)
def render_markdown_strip(text, extract_length=190):
    ''' return the plain text representation of markdown encoded text.
    That is the texted without any html tags.  If extract_length is 0 then
    it will not be truncated.'''
    # NOTE(review): extract_length is passed positionally as the second
    # argument to h.render_markdown -- confirm the helper's second
    # parameter really is the extract length for this CKAN version.
    result_text = h.render_markdown(text, extract_length)
    # Collapse newlines to spaces and HTML-escape double quotes so the
    # result is safe to embed in an attribute.
    result = result_text.rstrip('\n').replace(
        '\n', ' ').replace('\r', '').replace('"', '&quot;')
    return result
def test_tag_names_dont_match_non_space_whitespace(self):
    """Asserts that the only piece of whitespace matched in a tagname
    is a space"""
    exp = '<a href="/tag/Bad">tag:Bad</a>'
    for ch in "\t\n\r\f\v":
        instr = "tag:Bad" + ch + "space"
        out = h.render_markdown(instr)
        assert exp in out, "\nGot: %s\nWanted: %s" % (out, exp)
def test_multiline_links(self):
    # Reference-style link definitions on their own lines are resolved
    # into inline <a> tags.
    # NOTE(review): the exact internal line breaks of these literals were
    # lost in a whitespace-mangled paste -- confirm against VCS history.
    instr = u'''I get 10 times more traffic from [Google][] than from [Yahoo][] or [MSN][].

[google]: http://google.com/ "Google"
[yahoo]: http://search.yahoo.com/ "Yahoo Search"
[msn]: http://search.msn.com/ "MSN Search"'''
    exp = '''<p>I get 10 times more traffic from <a href="http://google.com/" title="Google">Google</a> than from <a href="http://search.yahoo.com/" title="Yahoo Search">Yahoo</a> or <a href="http://search.msn.com/" title="MSN Search">MSN</a>.</p>'''
    out = h.render_markdown(instr)
    assert exp in out, '\nGot: %s\nWanted: %s' % (out, exp)
def _setup_template_variables(self, context, data_dict):
    """Populate the template context (c) for the user page: sysadmin
    flag, the user dict and the rendered 'about' text."""
    c.is_sysadmin = new_authz.is_sysadmin(c.user)
    try:
        user_dict = get_action('user_show')(context, data_dict)
    except NotFound:
        abort(404, _('User not found'))
    except NotAuthorized:
        abort(401, _('Not authorized to see this page'))
    c.user_dict = user_dict
    c.is_myself = user_dict['name'] == c.user
    c.about_formatted = h.render_markdown(user_dict['about'])
def _setup_template_variables(self, context, data_dict):
    """Populate the template context (c) for the user page: sysadmin
    flag, the user dict and the rendered 'about' text."""
    c.is_sysadmin = authz.is_sysadmin(c.user)
    try:
        user_dict = get_action("user_show")(context, data_dict)
    except NotFound:
        abort(404, _("User not found"))
    except NotAuthorized:
        abort(401, _("Not authorized to see this page"))
    c.user_dict = user_dict
    c.is_myself = user_dict["name"] == c.user
    c.about_formatted = h.render_markdown(user_dict["about"])
def _distribution_url_graph(self, distribution, resource_dict):
    """Add contentUrl/url triples and a rendered description for the
    distribution node."""
    resource_url = resource_dict.get('url')
    resource_type = resource_dict.get('resource_type')
    if resource_url and resource_type == 'file':
        # Only plain files get a contentUrl.
        self.g.add((distribution, SCHEMA.contentUrl, Literal(resource_url)))
    if resource_url:
        self.g.add((distribution, SCHEMA.url, Literal(resource_url)))
    theme_plugin = plugin.StadtzhThemePlugin()
    joined = " ".join(theme_plugin.get_resource_descriptions(resource_dict))
    rendered = render_markdown(joined)
    self.g.add((distribution, SCHEMA.description, Literal(rendered)))
def test_markdown(self):
    # Basic markdown constructs render to the expected HTML.
    # NOTE(review): exact internal line breaks of these literals were
    # lost in a whitespace-mangled paste -- confirm against VCS history.
    instr = '''# Hello World

**Some bolded text.**

*Some italicized text.*
'''
    exp = '''<h1>Hello World</h1>
<p><strong>Some bolded text.</strong></p>
<p><em>Some italicized text.</em></p>'''
    out = h.render_markdown(instr)
    assert out == exp, out
def methodology_bk_compat(meth, other, render=True):
    """Normalise legacy methodology values to a (methodology, detail) pair.

    Returns (None, None) when neither value is present. A standard
    methodology (other than "Other") passes through with no detail.
    Anything else is reported as "Other" with the free-text detail,
    rendered as markdown when *render* is true.
    """
    if not meth and not other:
        return (None, None)
    standard_meths = ["Census", "Sample Survey",
                      "Direct Observational Data/Anecdotal Data",
                      "Registry", "Other"]
    if meth in standard_meths and meth != "Other":
        # Both branches of the original `if render:` returned the same
        # value; the redundant conditional is collapsed here.
        return (meth, None)
    elif other:
        if render:
            return ("Other", h.render_markdown(other))
        return ("Other", other)
    else:
        # Legacy combined format: "Other - <detail>".
        parts = meth.split('Other - ')
        if render:
            return ("Other", h.render_markdown(parts[0]))
        return ("Other", parts[0])
def _get_package(self, id):
    """
    Given an ID use the logic layer to fetch the Package and a dict
    representation of it as well as adding formatted notes and the
    publisher to the template context (c).
    """
    import genshi
    context = {'model': model, 'session': model.Session,
               'user': c.user or c.author,
               'extras_as_string': True,
               'for_view': True}
    try:
        c.pkg_dict = get_action('package_show')(context, {'id': id})
        c.pkg = context['package']
    except ObjectNotFound:
        abort(404, _('Dataset not found'))
    except NotAuthorized:
        abort(401, _('Unauthorized to read package %s') % id)

    # The first organization the package belongs to is its publisher.
    groups = c.pkg.get_groups('organization')
    if groups:
        c.publisher = groups[0]
    else:
        log.warning("Package {0} is not a member of any group!".format(id))

    # Try and render the notes as markdown for display on the page. Most
    # unpublished items *won't* be markdown if they've come directly from the
    # CSV - unless they've been edited.
    try:
        notes_formatted = render_markdown(c.pkg.notes)
        c.pkg_notes_formatted = unicode(genshi.HTML(notes_formatted))
        c.release_notes_formatted = None
        notes = unpublished_release_notes(c.pkg_dict)
        if notes and notes.strip():
            c.release_notes_formatted = unicode(genshi.HTML(
                render_markdown(notes)))
    except Exception:
        # Rendering/parsing failed -- fall back to the raw notes text.
        c.pkg_notes_formatted = c.pkg.notes
def view_help(self):
    """Render the FAQ page, converting each h2-delimited markdown
    section into a collapsible <details>/<summary> block."""
    def _get_help_text(language):
        # Load the bundled faq_<language>.md from the package data.
        return pkg_resources.resource_string(
            __name__,
            '/'.join(['public', 'static', 'faq_{language}.md'.format(
                language=language
            )])
        )

    try:
        # Try to load FAQ text for the user's language.
        faq_text = _get_help_text(c.language)
    except IOError:
        # Fall back to using English if no local language could be found.
        faq_text = _get_help_text(u'en')

    # Convert the markdown to HTML ...
    faq_html = render_markdown(faq_text.decode("utf-8"), allow_html=True)
    h = html.fromstring(faq_html)

    # Get every FAQ point header.
    for faq_section in h.xpath('.//h2'):
        details = ET.Element('details')
        summary = ET.Element('summary')

        # Place the new details tag where the FAQ section header used to
        # be.
        faq_section.addprevious(details)

        # Get all the text that follows the FAQ header.
        while True:
            next_node = faq_section.getnext()
            if next_node is None or next_node.tag in ('h1', 'h2'):
                break
            # ... and add it to the details.
            details.append(next_node)

        # Move the FAQ header to the top of the summary tag.
        summary.insert(0, faq_section)
        # Move the summary tag to the top of the details tag.
        details.insert(0, summary)

        # We don't actually want the FAQ headers to be headings, so strip
        # the tags and just leave the text.
        faq_section.drop_tag()

    return render('help.html', extra_vars={
        'faq_html': html.tostring(h),
        # For use with the inline debugger.
        'faq_text': faq_text
    })
def _setup_template_variables(self, context, data_dict):
    """Populate the template context (c) for the user page. NotFound is
    handled by redirecting to the login page rather than a 404."""
    c.is_sysadmin = authz.is_sysadmin(c.user)
    try:
        user_dict = get_action('user_show')(context, data_dict)
    except NotFound:
        h.flash_error(_('Not authorized to see this page'))
        h.redirect_to(controller='user', action='login')
    except NotAuthorized:
        abort(403, _('Not authorized to see this page'))
    c.user_dict = user_dict
    c.is_myself = user_dict['name'] == c.user
    c.about_formatted = h.render_markdown(user_dict['about'])
def test_multiline_links(self):
    # Reference-style link definitions on their own lines are resolved
    # into inline <a> tags.
    # NOTE(review): the exact internal line breaks of these literals were
    # lost in a whitespace-mangled paste -- confirm against VCS history.
    instr = u'''I get 10 times more traffic from [Google][] than from [Yahoo][] or [MSN][].

[google]: http://google.com/ "Google"
[yahoo]: http://search.yahoo.com/ "Yahoo Search"
[msn]: http://search.msn.com/ "MSN Search"'''
    exp = """<p>I get 10 times more traffic from <a href="http://google.com/" title="Google">Google</a> than from <a href="http://search.yahoo.com/" title="Yahoo Search">Yahoo</a> or <a href="http://search.msn.com/" title="MSN Search">MSN</a>.
</p>"""
    # NB when this is put into Genshi, it will close the tag for you.
    out = h.render_markdown(instr)
    assert exp in out, "\nGot: %s\nWanted: %s" % (out, exp)
def test_markdown(self):
    # Basic markdown constructs render to the expected HTML.
    # NOTE(review): exact internal line breaks of these literals were
    # lost in a whitespace-mangled paste -- confirm against VCS history.
    instr = """# Hello World

**Some bolded text.**

*Some italicized text.*
"""
    exp = """<h1>Hello World</h1>
<p><strong>Some bolded text.</strong>
</p>
<p><em>Some italicized text.</em>
</p>"""
    out = h.render_markdown(instr)
    assert out == exp
def test_render_markdown(self, data, output, allow_html):
    """Parametrised check: rendering *data* yields exactly *output*."""
    rendered = h.render_markdown(data, allow_html=allow_html)
    assert rendered == output
def test_markdown_blank(self):
    """None input renders to the empty string."""
    assert h.render_markdown(None) == ''
def test_internal_link(self):
    """dataset: references become links to the dataset page."""
    rendered = h.render_markdown('dataset:test-_pkg')
    expected = '<p><a href="/dataset/test-_pkg">dataset:test-_pkg</a></p>'
    assert expected in rendered, '\nGot: %s\nWanted: %s' % (rendered, expected)
def test_internal_tag_linked_with_quotes_and_space(self):
    """Asserts links like 'tag:"test tag"' work"""
    rendered = h.render_markdown('tag:"test tag" foobar')
    expected = '<p><a href="/tag/test%20tag">tag:"test tag"</a> foobar</p>'
    assert expected in rendered, '\nGot: %s\nWanted: %s' % (rendered, expected)
def test_tag_names_match_simple_punctuation(self):
    """Asserts punctuation and capital letters are matched in the tag
    name"""
    rendered = h.render_markdown('tag:"Test- _." foobar')
    expected = '<p><a href="/tag/Test-%20_.">tag:"Test- _."</a> foobar</p>'
    assert expected in rendered, '\nGot: %s\nWanted: %s' % (rendered, expected)
def test_tags_img(self):
    """Markdown image syntax produces an <img> tag."""
    rendered = h.render_markdown(u'![image](/image.png)')
    eq_(rendered, u'<p><img alt="image" src="/image.png"></p>')
def test_tags_h1(self):
    """A single leading # produces an <h1>."""
    rendered = h.render_markdown(u'#heading')
    eq_(rendered, u'<h1>heading</h1>')
def _read(id, limit, group_type):
    u''' This is common code used by both read and bulk_process'''
    extra_vars = {}
    context = {
        u'model': model,
        u'session': model.Session,
        u'user': g.user,
        u'schema': _db_to_form_schema(group_type=group_type),
        u'for_view': True,
        u'extras_as_string': True
    }

    q = request.params.get(u'q', u'')
    # TODO: Remove
    # ckan 2.9: Adding variables that were removed from c object for
    # compatibility with templates in existing extensions
    g.q = q
    # Search within group: restrict the filter query to this org/group.
    if g.group_dict.get(u'is_organization'):
        fq = u' owner_org:"%s"' % g.group_dict.get(u'id')
    else:
        fq = u' groups:"%s"' % g.group_dict.get(u'name')

    extra_vars["q"] = q

    g.description_formatted = \
        h.render_markdown(g.group_dict.get(u'description'))

    context['return_query'] = True

    page = h.get_page_number(request.params)

    # most search operations should reset the page counter:
    params_nopage = [(k, v) for k, v in request.params.items()
                     if k != u'page']
    sort_by = request.params.get(u'sort', None)

    def search_url(params):
        # Build a read/bulk_process URL carrying the given query params.
        controller = lookup_group_controller(group_type)
        action = u'bulk_process' if getattr(
            g, u'action', u'') == u'bulk_process' else u'read'
        url = h.url_for(u'.'.join([controller, action]), id=id)
        params = [(k, v.encode(u'utf-8') if isinstance(v, string_types)
                   else str(v)) for k, v in params]
        return url + u'?' + urlencode(params)

    def drill_down_url(**by):
        # Add facet params to the current URL for drill-down links.
        return h.add_url_param(
            alternative_url=None,
            controller=u'group',
            action=u'read',
            extras=dict(id=g.group_dict.get(u'name')),
            new_params=by)

    extra_vars["drill_down_url"] = drill_down_url

    def remove_field(key, value=None, replace=None):
        # Remove a facet param from the current URL.
        controller = lookup_group_controller(group_type)
        return h.remove_url_param(
            key,
            value=value,
            replace=replace,
            controller=controller,
            action=u'read',
            extras=dict(id=g.group_dict.get(u'name')))

    extra_vars["remove_field"] = remove_field

    def pager_url(q=None, page=None):
        params = list(params_nopage)
        params.append((u'page', page))
        return search_url(params)

    details = _get_search_details()
    extra_vars[u'fields'] = details[u'fields']
    extra_vars[u'fields_grouped'] = details[u'fields_grouped']
    fq += details[u'fq']
    search_extras = details[u'search_extras']

    # TODO: Remove
    # ckan 2.9: Adding variables that were removed from c object for
    # compatibility with templates in existing extensions
    g.fields = extra_vars[u'fields']
    g.fields_grouped = extra_vars[u'fields_grouped']

    facets = OrderedDict()
    default_facet_titles = {
        u'organization': _(u'Organizations'),
        u'groups': _(u'Groups'),
        u'data_type': _(u'Data type'),
        u'tags': _(u'Tags'),
        u'res_format': _(u'Formats'),
        u'license_id': _(u'Licenses')
    }
    for facet in h.facets():
        if facet in default_facet_titles:
            facets[facet] = default_facet_titles[facet]
        else:
            facets[facet] = facet

    # Facet titles
    facets = _update_facet_titles(facets, group_type)
    extra_vars["facet_titles"] = facets

    data_dict = {
        u'q': q,
        u'fq': fq,
        u'include_private': True,
        u'facet.field': list(facets.keys()),
        u'rows': limit,
        u'sort': sort_by,
        u'start': (page - 1) * limit,
        u'extras': search_extras
    }
    # Drop the form schema before calling the search action.
    context_ = dict((k, v) for (k, v) in context.items() if k != u'schema')
    try:
        query = get_action(u'package_search')(context_, data_dict)
    except search.SearchError as se:
        log.error(u'Group search error: %r', se.args)
        extra_vars["query_error"] = True
        extra_vars["page"] = h.Page(collection=[])
    else:
        extra_vars["page"] = h.Page(
            collection=query['results'],
            page=page,
            url=pager_url,
            item_count=query['count'],
            items_per_page=limit)

        # TODO: Remove
        # ckan 2.9: Adding variables that were removed from c object for
        # compatibility with templates in existing extensions
        g.group_dict['package_count'] = query['count']

        extra_vars["search_facets"] = g.search_facets = query['search_facets']
        extra_vars["search_facets_limits"] = g.search_facets_limits = {}
        for facet in g.search_facets.keys():
            limit = int(
                request.params.get(u'_%s_limit' % facet,
                                   config.get(u'search.facets.default', 10)))
            g.search_facets_limits[facet] = limit
        extra_vars["page"].items = query['results']

    extra_vars["sort_by_selected"] = sort_by

    # TODO: Remove
    # ckan 2.9: Adding variables that were removed from c object for
    # compatibility with templates in existing extensions
    g.facet_titles = facets
    g.page = extra_vars["page"]

    extra_vars["group_type"] = group_type
    _setup_template_variables(context, {u'id': id}, group_type=group_type)
    return extra_vars
def markdown(self, ver=None):
    """API action: render the 'q' request parameter as markdown."""
    source = request.params.get('q', '')
    rendered = h.render_markdown(source)
    return self._finish_ok(rendered)
def test_tag_names_with_unicode_alphanumeric(self):
    """Asserts that unicode alphanumeric characters are captured"""
    rendered = h.render_markdown(u'tag:"Japanese katakana \u30a1" blah')
    expected = u'<p><a href="/tag/Japanese%20katakana%20%E3%82%A1">tag:"Japanese katakana \u30a1"</a> blah</p>'
    assert expected in rendered, u'\nGot: %s\nWanted: %s' % (rendered, expected)
def package_to_api(pkg, context):
    """Convert a Package domain object to the legacy API dict form.

    The shape depends on context['api_version']: v1 refers to related
    packages/groups by name, v2 by id.
    """
    api_version = context.get('api_version')
    assert api_version, 'No api_version supplied in context'
    dictized = package_dictize(pkg, context)
    # Drop tags that belong to a vocabulary; only free tags are exposed.
    dictized["tags"] = [tag["name"] for tag in dictized["tags"] \
                        if not tag.get('vocabulary_id')]
    dictized["extras"] = dict(
        (extra["key"], extra["value"]) for extra in dictized["extras"])
    dictized['license'] = pkg.license.title if pkg.license else None
    dictized['ratings_average'] = pkg.get_average_rating()
    dictized['ratings_count'] = len(pkg.ratings)
    dictized['notes_rendered'] = h.render_markdown(pkg.notes)
    site_url = config.get('ckan.site_url', None)
    if site_url:
        dictized['ckan_url'] = '%s/dataset/%s' % (site_url, pkg.name)
    for resource in dictized["resources"]:
        resource_dict_to_api(resource, pkg.id, context)

    def make_api_1(package_id):
        # v1 refers to packages by name.
        return pkg.get(package_id).name

    def make_api_2(package_id):
        # v2 refers to packages by id.
        return package_id

    if api_version == 1:
        api_fn = make_api_1
        dictized["groups"] = [group["name"] for group in dictized["groups"]]
        # FIXME why is this just for version 1?
        if pkg.resources:
            dictized['download_url'] = pkg.resources[0].url
    else:
        api_fn = make_api_2
        dictized["groups"] = [group["id"] for group in dictized["groups"]]

    subjects = dictized.pop("relationships_as_subject")
    objects = dictized.pop("relationships_as_object")
    relationships = []
    # Relationships where this package is the object are reported from
    # this package's point of view, so the type is reversed.
    for rel in objects:
        model = context['model']
        swap_types = model.PackageRelationship.forward_to_reverse_type
        type = swap_types(rel['type'])
        relationships.append({
            'subject': api_fn(rel['object_package_id']),
            'type': type,
            'object': api_fn(rel['subject_package_id']),
            'comment': rel["comment"]
        })
    for rel in subjects:
        relationships.append({
            'subject': api_fn(rel['subject_package_id']),
            'type': rel['type'],
            'object': api_fn(rel['object_package_id']),
            'comment': rel["comment"]
        })
    dictized['relationships'] = relationships
    return dictized
def test_tag_names_do_not_match_commas(self):
    """Asserts commas don't get matched as part of a tag name"""
    rendered = h.render_markdown("tag:Test,tag foobar")
    assert rendered == '<p><a href="/dataset/?tags=Test">tag:Test</a>,tag foobar</p>'
def test_render_markdown_with_js(self):
    """A javascript: URL must be stripped from the rendered link."""
    source = u'[text](javascript: alert(1))'
    eq_(h.render_markdown(source), u'<p><a>text</a></p>')
def test_tag_names_do_not_match_commas(self):
    """Asserts commas don't get matched as part of a tag name"""
    rendered = h.render_markdown('tag:Test,tag foobar')
    wanted = '<a href="/tag/Test">tag:Test</a>,tag foobar'
    assert wanted in rendered, '\nGot: %s\nWanted: %s' % (rendered, wanted)
def test_event_attributes(self):
    """Event-handler attributes (and their tags' content) are sanitized."""
    html_in = u'<p onclick="some.script"><img onmouseover="some.script" src="image.png" /> and text</p>'
    eq_(h.render_markdown(html_in), u'<p>and text</p>')
def test_render_markdown_allow_html(self):
    """With allow_html=True raw HTML passes through unchanged."""
    raw = '<h1>moo</h1>'
    eq_(h.render_markdown(raw, allow_html=True), raw)
def test_tags_h3(self):
    """Three hashes (even without a space) render as an h3 heading."""
    eq_(h.render_markdown(u'###heading'), u'<h3>heading</h3>')
def test_render_markdown_not_allow_html(self):
    """By default raw HTML tags are stripped, leaving plain text."""
    eq_(h.render_markdown('<h1>moo</h1>'), '<p>moo</p>')
def test_bold(self):
    """Double asterisks render as <strong>."""
    rendered = h.render_markdown(u'Something **important**')
    eq_(rendered, u'<p>Something <strong>important</strong></p>')
def test_render_markdown_auto_link_without_path(self):
    """A bare domain URL (no path) is auto-linked."""
    source = 'http://example.com'
    expected = '<p><a href="http://example.com" target="_blank" rel="nofollow">http://example.com</a></p>'
    eq_(h.render_markdown(source), expected)
def test_internal_tag_with_no_opening_quote_only_matches_single_word(self):
    """Asserts that without an opening quote only one word is matched"""
    rendered = h.render_markdown('tag:test tag" foobar')  # should match 'tag:test'
    wanted = '<a href="/tag/test">tag:test</a> tag" foobar'
    assert wanted in rendered, '\nGot: %s\nWanted: %s' % (rendered, wanted)
def test_render_markdown_auto_link(self):
    """A full URL with a path is auto-linked with nofollow/new-tab attrs."""
    source = 'https://example.com/page.html'
    expected = '<p><a href="https://example.com/page.html" target="_blank" rel="nofollow">https://example.com/page.html</a></p>'
    eq_(h.render_markdown(source), expected)
def test_internal_tag_link(self):
    """Asserts links like 'tag:test-tag' work"""
    rendered = h.render_markdown('tag:test-tag foobar')
    wanted = '<a href="/tag/test-tag">tag:test-tag</a> foobar'
    assert wanted in rendered, '\nGot: %s\nWanted: %s' % (rendered, wanted)
def test_render_markdown_auto_link_ignoring_trailing_punctuation(self):
    """Trailing punctuation is left outside the auto-detected link."""
    source = 'My link: http://example.com/page.html.'
    expected = '<p>My link: <a href="http://example.com/page.html" target="_blank" rel="nofollow">http://example.com/page.html</a>.</p>'
    eq_(h.render_markdown(source), expected)
def test_evil_markdown(self):
    """<script> tags are removed entirely from rendered output."""
    rendered = h.render_markdown('Evil <script src="http://evilserver.net/evil.js";>')
    expected = '''<p>Evil </p>'''
    assert rendered == expected, rendered
def test_render_naughty_markdown(self):
    """Malformed list/link markdown degrades gracefully, still linking URLs."""
    source = u'* [Foo (http://foo.bar) * Bar] (http://foo.bar)'
    expected = u'<ul>\n<li>[Foo (<a href="http://foo.bar" target="_blank" rel="nofollow">http://foo.bar</a>) * Bar] (<a href="http://foo.bar" target="_blank" rel="nofollow">http://foo.bar</a>)</li>\n</ul>'
    eq_(h.render_markdown(source), expected)
def _read(self, id, limit, group_type):
    ''' This is common code used by both read and bulk_process'''
    # NOTE(review): this method communicates with the templates almost
    # entirely through the pylons-style thread-local context `c`.
    context = {'model': model, 'session': model.Session,
               'user': c.user,
               'schema': self._db_to_form_schema(group_type=group_type),
               'for_view': True, 'extras_as_string': True}

    q = c.q = request.params.get('q', '')
    # Search within group
    if c.group_dict.get('is_organization'):
        q += ' owner_org:"%s"' % c.group_dict.get('id')
    else:
        q += ' groups:"%s"' % c.group_dict.get('name')

    c.description_formatted = \
        h.render_markdown(c.group_dict.get('description'))

    context['return_query'] = True

    page = h.get_page_number(request.params)

    # most search operations should reset the page counter:
    params_nopage = [(k, v) for k, v in request.params.items()
                     if k != 'page']
    sort_by = request.params.get('sort', None)

    def search_url(params):
        # Rebuild the current search URL for read/bulk_process, utf-8
        # encoding each parameter value (Python 2 `basestring` check).
        controller = lookup_group_controller(group_type)
        action = 'bulk_process' if c.action == 'bulk_process' else 'read'
        url = h.url_for(controller=controller, action=action, id=id)
        params = [(k, v.encode('utf-8') if isinstance(v, basestring)
                   else str(v)) for k, v in params]
        return url + u'?' + urlencode(params)

    def drill_down_url(**by):
        # URL that narrows the current search by an extra facet value.
        return h.add_url_param(alternative_url=None,
                               controller='group', action='read',
                               extras=dict(id=c.group_dict.get('name')),
                               new_params=by)

    c.drill_down_url = drill_down_url

    def remove_field(key, value=None, replace=None):
        # URL with one active facet filter removed (or replaced).
        controller = lookup_group_controller(group_type)
        return h.remove_url_param(key, value=value, replace=replace,
                                  controller=controller, action='read',
                                  extras=dict(id=c.group_dict.get('name')))

    c.remove_field = remove_field

    def pager_url(q=None, page=None):
        # Pagination callback handed to h.Page below.
        params = list(params_nopage)
        params.append(('page', page))
        return search_url(params)

    c.search_url_params = urlencode(_encode_params(params_nopage))

    try:
        c.fields = []
        c.fields_grouped = {}
        search_extras = {}
        # Any other request param becomes either a fielded search term
        # appended to q or, when prefixed with 'ext_', an extra passed
        # through to package_search.
        for (param, value) in request.params.items():
            if param not in ['q', 'page', 'sort'] \
                    and len(value) and not param.startswith('_'):
                if not param.startswith('ext_'):
                    c.fields.append((param, value))
                    q += ' %s: "%s"' % (param, value)
                    if param not in c.fields_grouped:
                        c.fields_grouped[param] = [value]
                    else:
                        c.fields_grouped[param].append(value)
                else:
                    search_extras[param] = value

        facets = OrderedDict()

        default_facet_titles = {'organization': _('Organizations'),
                                'groups': _('Groups'),
                                'tags': _('Tags'),
                                'res_format': _('Formats'),
                                'license_id': _('Licenses')}

        # Known facets get a translated title; unknown ones keep their
        # raw field name as title.
        for facet in h.facets():
            if facet in default_facet_titles:
                facets[facet] = default_facet_titles[facet]
            else:
                facets[facet] = facet

        # Facet titles
        self._update_facet_titles(facets, group_type)

        c.facet_titles = facets

        data_dict = {'q': q, 'fq': '', 'include_private': True,
                     'facet.field': facets.keys(),
                     'rows': limit, 'sort': sort_by,
                     'start': (page - 1) * limit,
                     'extras': search_extras}

        # Drop the form schema so package_search sees a plain context.
        context_ = dict((k, v) for (k, v) in context.items()
                        if k != 'schema')
        query = get_action('package_search')(context_, data_dict)

        c.page = h.Page(collection=query['results'], page=page,
                        url=pager_url, item_count=query['count'],
                        items_per_page=limit)

        c.group_dict['package_count'] = query['count']
        c.search_facets = query['search_facets']
        c.search_facets_limits = {}
        for facet in c.search_facets.keys():
            # NOTE(review): rebinds the `limit` parameter after its last
            # real use ('rows' above); harmless but easy to misread.
            limit = int(request.params.get(
                '_%s_limit' % facet,
                config.get('search.facets.default', 10)))
            c.search_facets_limits[facet] = limit
        c.page.items = query['results']

        c.sort_by_selected = sort_by
    except search.SearchError, se:
        # Python 2 except syntax; show an empty page plus an error flag.
        log.error('Group search error: %r', se.args)
        c.query_error = True
        c.page = h.Page(collection=[])
def test_internal_tag_with_no_closing_quote_does_not_match(self):
    """Asserts that a quoted tag with no closing quote produces no link.

    Fixes the docstring, which was copied from the no-*opening*-quote
    test and described the wrong behavior.
    """
    data = 'tag:"test tag foobar'
    out = h.render_markdown(data)
    assert "<a href" not in out
def test_tag_names_match_simple_punctuation(self):
    """Asserts punctuation and capital letters are matched in the tag name"""
    rendered = h.render_markdown('tag:"Test- _." foobar')
    assert rendered == '<p><a href="/dataset/?tags=Test-+_.">tag:"Test- _."</a> foobar</p>'
def _read(self, id, limit, group_type):  # noqa
    c.include_children_selected = False
    # Only organizations are handled by this override; plain groups
    # return early.  (NOTE(review): layout reconstructed — presumably a
    # bare `return`, with the string below a leftover no-op; confirm
    # against the original file.)
    if not c.group_dict.get('is_organization'):
        return

    ''' This is common code used by both read and bulk_process'''
    context = {'model': model, 'session': model.Session,
               'user': c.user or c.author,
               'schema': self._db_to_form_schema(group_type=group_type),
               'for_view': True, 'extras_as_string': True}

    c.description_formatted = \
        h.render_markdown(c.group_dict.get('description'))

    context['return_query'] = True

    # c.group_admins is used by CKAN's legacy (Genshi) templates only,
    # if we drop support for those then we can delete this line.
    c.group_admins = authz.get_group_or_org_admin_ids(c.group.id)

    page = self._get_page_number(request.params)

    # most search operations should reset the page counter:
    params_nopage = [(k, v) for k, v in request.params.items()
                     if k != 'page']
    sort_by = request.params.get('sort', None)

    def search_url(params):
        # Rebuild the current search URL for read/bulk_process, utf-8
        # encoding each parameter value (Python 2 `basestring` check).
        controller = lookup_group_controller(group_type)
        action = 'bulk_process' if c.action == 'bulk_process' else 'read'
        url = h.url_for(controller=controller, action=action, id=id)
        params = [(k, v.encode('utf-8') if isinstance(v, basestring)
                   else str(v)) for k, v in params]
        return url + u'?' + urlencode(params)

    def drill_down_url(**by):
        # URL that narrows the current search by an extra facet value.
        return h.add_url_param(alternative_url=None, controller='group',
                               action='read',
                               extras=dict(id=c.group_dict.get('name')),
                               new_params=by)

    c.drill_down_url = drill_down_url

    def remove_field(key, value=None, replace=None):
        # URL with one active facet filter removed (or replaced).
        return h.remove_url_param(key, value=value, replace=replace,
                                  controller='group', action='read',
                                  extras=dict(id=c.group_dict.get('name')))

    c.remove_field = remove_field

    def pager_url(q=None, page=None):
        # Pagination callback handed to h.Page below.
        params = list(params_nopage)
        params.append(('page', page))
        return search_url(params)

    try:
        q = c.q = request.params.get('q', '')
        fq = c.fq = request.params.get('fq', '')
        c.fields = []
        search_extras = {}
        for (param, value) in request.params.items():
            if param not in ['q', 'page', 'sort'] \
                    and len(value) and not param.startswith('_'):
                if not param.startswith('ext_'):
                    c.fields.append((param, value))
                    # Unlike the plain group read, field filters go into
                    # fq (filter query) rather than q.
                    fq += ' %s: "%s"' % (param, value)
                else:
                    search_extras[param] = value

        user_member_of_orgs = [org['id'] for org in
                               h.organizations_available('read')]

        # Members of this organization may see non-public datasets;
        # everyone else is restricted to capacity:"public".
        if (c.group and c.group.id in user_member_of_orgs):
            context['ignore_capacity_check'] = True
        else:
            fq += ' capacity:"public"'

        facets = OrderedDict()

        default_facet_titles = {'organization': _('Organizations'),
                                'groups': _('Groups'),
                                'tags': _('Tags'),
                                'res_format': _('Formats'),
                                'license_id': _('Licenses')}

        # Known facets get a translated title; unknown ones keep their
        # raw field name as title.
        for facet in g.facets:
            if facet in default_facet_titles:
                facets[facet] = default_facet_titles[facet]
            else:
                facets[facet] = facet

        # Facet titles
        self._update_facet_titles(facets, group_type)

        if 'capacity' in facets and (group_type != 'organization' or
                                     not user_member_of_orgs):
            del facets['capacity']

        c.facet_titles = facets

        # filter by organization with fq (filter query)
        # Datasets of all child organizations in the hierarchy tree are
        # included, not only this organization's own.
        c.include_children_selected = True
        children = _children_name_list(
            hierarchy_helpers.group_tree_section(
                c.group_dict.get('id'),
                include_parents=False,
                include_siblings=False).get('children', []))
        if not children:
            fq += ' organization:"%s"' % c.group_dict.get('name')
        else:
            fq += ' organization:("%s"' % c.group_dict.get('name')
            for name in children:
                if name:
                    fq += ' OR "%s"' % name
            fq += ")"

        data_dict = {'q': q, 'fq': fq, 'facet.field': facets.keys(),
                     'rows': limit, 'sort': sort_by,
                     'start': (page - 1) * limit,
                     'extras': search_extras}

        # Drop the form schema so package_search sees a plain context.
        context_ = dict((k, v) for (k, v) in context.items()
                        if k != 'schema')
        query = get_action('package_search')(context_, data_dict)

        c.page = h.Page(collection=query['results'], page=page,
                        url=pager_url, item_count=query['count'],
                        items_per_page=limit)

        c.group_dict['package_count'] = query['count']
        c.facets = query['facets']
        maintain.deprecate_context_item('facets',
                                        'Use `c.search_facets` instead.')

        c.search_facets = query['search_facets']
        c.search_facets_limits = {}
        for facet in c.facets.keys():
            # NOTE(review): rebinds the `limit` parameter after its last
            # real use ('rows' above); harmless but easy to misread.
            limit = int(request.params.get('_%s_limit' % facet,
                                           g.facets_default_number))
            c.search_facets_limits[facet] = limit
        c.page.items = query['results']

        c.sort_by_selected = sort_by
    except search.SearchError, se:
        # Python 2 except syntax; render an empty result page on error.
        log.error('Group search error: %r', se.args)
        c.query_error = True
        c.facets = {}
        c.page = h.Page(collection=[])