def slugify_string(string):
    """slugify_string the given string.

    Accepts bytes (decoded as UTF-8, undecodable sequences ignored) or text
    and returns a slug with underscores mapped to hyphens.

    BUG FIX: the original tried ``string.decode('utf-8')`` and caught
    ``UnicodeEncodeError`` -- but on Python 3 ``str`` has no ``.decode``
    (AttributeError), bytes decoding raises ``UnicodeDecodeError``, and the
    subsequent ``bytes.replace('_', '-')`` with str arguments raises
    TypeError, so the try branch could never succeed and str input crashed.
    This file uses f-strings elsewhere, so it targets Python 3; normalize
    to text up front and slugify once.
    """
    if isinstance(string, bytes):
        # tolerate malformed UTF-8 rather than crashing on stray bytes
        string = string.decode('utf-8', 'ignore')
    return slugify(string, '-').replace('_', '-')
def slugify_string(string):
    """slugify_string the given string.

    NOTE(review): this is a byte-for-byte duplicate redefinition of the
    preceding ``slugify_string``; being later in the module, it is the one
    that wins. Consider deleting one copy.

    BUG FIX (same as the first copy): the original caught
    ``UnicodeEncodeError`` around ``string.decode('utf-8')``, but on
    Python 3 that path raises AttributeError (str has no .decode) or
    UnicodeDecodeError/TypeError instead, so it always crashed for str
    input. Normalize bytes to text first and slugify once.
    """
    if isinstance(string, bytes):
        # tolerate malformed UTF-8 rather than crashing on stray bytes
        string = string.decode('utf-8', 'ignore')
    return slugify(string, '-').replace('_', '-')
def search_available_name(self, username):
    """Find a free username at most 30 characters long.

    Starts from the slugified name truncated to 30 chars; while that is
    taken, retries with the first 22 chars of the slug plus an 8-char
    random uuid4 suffix until ``self.is_available`` accepts one.
    """
    limit = 30
    suffix_len = 8
    stem = slugify(username, '-')[:limit - suffix_len]
    candidate = slugify(username, '-')[:limit]
    while not self.is_available(candidate):
        candidate = stem + uuid4().hex[:suffix_len]
    return candidate
def group_aron7awol_content(content_meta, content_type) -> dict:
    """Group metadata entries for the aron7awol repo.

    Films are keyed by the AVS forum post id parsed from the ``avs`` URL
    (either a ``post?id=NNN`` query or a ``post-NNN`` path); everything
    else is keyed by the slugified ``title``. Unparsable or missing
    entries are reported on stdout and skipped.
    """
    grouped_meta = {}
    if content_type == 'film':
        for meta in content_meta:
            if 'avs' not in meta:
                print(f"Missing beq_avs entry for {meta['repo_file']}")
                continue
            avs = meta['avs']
            avs_post_id = None
            marker = avs.find('post?id=')
            if marker != -1:
                # query-style URL: id follows 'post?id='
                avs_post_id = avs[marker + 8:]
            else:
                marker = avs.find('post-')
                if marker != -1:
                    # path-style URL: id follows 'post-'
                    avs_post_id = avs[marker + 5:]
                else:
                    print(
                        f"Unparsable post id {meta['repo_file']} - {avs}")
            if avs_post_id:
                grouped_meta.setdefault(avs_post_id, []).append(meta)
    else:
        for meta in content_meta:
            if 'title' in meta:
                grouped_meta.setdefault(slugify(meta['title'], '-'), []).append(meta)
    return grouped_meta
def run(self, doc):
    """Assign ids (and permalink anchors) to heading elements, and
    optionally demote heading levels by the configured start level.

    NOTE(review): indentation reconstructed from collapsed source; the
    anchor creation is placed inside the force_id branch because it uses
    the freshly assigned id -- confirm against upstream.
    """
    start_level, force_id = self._get_meta()
    slugify = self.config['slugify']
    sep = self.config['separator']
    for elem in doc:
        if elem.tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
            if force_id:
                if "id" in elem.attrib:
                    # keep an explicitly authored id untouched
                    id = elem.get('id')
                else:
                    # derive an id from the heading's visible text
                    id = stashedHTML2text(''.join(elem.itertext()), self.md)
                    id = slugify(id, sep)
                # de-duplicate against ids already handed out
                id = unique(id, self.IDs)
                elem.set('id', id)
                # append a self-link anchor pointing at the heading's id
                anchor = Element('a')
                anchor.set('href', '#' + id)
                anchor.set('class', 'anchor')
                elem.append(anchor)
            if start_level:
                # demote the heading by start_level, capping at h6
                level = int(elem.tag[-1]) + start_level
                if level > 6:
                    level = 6
                elem.tag = 'h%d' % level
def handleMatch(self, m):
    """Resolve a reference-style Markdown link against the document's own
    references and the site-wide 'reflinks' table.

    Returns an element via ``self.makeTag``, or None for undefined refs
    (a warning is emitted instead of an error).
    NOTE(review): indentation reconstructed from collapsed source --
    confirm nesting against upstream.
    """
    try:
        # explicit id form "[text][id]" -- id is in group 9
        ref = m.group(9)
    except IndexError:
        ref = None
    shortref = False
    if not ref:
        # if we got something like "[Google][]" or "[Google]"
        # we'll use "google" as the id
        ref = m.group(2)
        shortref = True
    # Clean up linebreaks in ref
    ref = self.NEWLINE_CLEANUP_RE.sub(' ', ref)
    text = m.group(2)
    id = ref.lower()
    if id in self.markdown.references:
        # plain Markdown reference defined in the document itself
        href, title = self.markdown.references[id]
    else:
        # site-wide reflink: split off an optional '#anchor' suffix first
        anchor = None
        if '#' in ref:
            ref, anchor = ref.split('#', 1)
        this = self.markdown.this
        if not posixpath.isabs(ref):
            # treat empty ref as reference to current page
            if not ref:
                ref = this['components'][-1]
            # resolve relative refs against the current page's folder
            rootrelpath = '/' + '/'.join(this['components'][:-1])
            id = posixpath.normpath(posixpath.join(rootrelpath, ref))
            id = id.lower()
        else:
            id = ref.lower()
        ref = ref.lower()
        if ref in self.markdown.site['reflinks']:
            # a bare ref that is itself a reflink key takes precedence,
            # but it is an error if the resolved path is also a key
            if (ref != id) and (id in self.markdown.site['reflinks']):
                raise UrubuError(_error.ambig_ref_md, msg=ref, fn=this['fn'])
            id = ref
        if id in self.markdown.site['reflinks']:
            item = self.markdown.site['reflinks'][id]
            href, title = item['url'], item['title']
            if shortref:
                # short form: use the target's title (or the anchor) as text
                text = title
                if anchor is not None:
                    text = anchor
            if anchor is not None:
                anchor = toc.slugify(anchor, '-')
                href = '%s#%s' % (href, anchor)
                # record the anchor reference so it can be validated later
                anchorref = '%s#%s' % (id, anchor)
                self.markdown.this['_anchorrefs'].add(anchorref)
        else:
            # ignore undefined refs
            urubu_warn(_warning.undef_ref_md, msg=ref, fn=this['fn'])
            return None
    return self.makeTag(href, title, text)
def get_heading_url(self, heading):
    """Return the URL for *heading*.

    A page whose title equals *heading* yields the page URL itself; a page
    whose Markdown contains *heading* as a setext or ATX heading yields the
    page URL plus a '#<slug>' fragment. Returns None when nothing matches.
    """
    escaped = ' *{} *'.format(re.escape(heading))
    # setext ("Heading\n====") or ATX ("# Heading") forms, multiline mode
    matcher = re.compile('^{0}$\n^[=-]+$|^#+{0}$'.format(escaped), re.M)
    for page, markdown in self.pages:
        url = page.abs_url or '/' + page.url
        if heading == page.title:
            return url
        if matcher.search(markdown):
            return url + '#' + slugify(heading, '-')
def process_mobe1969_content_from_repo(content_meta, index_entries, content_type):
    ''' converts beq_metadata into md '''
    # pick the grouping strategy by content type
    if content_type == 'film':
        grouped = group_mobe1969_film_content(content_meta)
    else:
        grouped = group_mobe1969_tv_content(content_meta)
    for title, metas in grouped.items():
        slug = slugify(title, '-')
        # one markdown page per grouped title
        with open(f"docs/mobe1969/{slug}.md", mode='w+') as content_md:
            generate_content_page(slug, metas, content_md, index_entries,
                                  'mobe1969', content_type)
def _md_to_html(md_file, outf, prefix='', baselevel=1):
    """Convert markdown to html."""
    def _prefixed_slugify(value, separator):
        # prepend the caller-supplied prefix to every generated heading id
        return prefix + slugify(value, separator)

    toc_ext = TocExtension(baselevel=baselevel, slugify=_prefixed_slugify)
    source = read_file(md_file)
    html = markdown.markdown(source,
                             extensions=['markdown.extensions.tables', toc_ext],
                             output_format='html5',
                             lazy_ol=False)
    outf.write(html)
def handleMatch(self, m, data):
    """Resolve a reference-style Markdown link against the document's own
    references and the site-wide 'reflinks' table (new InlineProcessor
    API: returns a (element, start, end) triple).

    NOTE(review): indentation reconstructed from collapsed source --
    confirm nesting against upstream.
    """
    text, index, handled = self.getText(data, m.end(0))
    if not handled:
        return None, None, None
    ref, end, shortref, handled = self.evalRef(data, index, text)
    if not handled:
        return None, None, None
    # Clean up linebreaks in ref
    ref = self.NEWLINE_CLEANUP_RE.sub(' ', ref)
    id = ref.lower()
    if id in self.md.references:
        # plain Markdown reference defined in the document itself
        href, title = self.md.references[id]
    else:
        # site-wide reflink: split off an optional '#anchor' suffix first
        anchor = None
        if '#' in ref:
            ref, anchor = ref.split('#', 1)
        this = self.md.this
        if not posixpath.isabs(ref):
            # treat empty ref as reference to current page
            if not ref:
                ref = this['components'][-1]
            # resolve relative refs against the current page's folder
            rootrelpath = '/' + '/'.join(this['components'][:-1])
            id = posixpath.normpath(posixpath.join(rootrelpath, ref))
            id = id.lower()
        else:
            id = ref.lower()
        ref = ref.lower()
        if ref in self.md.site['reflinks']:
            # a bare ref that is itself a reflink key takes precedence,
            # but it is an error if the resolved path is also a key
            if (ref != id) and (id in self.md.site['reflinks']):
                raise UrubuError(_error.ambig_ref_md, msg=ref, fn=this['fn'])
            id = ref
        if id in self.md.site['reflinks']:
            item = self.md.site['reflinks'][id]
            href, title = item['url'], item['title']
            if shortref:
                # short form: use the target's title (or the anchor) as text
                text = title
                if anchor is not None:
                    text = anchor
            if anchor is not None:
                anchor = toc.slugify(anchor, '-')
                href = '%s#%s' % (href, anchor)
                # record the anchor reference so it can be validated later
                anchorref = '%s#%s' % (id, anchor)
                self.md.this['_anchorrefs'].add(anchorref)
        else:
            # ignore undefined refs
            urubu_warn(_warning.undef_ref_md, msg=ref, fn=this['fn'])
            return None, None, None
    return self.makeTag(href, title, text), m.start(0), end
def generate_resources_and_action_ids(JSON_file_path):
    """Generate an ID for every resource and action in the given JSON file

    Arguments:
    JSON_file_path - path to the JSON file containing the API parsed definition

    Fixes: the file was opened with mode 'rU', which is removed in
    Python 3.11 (ValueError); plain 'r' already does universal newlines.
    Also replaces the dead ``json_content = ""`` initializer, the
    ``len(...) > 0`` checks and the ``== True`` comparison with idiomatic
    truthiness, and flattens the else-if ladder into elif.
    """
    with open(JSON_file_path, 'r') as json_file:
        json_content = json.load(json_file)
    for resource_group in json_content["resourceGroups"]:
        for resource in resource_group["resources"]:
            # prefer the human-readable name; fall back to the URI template
            if resource["name"]:
                resource["id"] = 'resource_' + slugify(resource["name"], '-')
            else:
                resource["id"] = 'resource_' + slugify(resource["uriTemplate"], '-')
            for action in resource["actions"]:
                if action["name"]:
                    action["id"] = 'action_' + slugify(action["name"], '-')
                elif action["attributes"]["uriTemplate"]:
                    action["id"] = 'action_' + slugify(action["attributes"]["uriTemplate"], '-')
                elif resource["ignoreTOC"]:
                    # anonymous action on a TOC-hidden resource: key on URI + method
                    action["id"] = 'action_' + slugify(resource["uriTemplate"] + action["method"], '-')
                else:
                    action["id"] = 'action_' + slugify(resource["name"] + action["method"], '-')
    with open(JSON_file_path, 'w') as json_file:
        json.dump(json_content, json_file, indent=4)
def run(self, root):
    """Give every non-heading element an id of the form
    "<heading-slug>-<tag>-<ordinal>", scoped to the most recent heading.

    NOTE(review): indentation reconstructed from collapsed source; the
    el.set(...) call is placed in the non-heading branch since headings
    keep their own slug-based ids -- confirm against upstream.
    """
    # occurrences of each heading slug seen so far (for de-duplication)
    headers = defaultdict(int)
    # slug of the current section's heading, with "_<n>" suffix if repeated
    current_header = ""
    # per-tag counters within the current section
    ixs = defaultdict(int)
    for el in root[1:]:  # skip [toc]
        if el.tag in ("h1", "h2", "h3", "h4", "h5", "h6"):
            counted = current_header = toc.slugify(el.text, "-")
            if headers[counted] > 0:
                # duplicate heading text: disambiguate with a numeric suffix
                current_header += "_" + str(headers[counted])
            headers[counted] += 1
            # new section: restart the per-tag ordinals
            ixs = defaultdict(int)
        else:
            ixs[el.tag] += 1
            el.set("id", "-".join([current_header, el.tag, str(ixs[el.tag])]))
def save(self, *args, **kwargs):
    """Populate derived fields before saving the Post.

    Sets the URL slug from the title, derives a plain-text excerpt from
    the rendered Markdown content when missing, stamps the creation time
    on first publication, and assigns a random 8-char rand_id when unset
    or still the placeholder value.
    """
    self.url_slug = slugify(self.title)
    md = markdown.Markdown(extensions=[
        'markdown.extensions.extra',
        'markdown.extensions.codehilite',
        TocExtension(slugify=slugify)
    ])
    if not self.excerpt:
        # strip the rendered HTML down to the first 150 plain-text chars
        self.excerpt = strip_tags(md.convert(self.content))[:150]
    if not self.created_time and self.status == self.STATUS_CHOICES.published:
        # BUG FIX: the original assigned self.created_time to itself (a
        # no-op), so the publication time was never recorded. Stamp it
        # now with an aware datetime. TODO(review): confirm timezone.now()
        # matches the project's intent for this field.
        from django.utils import timezone
        self.created_time = timezone.now()
    if self.rand_id == "" or self.rand_id == "1a2b3c4d":
        # replace empty/placeholder ids with 8 unique random alphanumerics
        self.rand_id = ''.join(
            random.sample(string.ascii_letters + string.digits, 8))
    super(Post, self).save(*args, **kwargs)
def _make_title_full_command_path(ctx: click.Context, depth: int) -> Iterator[str]:
    """Create the markdown heading for a command, showing the full command path.

    This style accommodates nested commands by showing:

    * The full command path for headers and permalinks
      (eg `# git commit` and `http://localhost:8000/#git-commit`)
    * The command leaf name only for TOC entries (eg `* commit`).

    We do this because a TOC naturally conveys the hierarchy, whereas headings
    and permalinks should be namespaced to convey the hierarchy.

    See: https://github.com/DataDog/mkdocs-click/issues/35
    """
    heading_text = ctx.command_path               # 'git commit'
    permalink = slugify(ctx.command_path, "-")    # 'git-commit'
    toc_label = ctx.info_name                     # 'commit'

    # Requires `attr_list` extension, see: https://python-markdown.github.io/extensions/toc/#custom-labels
    attrs = f"#{permalink} data-toc-label='{toc_label}'"
    hashes = '#' * (depth + 1)

    yield f"{hashes} {heading_text} {{ {attrs} }}"
    yield ""
def contains_anchor(markdown: str, anchor: str) -> bool:
    """Check if a set of Markdown source text contains a heading that
    corresponds to a given anchor."""
    for line in markdown.splitlines():
        # Markdown allows whitespace before headers and an arbitrary number of #'s.
        heading_match = HEADING_PATTERN.match(line)
        if heading_match:
            # Headings may carry trailing images, e.g.
            #   # Heading [![Image](image-link)] or ![Image][image-reference]
            # which are not part of the generated anchor -- drop them.
            heading_text = re.sub(IMAGE_PATTERN, '', heading_match.group(1))
            if slugify(heading_text, '-') == anchor:
                return True
        link_match = HTML_LINK_PATTERN.match(line)
        if link_match and link_match.group(1) == anchor:
            return True
    return False
def generate_resources_and_action_ids(json_content):
    """Generate an ID for every resource and action in the given JSON file

    Arguments:
    json_content - JSON object containing the API parsed definition

    Fixes: replaces the non-idiomatic ``len(...) > 0`` checks and the
    ``== True`` comparison with truthiness, and flattens the nested
    else-if ladder into elif, matching the file-path variant above.
    Mutates json_content in place; returns None.
    """
    for resource_group in json_content["resourceGroups"]:
        for resource in resource_group["resources"]:
            # prefer the human-readable name; fall back to the URI template
            if resource["name"]:
                resource["id"] = 'resource_' + slugify(resource["name"], '-')
            else:
                resource["id"] = 'resource_' + slugify(resource["uriTemplate"], '-')
            for action in resource["actions"]:
                if action["name"]:
                    action["id"] = 'action_' + slugify(action["name"], '-')
                elif action["attributes"]["uriTemplate"]:
                    action["id"] = 'action_' + slugify(action["attributes"]["uriTemplate"], '-')
                elif resource["ignoreTOC"]:
                    # anonymous action on a TOC-hidden resource: key on URI + method
                    action["id"] = 'action_' + slugify(resource["uriTemplate"] + action["method"], '-')
                else:
                    action["id"] = 'action_' + slugify(resource["name"] + action["method"], '-')
def wiki_slugify(*args, **kwargs):
    """Slugify the given value with the wiki header-id prefix prepended."""
    slug = slugify(*args, **kwargs)
    return HEADER_ID_PREFIX + slug
def anchor(self):
    """Return the TOC-style slug of this object's title."""
    # imported lazily so the markdown dependency is only needed here
    from markdown.extensions.toc import slugify as _slugify
    return _slugify(self.title, '-')
def save(self, *args, **kwargs):
    """Auto-populate the slug from the category name, then save."""
    self.slug = slugify(self.name)
    super(Category, self).save(*args, **kwargs)
def salted_slug(value, separator):
    """Return the slug of *value* with a random 5-digit salt appended,
    joined by *separator* (e.g. 'my-title-83921')."""
    base = slugify(value, separator)
    salt = ''.join(str(randint(0, 9)) for _ in range(5))
    return base + separator + salt