def filter_gist(value):
    """Replace each ``[gist <id>]`` shortcode in *value* with an md5 placeholder.

    For every gist id found, the gist is fetched from the GitHub API, all of
    its files are colorized and concatenated (plus an optional comments
    footer), and the resulting HTML is recorded in ``replacements`` as
    ``[hash, html]``; the hash replaces the first remaining shortcode.

    Returns a 3-tuple ``(replacements, value, flag)`` where *flag* is ``None``
    when no shortcode was present or a fetch failed, and ``True`` otherwise.
    """
    gist_base_url = 'https://api.github.com/gists/'
    replacements = []
    pattern = re.compile('\[gist (\d+) *\]', flags=re.IGNORECASE)
    ids = re.findall(pattern, value)
    if not len(ids):
        return (replacements, value, None,)
    for gist_id in ids:
        gist_text = ""
        resp = requests.get('%s%d' % (gist_base_url, int(gist_id)))
        if resp.status_code != 200:
            # BUG FIX: previously returned the bare string here, breaking
            # callers that unpack the (replacements, value, flag) 3-tuple
            # returned on every other path.
            return (replacements, value, None,)
        content = simplejson.loads(resp.content)
        # Go through all files in gist and smash 'em together
        for name in content['files']:
            gist_text += "%s" % (
                _colorize_table(content['files'][name]['content'], None))
        if content['comments'] > 0:
            gist_text += '<hr><p class="github_convo">Join the conversation on ' + \
                '<a href="%s#comments">github</a> (%d comments)</p>' % (
                    content['html_url'], content['comments'])
        # BUG FIX: md5() requires bytes on Python 3 — encode first, as the
        # sibling filters in this module already do.
        text_hash = md5(gist_text.encode('utf-8')).hexdigest()
        replacements.append([text_hash, gist_text])
        value = re.sub(pattern, text_hash, value, count=1)
    return (replacements, value, True,)
def filter_gitstyle(value):
    """Convert GitHub-style fenced code blocks into md5 placeholders.

    Each ``\\`\\`\\`lang ... \\`\\`\\``` fence is colorized with the declared
    language; the generated HTML is recorded in ``replacements`` as
    ``[hash, html]`` and the md5 hash is substituted for the first remaining
    fence in the text.

    Returns ``(replacements, value, flag)`` where *flag* is ``None`` when no
    fence was found and ``True`` otherwise.
    """
    replacements = []
    pattern = re.compile("```(?P<lang>[^\\n\\s`]+)+?(?P<code>[^```]+)+?```",
                         re.I | re.S | re.M)
    if not re.findall(pattern, value):
        return (replacements, value, None,)
    for fence in re.finditer(pattern, value):
        try:
            language = fence.group('lang')
        except IndexError:
            language = None
        rendered = _colorize_table(fence.group('code'), lang=language)
        digest = md5(rendered.encode('utf-8')).hexdigest()
        replacements.append([digest, rendered])
        value = re.sub(pattern, digest, value, count=1)
    return (replacements, value, True,)
def filter_inline(value):
    """Turn ``[code lang="..."]...[/code]`` sections into md5 placeholders.

    An optional ``lang`` attribute selects the lexer and an optional
    ``more_colors`` token enables per-word coloring of user-defined names.
    The colorized HTML is recorded in ``replacements`` as ``[hash, html]``
    and its md5 hash replaces the first remaining section in the text.

    Returns ``(replacements, value, flag)`` — *flag* is ``None`` when
    nothing matched, ``True`` otherwise.
    """
    replacements = []
    pattern = re.compile(
        r'\[code(\s+lang="(?P<lang>[\w]+)")*\s*'
        r'(?P<more_colors>more_colors)*\](?P<code>.*?)\[/code\]',
        re.I | re.S | re.M)
    if not re.findall(pattern, value):
        return (replacements, value, None,)
    for section in re.finditer(pattern, value):
        try:
            language = section.group('lang')
        except IndexError:
            language = None
        try:
            wants_extra = section.group('more_colors') == 'more_colors'
        except IndexError:
            wants_extra = False
        rendered = _colorize_table(section.group('code'), lang=language)
        # Per-word coloring for user defined names.
        if wants_extra:
            rendered = add_colors_to(rendered)
        digest = md5(rendered.encode('utf-8')).hexdigest()
        replacements.append([digest, rendered])
        value = re.sub(pattern, digest, value, count=1)
    return (replacements, value, True,)
def filter_gist(value):
    """Expand each ``[gist <id>]`` shortcode by inlining the colorized gist.

    Fetches every referenced gist from the GitHub API, colorizes and
    concatenates all of its files (plus an optional comments footer), renders
    *value* through markdown, and substitutes the HTML for the first remaining
    shortcode.

    Returns ``(value, True)`` on success, and ``(value, None)`` when no
    shortcode is present or a fetch fails.
    """
    pattern = re.compile('\[gist (\d+) *\]', flags=re.IGNORECASE)
    ids = re.findall(pattern, value)
    if not len(ids):
        return value, None
    for gist_id in ids:
        gist_text = ""
        resp = requests.get('https://api.github.com/gists/%d' % (
            int(gist_id)))
        if resp.status_code != 200:
            # BUG FIX: return the (value, flag) pair like every other path;
            # the original returned the bare string here.
            return (value, None)
        content = simplejson.loads(resp.content)
        # Go through all files in gist and smash 'em together
        for name in content['files']:
            gist_text += "%s" % (
                _colorize_table(content['files'][name]['content'], None))
        if content['comments'] > 0:
            gist_text += '<hr><p class="github_convo">Join the conversation on ' + \
                '<a href="%s#comments">github</a> (%d comments)</p>' % (
                    content['html_url'], content['comments'])
        # Replace just first instance of the short code found.
        # BUG FIX: use a callable replacement so backslashes / group
        # references inside the generated HTML are not interpreted by
        # re.sub (which could raise re.error or corrupt the output).
        value = re.sub(pattern, lambda m, t=gist_text: t,
                       markdown.markdown(value), count=1)
    return (value, True)
def filter_upload(value):
    """Expand each ``[local <path>]`` shortcode by inlining the file contents.

    Resolves the path under ``MEDIA_ROOT``, accepts only files whose guessed
    mimetype starts with ``text``, reads at most 1 MB, colorizes the text,
    renders *value* through markdown, and substitutes the HTML for the first
    remaining shortcode.

    Returns ``(value, True)`` on success, and ``(value, None)`` when no
    shortcode is present or any validation / read step fails.
    """
    pattern = re.compile('\[local (\S+) *\]', flags=re.IGNORECASE)
    files = re.findall(pattern, value)
    if not len(files):
        return value, None
    for file_path in files:
        file_path = os.path.join(MEDIA_ROOT, file_path)
        (file_type, encoding) = mimetypes.guess_type(file_path)
        if file_type is None:
            return (value, None)
        # FIXME: Can we trust the 'guessed' mimetype?
        if not file_type.startswith('text'):
            return (value, None)
        # FIXME: Limit to 1MB right now
        try:
            # BUG FIX: context manager guarantees the handle is closed even
            # if read() raises; the original leaked it on a read error.
            with open(file_path) as f:
                text = f.read(1048576)
        except IOError:
            return (value, None)
        text = _colorize_table(text, None)
        text += '<hr><br>'
        # BUG FIX: callable replacement so re.sub does not interpret
        # backslash escapes / group references in the generated HTML.
        value = re.sub(pattern, lambda m, t=text: t,
                       markdown.markdown(value), count=1)
    return (value, True)
def filter_gist(value):
    """Swap each ``[gist <id>]`` shortcode for an md5 placeholder.

    Fetches every referenced gist from the GitHub API, colorizes each file it
    contains (passing the file name — or, failing that, the language GitHub
    reports — as the lexer hint) and appends a comments footer when the gist
    has comments.  The combined HTML is recorded in ``replacements`` as
    ``[hash, html]`` and the md5 hash replaces the first remaining shortcode.

    Returns ``(replacements, value, flag)`` — *flag* is ``None`` when no
    shortcode was present or a fetch failed, ``True`` otherwise.
    """
    api_root = 'https://api.github.com/gists/'
    replacements = []
    shortcode = re.compile('\[gist (\d+) *\]', flags=re.IGNORECASE)
    gist_ids = re.findall(shortcode, value)
    if not gist_ids:
        return (replacements, value, None,)
    for gist_id in gist_ids:
        combined = ""
        lexer_hint = None
        response = requests.get('%s%d' % (api_root, int(gist_id)))
        if response.status_code != 200:
            return (replacements, value, None,)
        payload = simplejson.loads(response.content)
        # Concatenate every file in the gist into one colorized blob.
        for file_name in payload['files']:
            entry = payload['files'][file_name]
            # Prefer the file name as the lexer hint, falling back to the
            # language field when no filename is present.
            if 'filename' in entry:
                lexer_hint = entry['filename']
            elif 'language' in entry:
                lexer_hint = entry['language']
            combined += "%s" % (_colorize_table(entry['content'],
                                                lang=lexer_hint))
        if payload['comments'] > 0:
            combined += '<hr><p class="github_convo">Join the conversation on ' + \
                '<a href="%s#comments">github</a> (%d comments)</p>' % (
                    payload['html_url'], payload['comments'])
        digest = md5(combined.encode('utf-8')).hexdigest()
        replacements.append([digest, combined])
        value = re.sub(shortcode, digest, value, count=1)
    return (replacements, value, True,)
def filter_inline(value):
    """Replace ``[code]...[/code]`` sections with md5 placeholders.

    Each section's contents are colorized; the generated HTML is appended to
    ``replacements`` as ``[hash, html]`` and the md5 hash substitutes the
    first remaining section in the text.

    Returns ``(replacements, value, flag)`` where *flag* is ``None`` when no
    section was found and ``True`` otherwise.
    """
    replacements = []
    pattern = re.compile('\\[code\\](.*?)\\[/code\\]', re.I | re.S | re.M)
    inlines = re.findall(pattern, value)
    if not len(inlines):
        return (replacements, value, None,)
    for inline_code in inlines:
        text = _colorize_table(inline_code, None)
        # BUG FIX: md5() requires bytes on Python 3 — encode first, as the
        # other filters in this module already do.
        text_hash = md5(text.encode('utf-8')).hexdigest()
        replacements.append([text_hash, text])
        value = re.sub(pattern, text_hash, value, count=1)
    return (replacements, value, True,)
def filter_upload(value):
    """Replace each ``[local <path>]`` shortcode with an md5 placeholder.

    Resolves the path under ``MEDIA_ROOT`` and tries to colorize up to 1 MB
    of the file's contents (using the file name as the lexer hint).  When the
    mimetype cannot be guessed or the file cannot be read, the shortcode text
    itself is used instead of colorized output.  Either way, the result is
    recorded in ``replacements`` as ``[hash, text]`` and the md5 hash replaces
    the first remaining shortcode.

    Returns ``(replacements, value, flag)`` where *flag* is ``None`` when no
    shortcode was present and ``True`` otherwise.
    """
    replacements = []
    pattern = re.compile('\[local (\S+) *\]', flags=re.IGNORECASE)
    files = re.findall(pattern, value)
    if not len(files):
        return (replacements, value, None,)
    for file_name in files:
        colorize = True
        file_path = os.path.join(MEDIA_ROOT, file_name)
        (file_type, encoding) = mimetypes.guess_type(file_path)
        if file_type is None:
            colorize = False
        # FIXME: Can we trust the 'guessed' mimetype?
        # NOTE(review): guess_type() returns 'major/minor' strings, so this
        # membership test never matches bare 'application'/'text' — confirm
        # whether a prefix check (e.g. startswith) was intended here.
        if file_type in ['application', 'text']:
            colorize = False
        # FIXME: Limit to 1MB right now
        try:
            # BUG FIX: the original leaked the handle if read() raised; the
            # context manager always closes it.
            with open(file_path) as f:
                text = f.read(1048576)
        except IOError:
            colorize = False
        if colorize:
            text = _colorize_table(text, lang=file_name)
            text_hash = md5(text.encode('utf-8')).hexdigest()
        else:
            # Fall back to echoing the shortcode itself.
            text = '[local %s]' % file_name
            text_hash = md5(text.encode('utf-8')).hexdigest()
        replacements.append([text_hash, text])
        value = re.sub(pattern, text_hash, value, count=1)
    return (replacements, value, True,)
def filter_inline(value):
    """Colorize ``[code]`` shortcodes and swap them for md5 placeholders.

    Supports an optional ``lang="..."`` attribute (lexer selection) and an
    optional ``more_colors`` token (per-word coloring of user-defined names).
    Each colorized section is stored in ``replacements`` as ``[hash, html]``
    and the md5 hash replaces the first remaining section in the text.

    Returns ``(replacements, value, flag)`` — *flag* is ``None`` when no
    section was found, ``True`` otherwise.
    """
    replacements = []
    shortcode = re.compile(
        '\\[code(\\s+lang=\"(?P<lang>[\\w]+)\")*\\s*'
        '(?P<more_colors>more_colors)*\\](?P<code>.*?)\\[/code\\]',
        re.I | re.S | re.M)
    matches = list(re.finditer(shortcode, value))
    if not matches:
        return (replacements, value, None,)
    for found in matches:
        # Both named groups always exist in this pattern; they are None
        # when their optional part did not participate in the match.
        lang = found.group('lang')
        extra = found.group('more_colors') == 'more_colors'
        rendered = _colorize_table(found.group('code'), lang=lang)
        # Per-word coloring for user defined names.
        if extra:
            rendered = add_colors_to(rendered)
        fingerprint = md5(rendered.encode('utf-8')).hexdigest()
        replacements.append([fingerprint, rendered])
        value = re.sub(shortcode, fingerprint, value, count=1)
    return (replacements, value, True,)
def filter_url(value):
    """Expand each ``[url <address>]`` shortcode by inlining the fetched page.

    Every referenced URL is validated, fetched, colorized, and substituted
    for the first remaining shortcode in the markdown-rendered text.

    Returns ``(value, True)`` on success, and ``(value, None)`` when no
    shortcode is present or any validation / fetch step fails.
    """
    pattern = re.compile('\[url (\S+) *\]', flags=re.IGNORECASE)
    urls = re.findall(pattern, value)
    if not len(urls):
        return (value, None)
    for url in urls:
        url = _validate_url(url)
        if url is None:
            return (value, None)
        # Validate that value is actually a url
        resp = requests.get(url)
        if resp.status_code != 200:
            return (value, None)
        # NOTE(review): resp.content is bytes on Python 3 — confirm that
        # _colorize_table accepts bytes, or decode via resp.text instead.
        rendered = _colorize_table(resp.content, None)
        # BUG FIX: callable replacement prevents re.sub from interpreting
        # backslash escapes / group references in the fetched content.
        value = re.sub(pattern, lambda m, t=rendered: t,
                       markdown.markdown(value), count=1)
    return (value, True)