def test_checklists():
    source = """
* [ ] foo
* [x] bar
* [ ] baz
    """.strip()

    expected = """
<ul class="check-list">
<li><input type="checkbox" disabled> foo</li>
<li><input type="checkbox" disabled checked> bar</li>
<li><input type="checkbox" disabled> baz</li>
</ul>
    """.strip()

    html = markdown(source,
            extensions=[ChecklistExtension(list_class="check-list")])
    assert html == expected

    expected = """
<ul class="checklist">
<li><label><input type="checkbox" disabled> foo</label></li>
<li><label><input type="checkbox" disabled checked> bar</label></li>
<li><label><input type="checkbox" disabled> baz</label></li>
</ul>
    """.strip()

    html = markdown(source,
            extensions=[ChecklistExtension(render_item=render_item)])
    assert html == expected
Example #2
 def save(self, force_insert=False, force_update=False):
     self.body_html = markdown(self.body, output_format='html5',
                               extensions=['codehilite'])
     if self.excerpt:
         self.excerpt_html = markdown(self.excerpt, ['codehilite'])
     super(Entry, self).save(force_insert, force_update)
Example #3
    def update(_id, title, content, c_name, c_id, tags):
        summary = content[0:80] + '...'
        html = markdown.markdown(content)
        diary = {
                "title": title,
                "content": content,
                "category": c_name,
                "category_id": int(c_id),
                "tags": tags,
                "summary": markdown.markdown(summary),
                "html": html,
                "update_time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                }

        detail = Diary.get_detail(_id)
        publish_time = detail.get('publish_time')
        last_cid = detail.get('category_id')
        last_tags = detail.get('tags')

        db.diaries.update({'_id': int(_id)}, {'$set': diary})

        #Save for category
        Category.update_diary(c_id, _id, title, publish_time, last_cid)

        if last_tags is not None:
            # delete it from old tags
            Tag.del_diary(_id)
        
        if tags is not None:
            diary = Diary.get_detail(_id)
            # save tags
            for tag in tags:
                Tag.add(tag, diary)
        return 
Example #4
 def on_changed_body(target, value, oldvalue, initiator):
     allowed_tags = ['a', 'abbr', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol',
                     'ul', 'pre', 'strong', 'h1', 'h2', 'h3', 'p']
     target.body_html = bleach.linkify(bleach.clean(markdown(value, output_format='html'),
                                                    tags=allowed_tags, strip=True))
     target.body_abstract = bleach.linkify(bleach.clean(markdown(value, output_format='html'),
                                                     tags=['p'], strip=True))
Example #5
    def add(title, content, c_name, c_id, tags):
        summary = content[0:80] + '...'
        html = markdown.markdown(content)

        diary = {
                "_id": Kid.kid(),
                "title": title,
                "category": c_name,
                "category_id": int(c_id),
                "tags": tags,
                "content": content,
                "html": html,
                "summary": markdown.markdown(summary),
                "publish_time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                }
        db.diaries.save(diary)

        # save category
        Category.update_diary(c_id, diary.get('_id'), title, diary.get('publish_time'))

        if tags is not None:
            # save tags
            for tag in tags:
                Tag.add(tag, diary)
        return 
Example #6
    def post(self, gist_id):
        options = self.get_base_options()
        gist = self.find_gist(gist_id)
        if gist.user != options['user']:
            raise tornado.web.HTTPError(403, "Not your gist")

        # fix 404 error if no description is posted:
        # use gist_id as the default description
        description = self.get_argument('description', u'GIST-%s' % gist_id).strip()
        discussion = self.get_argument('discussion', u'')
        tags = self.get_argument('tags', u'')
        tags = [x.strip() for x in tags.split(',') if x.strip()]

        try:
            # test if the markdown plain text isn't broken
            markdown.markdown(discussion, safe_mode="escape")
        except Exception:
            raise
        gist.description = description
        gist.discussion = discussion
        gist.discussion_format = u'markdown'
        gist.tags = tags
        gist.update_date = datetime.datetime.now()
        gist.save()
        url = self.reverse_url('view_gist', gist.gist_id)
        self.redirect(url)
Example #7
 def post(self):
     key = self.get_argument("key", None)
     if key:
         entry = Entry.get(key)
         entry.title = self.get_argument("title")
         entry.body = self.get_argument("markdown")
         entry.markdown = markdown.markdown(self.get_argument("markdown"))
     else:
         title = self.get_argument("title")
         slug = unicodedata.normalize("NFKD", title).encode(
             "ascii", "ignore")
         slug = re.sub(r"[^\w]+", " ", slug)
         slug = "-".join(slug.lower().strip().split())
         if not slug: slug = "entry"
         while True:
             existing = db.Query(Entry).filter("slug =", slug).get()
             if not existing or str(existing.key()) == key:
                 break
             slug += "-2"
         entry = Entry(
             author=self.current_user,
             title=title,
             slug=slug,
             body=self.get_argument("markdown"),
             markdown=markdown.markdown(self.get_argument("markdown")),
         )
     entry.put()
     self.redirect("/entry/" + entry.slug)
Example #8
    def from_text_file(_file, _scene_name, _main_logger):
        new_scene = Scene(_scene_name)

        d = _file.read()
        d = d.strip()

        # Find the start of the options and parse them
        new_option = None
        if scene_options_marker in d:
            scene_text, options = d.split(scene_options_marker)
            for line in options.splitlines():
                line = line.strip()
                if len(line):
                    r = re.match(r"\[(.*)\]", line)
                    if r:
                        if new_option:
                            new_option.trans_text = markdown.markdown(new_option.trans_text)
                            new_scene.options.append(new_option)
                        new_option = Option()
                        new_option.next_scene_name = r.group(1)
                    else:
                        r = re.match(r"(.*)=(.*)", line)
                        if r:
                            parameter_name = r.group(1).rstrip()
                            parameter_value = r.group(2).lstrip()
                            if new_option.__dict__.has_key(parameter_name):
                                if type(new_option.__dict__[parameter_name]) == type(1):
                                    new_option.__dict__[parameter_name] = int(parameter_value)
                                else:
                                    new_option.__dict__[parameter_name] = str(parameter_value)
                            else:
                                warning = "Unknown parameter '%s' in scene '%s'" % (parameter_name, _scene_name)
                                warnings.append(warning)
                                _main_logger.log(logging.WARNING, warning)
                        else:
                            if "=" in line:
                                warning = "Could not parse option '%s' of scene '%s'" % (line, _scene_name)
                                warnings.append(warning)
                                _main_logger.log(logging.WARNING, warning)
                            else:
                                new_option.trans_text += line + " "
        else:
            warning = "Could not find options in scene '%s'" % _scene_name
            warnings.append(warning)
            _main_logger.log(logging.WARNING, warning)
            scene_text = d
        if new_option:
            new_option.trans_text = markdown.markdown(new_option.trans_text)
            new_scene.options.append(new_option)

        # Convert the text from Markdown to HTML
        html = markdown.markdown(scene_text)
        if "</h1>" in html:
            header, description = html.split("</h1>")
            new_scene.header = header + "</h1>"
            new_scene.text = description
        else:
            new_scene.text = html

        return new_scene
Example #9
def make_page(filename):
    calling_dir = os.getcwd()
    with codecs.open(filename, encoding='utf-8') as f:
        lines = [line for line in f]
    title_line = lines.pop(0)
    header = markdown.markdown(title_line)
    title = title_to_text(title_line)
    lines = add_code_blocks(lines)
    slides = slides_from(lines)
    if slides:
        slides = '<div>\n' + title + '\n</div>\n' + slides
        slides_start = file_or_bust(calling_dir, 'slides_header.html')
        slides_end = file_or_bust(calling_dir, 'slides_footer.html')
        slides_html = slides_start + slides + slides_end
    else:
        slides_html = None
    regex = re.compile(r'([0-9]{4})(?:.?)([0-9]{2})(?:.?)([0-9]{2})')
    match = regex.search(calling_dir)
    if match:
        date_string = ''.join(match.groups())
        date = datetime.strptime(date_string, '%Y%m%d')
        date_string = datetime.strftime(date, '%A %B %e, %Y')
        header += '\n<p class="date">' + date_string + '</p>\n'
    body = markdown.markdown("".join(lines))
    start = file_or_bust(calling_dir, 'header.html')
    start = start.replace('HEAD_TITLE', title)
    end = file_or_bust(calling_dir, 'footer.html')
    if slides:
        slides_loader = file_or_bust(calling_dir, 'slides_loader.html')
        body = body + slides_loader
    plain_html = start + header + body + end
    return plain_html, slides_html
Example #10
    def generate_summary_content(cls, updates):
        content = {}
        for msg in updates:
            for header, text, garbage in msg.parse():
                # do some sort of standardizing of case and whitespace to avoid duplicate headers
                header = header.strip().title()
                if header not in content:
                    content[header] = {}
                # use members_dict.get in case the sender has been removed from the MEMBERS list since
                # they sent an email to the list (although this is unlikely).
                sender = members_dict.get(msg.sender.lower(), msg.sender)
                if sender not in content[header]:
                    content[header][sender] = {"text": text, "html": markdown(text)}
                else:
                    content[header][sender]["text"] += text
                    content[header][sender]["html"] += markdown(text)
        sorted_content = SortedDict()

        def content_key(s):
            try:
                return int(s.split(".")[0])
            except ValueError:
                return s

        for k in sorted(content.keys(), key=content_key):
            sorted_content[k] = content[k]
        return sorted_content
Example #11
def write_issues(response):
    "output a list of issues to csv"
    if response.status_code != 200:
        raise Exception(response.status_code)

    for issue in response.json():
        labels = issue['labels']
        for label in labels:
            # Only handle the issue itself, not the issue comments
            if IMPORTCHILDS:
                # Retrieve parent issues
                if not issue['number'] in issues:
                    print 'Issue: ' + str(issue['number'])
                    issues.append(issue['number'])
                    # Convert Markdown to HTML
                    try:
                        html = markdown(issue['body'].encode('utf-8'))
                    except UnicodeDecodeError:
                        html = issue['body'].encode('utf-8')
                        print("Oops!  That was no valid char.  Saved without format ...")

                    csvout.writerow([issue['number'], issue['title'].encode('utf-8'), html, 'New', issue['created_at']  ])
                # TODO: here we have the children; we could write them to another CSV
                else:
                    print 'Issue Child'
            else:
                try:
                    html = markdown(issue['body'].encode('utf-8'))
                except UnicodeDecodeError:
                    html = issue['body'].encode('utf-8')
                    print("Oops!  That was no valid char.  Saved without format ...")
                csvout.writerow([issue['number'], issue['title'].encode('utf-8'), issue['body'].encode('utf-8'), 'New', issue['created_at']])
Example #12
    def currentItemChanged(self):
        item = self.lessonsTree.currentItem()
        if item:
            if hasattr(item, "lesson"):
                self.btnRemove.setText("Uninstall lesson")
                self.btnRemove.setEnabled(True)
                self.btnRunLesson.setEnabled(True)
                if os.path.exists(item.lesson.description):
                    with codecs.open(item.lesson.description, encoding="utf-8") as f:
                        html = "".join(f.readlines())
                        if item.lesson.description.endswith(".md"):
                            html = markdown.markdown(html)
                    self.webView.document().setMetaInformation(QTextDocument.DocumentUrl,
                                                               QUrl.fromUserInput(item.lesson.description).toString())
                    self.webView.setHtml(html)
                else:
                    self.webView.setHtml("<p>{}</p>".format(item.lesson.description))
            else:
                self.btnRunLesson.setEnabled(False)
                self.btnRemove.setText("Uninstall lessons group")
                self.btnRemove.setEnabled(True)
                if os.path.exists(item.description):
                    with codecs.open(item.description, encoding="utf-8") as f:
                        html = "".join(f.readlines())
                    if item.description.endswith(".md"):
                        html = markdown.markdown(html)
                    self.webView.document().setMetaInformation(QTextDocument.DocumentUrl,
                                                               QUrl.fromUserInput(item.description).toString())
                else:
                    html = item.description
                self.webView.setHtml(html)

        else:
            self.btnRemove.setEnabled(False)
            self.btnRunLesson.setEnabled(False)
Example #13
def theater():
    """ Serves the theater view from the index URL """
    # Store scene list on global object
    g.standard_scenes = STANDARD_SCENES

    # Render markdown from about ('home.md') file and store on global object
    with open(os.path.join(PATHS['home'], 'home.md')) as home_file:
        g.home = Markup(markdown(home_file.read()))

    # Load project index data structure into global object
    with open(os.path.join(PATHS['projects'],'project_index.json')) as index_file:
        g.project_index = json.load(index_file)['project_index']

    # Create scenes dict on global object and populate with standard scenes...
    g.scenes = {}
    for scene in g.standard_scenes:
        g.scenes[scene] = Markup(render_template(scene + '.html'))
    # ...and project scenes
    for filename in os.listdir(PATHS['projects']):
        if filename.endswith('.md'):
            with open(os.path.join(PATHS['projects'], filename)) as project_file:
                g.scenes[filename.replace('.md', '')] = Markup(markdown(project_file.read()))

    # Render page
    return render_template('theater.html')
Example #14
def markdown(value, arg=''):
    extensions = [e for e in arg.split(',') if e]
    if extensions and extensions[0] == 'safe':
        extensions = extensions[1:]
        return mark_safe(mkdn.markdown(force_text(value), extensions, safe_mode=True, enable_attributes=False))
    else:
        return mark_safe(mkdn.markdown(force_text(value), extensions, safe_mode=False))
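
A minimal usage sketch for the filter above (a hypothetical direct call outside the template layer; mkdn, mark_safe, and force_text come from the snippet's own imports):

# A leading 'safe' token in arg is a flag, not an extension name.
markdown("**hi** <b>raw</b>", arg="extra")       # extensions=['extra'], safe_mode=False
markdown("**hi** <b>raw</b>", arg="safe,extra")  # 'safe' stripped, safe_mode=True
# With Python-Markdown 2.x, safe_mode=True behaves like 'replace', so raw HTML is
# replaced rather than rendered; safe_mode was removed in Markdown 3, so this
# filter targets the 2.x API.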
Example #15
  def build_items(self):
    ''' Perform the markdown and jinja2 steps on the raw Items and write to files '''

    def write_out(self,item):
      """Make the item into a jinja template, render it and write the output"""
      template = self.jinja_env.from_string(item.content)
      item.rendered = template.render(page=item, site=self.site)
      print ("Creating: " + item.tpath + "/" + item.name + ".html")
      f = open(item.tpath + "/" + item.name + ".html", "w")
      f.write(item.rendered)
      f.close()
    
    for item in self.posts:
      # posts are considered to be all in the block called 'content'
      tt = copy.deepcopy(item)
      if item.ext == ".md" or item.ext == ".markdown":
        item.content = markdown.markdown(item.raw, extensions=['markdown.extensions.extra']) # This means we have the converted posts for all our pages should we need it (like the index page that might show a complete post)
        # Since this is markdown, we can't add an extends statement, so we set the whole thing as a content block
        tt.content = markdown.markdown(tt.raw, extensions=['markdown.extensions.extra'])
        tt.content = "{% block content %}\n" + tt.content + "{% endblock %}"
        tt.content = "{{% extends '{0}' %}}\n".format(item.metadata["layout"]) + tt.content
  
      write_out(self,tt) 
     
    for item in self.pages:
      tt = copy.deepcopy(item)
      tt.content = "{{% extends '{0}' %}}\n".format(item.metadata["layout"]) + tt.content
      write_out(self,tt)
Example #16
def parse_content(content):
  """Parses through the content of a file. Converts using markdown.

  Note that funnel uses sections and sections are defined as:

      === section name ===

      markdown here

      === section name ===

  The two `=== section name ===` markers are the boundaries. Everything outside
  of those boundaries won't count.

  This method returns a dictionary of section_name: html. If no
  section is defined, the section name of "main" will be used.
  """
  content = content.strip()
  sections = SECTION_HEADERS_REGEX.findall(content)
  sections = {s[1] for s in sections}
  if len(sections) == 0:
    return {"main": markdown.markdown(content)}
  else:
    s = {}
    for section in sections:
      matches = re.findall(SECTION_BODIES_REGEX.format(name=section), content, flags=re.S)
      if not len(matches):
        raise ValueError("Cannot find section {0}. Check for unmatched ending tags?".format(section))
      md = matches[0].strip()
      s[section] = markdown.markdown(md)

    return s
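
The docstring above describes the `=== section name ===` delimiters but never shows a call. A sketch of the expected behaviour, assuming parse_content and its regexes are imported from the snippet's module (the sample text and keys are illustrative):

sample = """
=== intro ===

Some *markdown* here.

=== intro ===
"""
parse_content(sample)
# -> {"intro": "<p>Some <em>markdown</em> here.</p>"}
parse_content("Just **plain** content, no section markers.")
# -> {"main": "<p>Just <strong>plain</strong> content, no section markers.</p>"}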
Example #17
def convert_one(part, config, charset):
    text = part.get_payload(decode=True)
    if part.get_charset():
        charset = get_charset_from_message_fragment(part)
    if not isinstance(text, six.text_type):
        # decode=True only decodes the base64/uuencoded nature, and
        # will always return bytes; gotta decode it
        if charset is not None:
            text = text.decode(charset)
        else:
            try:
                text = text.decode('ascii')
            except UnicodeError:
                # this is because of message.py:278 and seems like a hack
                text = text.decode('raw-unicode-escape')
    if not text.startswith('!m'):
        return None
    text = re.sub(r'\s*!m\s*', '', text, flags=re.M)
    if '\n-- \n' in text:
        pre_signature, signature = text.split('\n-- \n')
        md = markdown.markdown(pre_signature, output_format="html5")
        md += '\n<div class="signature" style="font-size: small"><p>-- <br />'
        md += '<br />'.join(signature.split('\n'))
        md += '</p></div>'
    else:
        md = markdown.markdown(text)
    if config.css:
        md = '<style>' + config.css + '</style>' + md
        md = pynliner.fromString(md)
    message = MIMEText(md, 'html', _charset="UTF-8")
    return message
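
A rough usage sketch for convert_one (assuming it is importable; the message text and the stand-in config object are illustrative, and config only needs a css attribute here):

import email
from types import SimpleNamespace

msg = email.message_from_string(
    "Content-Type: text/plain\n\n!m **Hello**\n-- \nAlice\n")
part = convert_one(msg, SimpleNamespace(css=None), "utf-8")
# part is a text/html MIMEText whose payload holds "<p><strong>Hello</strong></p>"
# followed by the signature wrapped in <div class="signature">; parts that do not
# start with the "!m" marker return None.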
Example #18
 def save(self, *args, **kwargs):
   """override save method and transform markdown into html"""
   if self.txt_en:
     self.html_en = markdown(self.txt_en)
   if self.txt_it:
     self.html_it = markdown(self.txt_it)
   super(TextBlockEnIt, self).save(*args, **kwargs)
Example #19
def markup(text, small_headings=False, no_follow=True, escape=False,
           scale_headings=True):
    """Markup text using the markup language specified in the settings.
    """
    if MARKUP_LANGUAGE == 'markdown':
        import markdown
        safe_mode = 'escape' if escape else None
        try:
            import pygments
            options = ['codehilite', 'extra', 'toc']
            if scale_headings:
                options.append('headerid(level=3, forceid=False)')
            text = markdown.markdown(text, options, safe_mode=safe_mode)

        except ImportError:
            options = ['extra', 'toc']
            if scale_headings:
                options.append('headerid(level=3, forceid=False)')
            text = markdown.markdown(text, options, safe_mode=safe_mode)
    
    if small_headings:
        text = re.sub(r'<(/?h)[1-6]', r'<\g<1>5', text)

    if no_follow:
        text = re.sub('<a (?![^>]*nofollow)', '<a rel="nofollow" ', text)

    return text
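
The two regex rewrites at the end are independent of which markdown extensions were loaded; a small sketch of what they do (sample HTML is illustrative):

re.sub(r'<(/?h)[1-6]', r'<\g<1>5', '<h2 id="t">T</h2>')
# -> '<h5 id="t">T</h5>'   (small_headings collapses every heading level to h5)
re.sub('<a (?![^>]*nofollow)', '<a rel="nofollow" ', '<a href="/x">x</a>')
# -> '<a rel="nofollow" href="/x">x</a>'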
Example #20
 def save(self, *args, **kwargs):
     if self.pub_date is None:
         self.pub_date = datetime.datetime.now()
     if self.excerpt:
         self.excerpt_html = markdown.markdown(self.excerpt)
     self.body_html = markdown.markdown(self.body, safe_mode = True)
     super(Entry, self).save(*args, **kwargs)
Example #21
    def clean(self):
        """Called by Mongoengine on every ``.save()`` to the object.

        Updates date_modified, renders the markdown into the HTML fields, and
        validates datetimes to ensure the event ends after it starts.

        :raises: :class:`wtforms.validators.ValidationError`
        """
        self.date_modified = now()

        if self.short_description_markdown:
            self.short_description = markdown.markdown(self.short_description_markdown,
                                                       ['extra', 'smarty'])

        if self.long_description_markdown:
            self.long_description = markdown.markdown(self.long_description_markdown,
                                                      ['extra', 'smarty'])

        if (self.start_date and
                self.end_date and
                self.start_date > self.end_date):
            raise ValidationError("Start date should always come before end "
                                  "date. Got (%r,%r)" % (self.start_date,
                                                         self.end_date))
        # Check times against None, because midnight is represented by 0.
        if (self.start_date == self.end_date and
                self.start_time is not None and
                self.end_time is not None and
                self.start_time > self.end_time):
            raise ValidationError("Start time should always come before end "
                                  "time. Got (%r,%r)" % (self.start_time,
                                                         self.end_time))
Example #22
def get_meta_data(filename):
    """Get the meta-data from posts.
    """
    log.debug('Parsing meta-data from %s' % filename)
    with io.open(filename, 'rt', encoding='utf-8') as f:
        content = f.read()

    meta = {}
    post_match = POST_RE.match(content)

    if post_match:
        meta = load(post_match.group('meta'), Loader=Loader)
        body = post_match.group('body').strip()
        if 'layout' in meta:
            # The body is pure Markdown without any Jinja syntax
            meta['content'] = markdown.markdown(body, ['codehilite'])
            meta['raw'] = wrap_jinja2(body, layout=meta['layout'])
        else:
            # The body contains Jinja syntax
            body_without_jinja = strip_jinja2(body)
            meta['content'] = markdown.markdown(
                body_without_jinja, ['codehilite'])
            meta['raw'] = body
    else:
        meta['raw'] = content.strip()

    meta['filename'] = filename
    return meta
Example #23
 def save(self, force_insert = False, force_update = False):
     if not self.id:
         self.pub_date = datetime.datetime.now()
     self.updated_date = datetime.datetime.now()
     self.excerpt_html = markdown(self.excerpt)
     self.body_html = markdown(self.body)
     super(Entry, self).save(force_insert, force_update)
Example #24
	def save(self, *args, **kwargs):
		if self.id:
			self.Create_Draft()
		if not self.body:
			self.body = 'No text entered.'
			self.body_html = 'No text entered.'
		if not self.guid:
			#GUID Generation
			guid_base = "%s-%s-%s" % (self.user, self.pub_date, self.title)
			guid_encoded = guid_base.encode('ascii', 'ignore')
			guid = md5(guid_encoded).hexdigest()[:16]
			self.guid = guid
		if not self.pub_date:
			self.pub_date = datetime.datetime.now()
		if self.content_format == u'markdown':
			self.deck_html = markdown.markdown(smart_unicode(self.deck))
			self.body_html = markdown.markdown(smart_unicode(self.body))
		else:
			self.body_html = self.body
			self.deck_html = self.deck
		if not self.title:
			self.title = self.guid
			self.slug = slugify(self.guid)
		self.slug = slugify(self.title)
		super(Entry, self).save(*args, **kwargs)
Example #25
    def gener_docs_header(self, branch, one_page):
        header = """
<div id="headwrap" class="columnlist">
    <div id="headl" class="column">{0}</div>
    <div id="headr" class="column">{1}</div>
</div>
    """

        lheader = """
### [Home](/) -> [Documentation][{ot}]

[one_page]: /doc/user_guide.html
[mul_page]: /doc/mpage/index.html """

        rheader = """
### [{type}][{t}]

[one_page]: /doc/user_guide.html
[mul_page]: /doc/mpage/index.html """

        env = {
            'type' : 'Page Per Chapter' if one_page else 'All in One Page',
            'ot'   : 'one_page' if one_page else 'mul_page',
            't'    : 'mul_page' if one_page else 'one_page',
        }
        lheader = markdown(lheader.format(**env), extensions=mdext)
        rheader = markdown(rheader.format(**env), extensions=mdext)

        return header.format(lheader, rheader)
Example #26
def project(request, project_name):
    project = Project.objects.get(short_name=project_name)
    related_projects = Project.objects.filter(related=project.id)
    press_links = Press.objects.filter(projects=project.id)
    media_links = Media.objects.filter(projects=project.id)
    photo_links = Photo.objects.filter(project=project.id)
    content_body = None
    article_path = os.path.join(settings.PORTFOLIO_CONTENT_PATH, str(project.short_name), 'articles')
    desc_file = os.path.join(article_path, 'description.markdown')
    if os.path.isfile(desc_file):
        with open(desc_file, "r") as f:
            content_body = markdown.markdown(f.read())
    articles = []
    for (root, dirs, files) in os.walk(article_path):
        for f in files:
            if os.path.splitext(f)[-1] == ".markdown":
                # parse the article name from the filename
                filename = os.path.split(f)[-1]
                urlname = " ".join(os.path.splitext(filename)[0].split("-")) #" ".join("-".split(os.path.splitext(filename)[0]))                
                urltitle = string.capwords(urlname)
                articles.append({"name": urltitle,"filepath": "/".join([project.short_name,"articles",os.path.splitext(filename)[0]])})

    description = project.description
    if project.description_long is not None:
        description = markdown.markdown(project.description_long)

    photo_thumbnails = {}
    for photo in photo_links:
        if photo.flickr_url is not None:
            photo.photo_url = photo.flickr_url
            photo.thumbnail_url = re.sub(r'.jpg', r'_t.jpg', photo.flickr_url)
    return render_to_response('portfolio/project.djhtml', {"project" : project, "desc": description, "press_links": press_links, "media_links": media_links, "photo_links": photo_links, "photo_thumbnails": photo_thumbnails, "related_projects": related_projects, "articles": articles,  "content_body": content_body })
Example #27
    def content(self):
        page = self.get_page(self.request.matchdict['uuid'])

        if not page:
            raise HTTPNotFound()

        if page.linked_pages:
            linked_pages = to_eg_objects(self.workspace.S(Page).filter(
                uuid__in=page.linked_pages))
        else:
            linked_pages = []

        category = None
        if page.primary_category:
            category = self.get_category(page.primary_category)

        if page.language != self.locale:
            raise HTTPNotFound()

        context = {
            'page': page,
            'linked_pages': linked_pages,
            'primary_category': category,
            'content': markdown(page.content),
            'description': markdown(page.description),
        }
        context.update(self.get_comment_context(page))
        return context
Example #28
def mkdown(target, source, env):
    base = ''
    sourcename = str(source[0])
    sourcedirname = os.path.dirname(sourcename)
    while sourcedirname != '':
        base = '../' + base
        sourcedirname = os.path.dirname(sourcedirname)

    mkstr = source[0].get_text_contents()
    titlere = re.compile(r"^\s*#\s*([^\n]*)(.*)", re.DOTALL)
    title = titlere.findall(mkstr)
    if len(title) == 0:
        title = "Deft"
    else:
        mkstr = title[0][1]
        title = mmdd.markdown(title[0][0])
        title = title[3:len(title)-4]
    mkstr = string.Template(mkstr).safe_substitute(base = base)
    sidebar = string.Template(source[1].get_text_contents()).safe_substitute(base = base)
    template = string.Template(source[2].get_text_contents())

    htmlfile = str(target[0])
    f = open(str(target[0]), 'w')
    f.write(template.safe_substitute(base = base,
                                     title = title,
                                     content = mmdd.markdown(mkstr, extensions=['mathjax']),
                                     sidebar = mmdd.markdown(sidebar, extensions=['mathjax'])))
    f.close()
Example #29
    def save(self, **kwargs):
        new_revision = not self.id
        silent_update =  kwargs.has_key('silent_update')
        if silent_update:
            kwargs.pop('silent_update')
        if new_revision and self.pub_date is None:
            self.pub_date = datetime.datetime.now()
        if not silent_update:
            self.updated_date = datetime.datetime.now()

        # Use safe_mode in Markdown to prevent arbitrary input
        # and strip all html tags from CharFields
        self.version = strip_tags(self.version)
        self.authors = strip_tags(self.authors)
        self.changes_html = markdown(self.changes, safe_mode=True)
        self.description_html = markdown(self.description, safe_mode=True)
        self.tags = strip_tags(self.tags)
        self.language = strip_tags(self.language)
        self.os_license = strip_tags(self.os_license)
        self.paper_bib = strip_tags(self.paper_bib)
        self.operating_systems = strip_tags(self.operating_systems)
        self.dataformats = strip_tags(self.dataformats)
        super(Revision, self).save(**kwargs)

        # Update authorlist, taglist, licenselist, langaugelist, opsyslist
        self.update_list('authorlist','Author','authors')
        self.update_list('taglist','Tag','tags')
        self.update_list('licenselist','License','os_license')
        self.update_list('languagelist','Language','language')
        self.update_list('opsyslist','OpSys','operating_systems')
        self.update_list('dataformatlist','DataFormat','dataformats')

        # send out notifications on updates
        if not silent_update:
            self.software.notify_update()
Example #30
def markdown_filter(value):
  try:
    if isinstance(value, unicode):
      value = value.decode('utf-8')
    return markdown.markdown(value)
  except UnicodeEncodeError:
    return markdown.markdown(value)
Example #31
def main(loc, colorscheme):

    oslist = []
    allmd = []
    group = []
    ap = []

    # Checking correctness of path
    if not os.path.isdir(loc):
        print("Invalid directory. Please try again!", file=sys.stderr)
        sys.exit(1)

    # Writing names of all directories inside 'pages' to a list
    for os_dir in os.listdir(loc):
        oslist.append(os_dir)

    oslist.sort()

    # Required strings to create intermediate HTML files
    header = '<!doctype html><html><head><meta charset="utf-8"><link rel="stylesheet" href="basic.css">'
    if colorscheme != "basic":
        header += '<link rel="stylesheet" href="' + colorscheme + '.css">'

    header += "</head><body>\n"
    footer = "</body></html>"
    title_content = "<h1 class=title-main>tldr pages</h1>" \
        + "<h4 class=title-sub>Simplified and community-driven man pages</h4>" \
        + "<h6 class=title-sub><em><small>Generated on " + datetime.now().strftime("%c") + "</small></em></h6>" \
        + "</body></html>"

    # Creating title page
    with open("title.html", "w") as f:
        f.write(header + title_content)

    group.append(HTML("title.html").render())

    for operating_sys in oslist:

        # Required string to create directory title pages
        dir_title = "<h2 class=title-dir>" + \
            operating_sys.capitalize() + "</h2></body></html>"

        # Creating directory title page for current directory
        with open("dir_title.html", "w") as os_html:
            os_html.write(header + dir_title)

        group.append(HTML("dir_title.html").render())

        # Creating a list of all md files in the current directory
        for temp in glob.glob(os.path.join(loc, operating_sys, "*.md")):
            allmd.append(temp)

        # Sorting all filenames in the directory, to maintain the order of the PDF
        allmd.sort()

        # Conversion of Markdown to HTML
        for page_number, md in enumerate(allmd, start=1):

            with open(md, "r") as inp:
                text = inp.readlines()

            with open("htmlout.html", "w") as out:
                out.write(header)

                for line in text:
                    if re.match(r'^>', line):
                        line = line[:0] + '####' + line[1:]
                    html = markdown.markdown(line)
                    out.write(html)
                out.write(footer)

            group.append(HTML("htmlout.html").render())
            print("Rendered page {} of the directory {}".format(
                str(page_number), operating_sys))

        allmd.clear()

    # Merging all the documents into a single PDF
    for doc in group:
        for p in doc.pages:
            ap.append(p)

    # Writing the PDF to disk, preserving metadata of first `tldr` page
    group[2].copy(ap).write_pdf('tldr-pages.pdf')

    if os.path.exists("tldr-pages.pdf"):
        print("\nCreated tldr-pages.pdf in the current directory!\n")

    # Removing unnecessary intermediate files
    try:
        os.remove("htmlout.html")
        os.remove("title.html")
        os.remove("dir_title.html")
    except OSError:
        print("Error removing temporary file(s)")
Example #32
 def get_deposit_instructions(self, ticker):
     contract = self.session.query(
         models.Contract).filter_by(ticker=ticker).one()
     return markdown.markdown(contract.deposit_instructions,
                              extensions=["extra", "sane_lists", "nl2br"])
Example #33
    return "".join(filter(lambda x: x in VALID_URL_CHARS, s))


def load_yaml(p):
    with open(p) as f:
        return yaml.load(f)


if __name__ == "__main__":
    env = jinja2.Environment(
        loader=jinja2.PackageLoader("website"),
        autoescape=jinja2.select_autoescape(["html", "xml"])
    )

    env.filters["sanitize"] = sanitize
    env.filters["md"] = lambda s: jinja2.Markup(markdown.markdown(s))

    os.makedirs("out", exist_ok=True)
    template_index = env.get_template("index.html")
    with open("out/index.html", "w") as f:
        f.write(template_index.render(**load_yaml("content/index.yaml")))

    os.makedirs("out/cv", exist_ok=True)
    template_cv = env.get_template("cv.html")
    with open("out/cv/index.html", "w") as f:
        f.write(template_cv.render(**load_yaml("content/cv.yaml")))

    os.makedirs("out/blog", exist_ok=True)
    template_post = env.get_template("post.html")

    posts = filter(lambda x: not path.isfile(x), os.listdir("content/posts"))
Example #34
def format_file(path, skip_up_to_date, dependencies_mod):
    basename = os.path.basename(path)
    basename = basename.split('.')[0]

    output_path = "site/" + basename + ".html"

    # See if the HTML is up to date.
    if skip_up_to_date:
        source_mod = max(os.path.getmtime(path), dependencies_mod)
        dest_mod = os.path.getmtime(output_path)

        if source_mod < dest_mod:
            return

    title = ''
    title_html = ''
    part = None
    template_file = 'page'

    errors = []
    sections = []
    header_index = 0
    subheader_index = 0
    has_challenges = False
    design_note = None
    snippets = None

    # Read the markdown file and preprocess it.
    contents = ''
    with open(path, 'r') as input:
        # Read each line, preprocessing the special codes.
        for line in input:
            stripped = line.lstrip()
            indentation = line[:len(line) - len(stripped)]

            if line.startswith('^'):
                command, _, arg = stripped.rstrip('\n').lstrip('^').partition(
                    ' ')
                arg = arg.strip()

                if command == 'title':
                    title = arg
                    title_html = title

                    # Remove any discretionary hyphens from the title.
                    title = title.replace('&shy;', '')

                    # Load the code snippets now that we know the title.
                    snippets = source_code.find_all(title)

                    # If there were any errors loading the code, include them.
                    if title in book.CODE_CHAPTERS:
                        errors.extend(source_code.errors[title])
                elif command == 'part':
                    part = arg
                elif command == 'template':
                    template_file = arg
                elif command == 'code':
                    contents = insert_snippet(snippets, arg, contents, errors)
                else:
                    raise Exception('Unknown command "^{} {}"'.format(
                        command, arg))

            elif stripped.startswith('## Challenges'):
                has_challenges = True
                contents += '<h2><a href="#challenges" name="challenges">Challenges</a></h2>\n'

            elif stripped.startswith('## Design Note:'):
                has_design_note = True
                design_note = stripped[len('## Design Note:') + 1:]
                contents += '<h2><a href="#design-note" name="design-note">Design Note: {}</a></h2>\n'.format(
                    design_note)

            elif stripped.startswith('#') and not stripped.startswith('####'):
                # Build the section navigation from the headers.
                index = stripped.find(" ")
                header_type = stripped[:index]
                header = pretty(stripped[index:].strip())
                anchor = book.get_file_name(header)
                anchor = re.sub(r'[.?!:/"]', '', anchor)

                # Add an anchor to the header.
                contents += indentation + header_type

                if len(header_type) == 2:
                    header_index += 1
                    subheader_index = 0
                    page_number = book.chapter_number(title)
                    number = '{0}&#8202;.&#8202;{1}'.format(
                        page_number, header_index)
                elif len(header_type) == 3:
                    subheader_index += 1
                    page_number = book.chapter_number(title)
                    number = '{0}&#8202;.&#8202;{1}&#8202;.&#8202;{2}'.format(
                        page_number, header_index, subheader_index)

                header_line = '<a href="#{0}" name="{0}"><small>{1}</small> {2}</a>\n'.format(
                    anchor, number, header)
                contents += header_line

                # Build the section navigation.
                if len(header_type) == 2:
                    sections.append([header_index, header])

            else:
                contents += pretty(line)

    # Validate that every snippet for the chapter is included.
    for name, snippet in snippets.items():
        if name != 'not-yet' and name != 'omit' and snippet != False:
            errors.append("Unused snippet {}".format(name))

    # Show any errors at the top of the file.
    if errors:
        error_markdown = ""
        for error in errors:
            error_markdown += "**Error: {}**\n\n".format(error)
        contents = error_markdown + contents

    # Allow processing markdown inside some tags.
    contents = contents.replace('<aside', '<aside markdown="1"')
    contents = contents.replace('<div class="challenges">',
                                '<div class="challenges" markdown="1">')
    contents = contents.replace('<div class="design-note">',
                                '<div class="design-note" markdown="1">')
    body = markdown.markdown(contents, ['extra', 'codehilite', 'smarty'])

    # Turn aside markers in code into spans.
    # <span class="c1">// [repl]</span>
    body = ASIDE_COMMENT_PATTERN.sub(r'<span name="\1"></span>', body)

    up = 'Table of Contents'
    if part:
        up = part
    elif title == 'Table of Contents':
        up = 'Crafting Interpreters'

    data = {
        'title': title,
        'part': part,
        'body': body,
        'sections': sections,
        'chapters': get_part_chapters(title),
        'design_note': design_note,
        'has_challenges': has_challenges,
        'number': book.chapter_number(title),
        'prev': book.adjacent_page(title, -1),
        'prev_type': book.adjacent_type(title, -1),
        'next': book.adjacent_page(title, 1),
        'next_type': book.adjacent_type(title, 1),
        'up': up,
        'toc': book.TOC
    }

    template = environment.get_template(template_file + '.html')
    output = template.render(data)

    # Write the output.
    with codecs.open(output_path, "w", encoding="utf-8") as out:
        out.write(output)

    global total_words
    global num_chapters
    global empty_chapters

    word_count = len(contents.split(None))
    num = book.chapter_number(title)
    if num:
        num = '{}. '.format(num)

    # Non-chapter pages aren't counted like regular chapters.
    if part:
        num_chapters += 1
        if word_count < 50:
            empty_chapters += 1
            print("    {}{}{}{}".format(GRAY, num, title, DEFAULT))
        elif word_count < 2000:
            empty_chapters += 1
            print("  {}-{} {}{} ({} words)".format(YELLOW, DEFAULT, num, title,
                                                   word_count))
        else:
            total_words += word_count
            print("  {}✓{} {}{} ({} words)".format(GREEN, DEFAULT, num, title,
                                                   word_count))
    elif title in ["Crafting Interpreters", "Table of Contents"]:
        print("{}•{} {}{}".format(GREEN, DEFAULT, num, title))
    else:
        if word_count < 50:
            print("  {}{}{}{}".format(GRAY, num, title, DEFAULT))
        else:
            print("{}✓{} {}{} ({} words)".format(GREEN, DEFAULT, num, title,
                                                 word_count))
Example #35
    def make_overall_file(self, files):
        header = '''

Assorted Stories
---

Table of Contents:

[TOC]

<div style="width:500px" markdown="1">

'''
        footer = '''

</div>
		'''

        skeys = list(files.keys())
        skeys = natsort.natsorted(
            skeys,
            key=lambda x:
            (files[x]['author'].lower(), x[0].lower(), x[1].lower()))

        tocstr = ""
        tocstr += header
        for story_key in skeys:
            fkeys = list(files[story_key]['files'].keys())

            fkeys = natsort.natsorted(fkeys, key=lambda x: x.lower())

            for fkey in fkeys:

                if len(files[story_key]) == 1:
                    tocstr += "%s: by %s\n" % (story_key[1], story_key[0])
                else:
                    tocstr += "%s (%s): by %s\n" % (
                        story_key[1], files[story_key]['files'][fkey]['fname'],
                        story_key[0])
                tocstr += "------\n"
                tocstr += "\n"
                tocstr += "<div id='%s'></div>\n" % abs(
                    hash(story_key +
                         (files[story_key]['files'][fkey]['fname'], )))
                tocstr += "\n"
                tocstr += "\n"

        tocstr += footer

        formatted = markdown.markdown(tocstr, extensions=["toc", 'extra'])

        soup = WebRequest.as_soup(formatted)
        for story_key in tqdm.tqdm(skeys, "Building overall file"):
            for fpath, file_dict in tqdm.tqdm(
                    files[story_key]['files'].items(),
                    desc="Processing single input"):
                wat_1 = hash(story_key + (file_dict['fname'], ))
                wat_2 = abs(wat_1)
                shash = str(wat_2)
                tgt_divs = soup.find_all("div", id=shash)
                assert len(
                    tgt_divs) == 1, "Expected 1 div, found %s" % len(tgt_divs)
                tgt_div = tgt_divs[0]
                new_div = WebRequest.as_soup(file_dict['content_div'])

                tgt_div.insert(1, new_div.div)

        out = soup.prettify()
        fout_fname = "Aggregate file %s%s%s%s%s.html" % (
            ((" tag %s" % (self.tags, )) if self.tags else ""),
            ((" author %s" % (self.author, )) if self.author else ""),
            ((" with str %s" % (self.str, )) if self.str else ""),
            ((" with inc_str %s" % (self.inc_str, )) if self.inc_str else ""),
            ((" with exc_str %s" % (self.exc_str, )) if self.exc_str else ""),
        )
        while "  " in fout_fname:
            fout_fname = fout_fname.replace("  ", " ")

        with open(fout_fname, 'w') as fp:
            fp.write(out)

        print("Resulting file size: %s" % len(out))
Example #36
def moderatesignups():
    global commentHashesAndComments
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')

    # redditSession = loginAndReturnRedditSession()
    redditSession = loginOAuthAndReturnRedditSession()
    submissions = getSubmissionsForRedditSession(redditSession)
    flat_comments = getCommentsForSubmissions(submissions)
    retiredHashes = retiredCommentHashes()
    i = 1
    stringio.write(
        '<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    for submission in submissions:
        stringio.write(submission.title)
        stringio.write("<br>\n")
    stringio.write("</h3>\n\n")
    stringio.write(
        '<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">'
    )
    stringio.write(
        '<input type="submit" value="Copy display-during-signup.py stdout to clipboard">'
    )
    stringio.write('</form>')
    for comment in flat_comments:
        # print comment.is_root
        # print comment.score
        i += 1
        commentHash = sha1()
        commentHash.update(comment.fullname)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(
                comment.author
            )  # can be None if author was deleted.  So check for that and skip if it's None.
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(
                authorName
            )  # can be None if author was deleted.  So check for that and skip if it's None.
            stringio.write('</b></font><br>')
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(
                    ' <small><font color="green">(member)</font></small>')
                # if ParticipantCollection().participantNamed(authorName).isStillIn:
                #    stringio.write(' <small><font color="green">(in)</font></small>')
                # else:
                #    stringio.write(' <small><font color="red">(out)</font></small>')
            else:
                stringio.write(
                    ' <small><font color="red">(not a member)</font></small>')
            stringio.write(
                '<form action="takeaction.html" method="post" target="invisibleiframe">'
            )
            stringio.write(
                '<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">'
            )
            # stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
            # stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
            # stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
            stringio.write(
                '<input type="submit" name="actiontotake" value="Skip comment">'
            )
            stringio.write(
                '<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">'
            )
            stringio.write('<input type="hidden" name="username" value="' +
                           b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' +
                           commentHash + '">')
            # stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
            stringio.write('</form>')

            stringio.write(
                bleach.clean(markdown.markdown(comment.body.encode('utf-8')),
                             tags=['p']))
            stringio.write("\n<br><br>\n\n")

    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')
Example #37
 def get_content(self, task):
     content = task.task_spec.documentation
     template = Template(content)
     rendered = template.render(task.data)
     rendered_markdown = markdown.markdown(rendered).replace('\n', '<br>')
     return rendered, rendered_markdown
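
A small sketch of the rendering path above, with inline stand-ins for the task's documentation and data (assuming Template is jinja2.Template, as the render(task.data) call suggests; the names are illustrative):

from jinja2 import Template
import markdown

content = "Hello **{{ name }}**"
rendered = Template(content).render({"name": "Ada"})
rendered_markdown = markdown.markdown(rendered).replace('\n', '<br>')
# rendered           -> 'Hello **Ada**'
# rendered_markdown  -> '<p>Hello <strong>Ada</strong></p>'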
Example #38
def django_view(request):
    contents = strip_preamble(get_contents())
    html = mark_safe(markdown.markdown(contents))  # nosec
    return render(request, 'changelog.html', dict(html=html))
Example #39
def filter_slugify(value):
    if not value:
        return ''
    value = unicodedata.normalize('NFKD',
                                  value).encode('ascii',
                                                'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return re.sub(r'[-\s]+', '-', value)


global_filters = {
    'groupby_sort': filter_groupby_sort,
    'shuffle': filter_shuffle,
    'slugify': filter_slugify,
    'datetimeformat': filter_datetimeformat,
    'markdown': lambda t: jinja2.Markup(markdown.markdown(t)),
}


# Extend the default jinja sandbox
class DeploySandbox(jinja2.sandbox.SandboxedEnvironment):
    def is_safe_attribute(self, obj, attr, value):
        if obj.__class__.__name__ in ('str',
                                      'unicode') and attr in ('format',
                                                              'format_map'):
            # We reject all format strings for now, due to
            # https://www.palletsprojects.com/blog/jinja-281-released/
            # (until we have it safely patched everywhere, *if* we need this elsewhere)
            return False

        return super(DeploySandbox, self).is_safe_attribute(obj, attr, value)
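
For reference, a sketch of what two of the filters registered above produce (outputs follow from the code as written):

filter_slugify("Héllo,  Wörld!")       # -> 'hello-world'
global_filters['markdown']("**hi**")   # -> Markup('<p><strong>hi</strong></p>')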
Example #40
 def get_message_as_markdown(self):
     return mark_safe(markdown(self.message, safe_mode='escape'))
Example #41
def wrapped_markdown(s):
    return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
Example #42
def markdownify(text):
    # safe_mode governs how the function handles raw HTML
    return markdown.markdown(text,
                             safe_mode='escape',
                             extensions=['markdown.extensions.fenced_code'])
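
Note that safe_mode only exists in Python-Markdown 2.x (it was removed in version 3). A sketch of what the call above does under 2.x, with an illustrative input:

markdownify("```\nprint('hi')\n```\n\n<b>raw</b>")
# fenced_code renders the backtick block as <pre><code>...</code></pre>, while
# safe_mode='escape' HTML-escapes the literal <b> tag instead of passing it through.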
Example #43
def markdown_format(text):
    return mark_safe(markdown.markdown(text))
Example #44
def index(request):
    with open('README.md') as fp:
        return HttpResponse(markdown.markdown(fp.read()))
Example #45
def make_html(c):
    return markdown.markdown(c)
Example #46
def markdown_format(text):
    # Named markdown_format to avoid clashing with the markdown template/library name.
    # mark_safe marks the result so the template renders it as safe HTML.
    return mark_safe(markdown.markdown(text))
Example #47
def strip_markdown(markdown_text):
    html = markdown(markdown_text)
    pure_text = ''.join(BeautifulSoup(html).findAll(text=True))
    return pure_text
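
A quick illustration of strip_markdown (assuming markdown and BeautifulSoup are imported as in the snippet):

strip_markdown("# Title\n\nSome *emphasis* here.")
# markdown() yields '<h1>Title</h1>\n<p>Some <em>emphasis</em> here.</p>', and
# joining BeautifulSoup's text nodes gives 'Title\nSome emphasis here.'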
Example #48
    def __init__(self, pk, login_state, user_info):
        """
        构造
        :param pk: 文章主键
        :param login_state: 用户是否登录
        :param user_info: 登录用户的信息
        """
        self.__error = False
        # Error check
        if not Post.objects.filter(pk=pk).exists():
            self.__error = True
        else:
            # Fetch the post model instance
            post = Post.objects.get(pk=pk)
            # Render the body from Markdown
            post.body = markdown(
                post.body,
                extensions=[
                    'markdown.extensions.sane_lists',
                    'markdown.extensions.extra',
                    'markdown.extensions.codehilite',
                    'markdown.extensions.toc', 'markdown.extensions.abbr',
                    'markdown.extensions.attr_list',
                    'markdown.extensions.def_list',
                    'markdown.extensions.fenced_code',
                    'markdown.extensions.footnotes',
                    'markdown.extensions.smart_strong',
                    'markdown.extensions.meta', 'markdown.extensions.nl2br',
                    'markdown.extensions.tables'
                ])
            self.__post = post

            phase_created = datetime.utcnow().replace(
                tzinfo=pytz.timezone('UTC')) - post.created_time
            phase_modified = datetime.utcnow().replace(
                tzinfo=pytz.timezone('UTC')) - post.modified_time
            # Compute the elapsed time since creation and modification
            self.__phase_time = {
                'created': {
                    'days': phase_created.days,
                    'hours': phase_created.seconds // 3600
                },
                'modified': {
                    'days': phase_modified.days,
                    'hours': phase_modified.seconds // 3600
                }
            }

            # Wrap the login information
            self.__login_info = LoginInfo(login_state, user_info)

            # Fetch comment data
            # First, fetch all comments under this post
            comments = Comment.objects.filter(post=pk)
            # Total number of comments
            self.__comments_length = len(comments)
            # Select all top-level comments
            comments_level_1 = comments.filter(
                is_child=False).order_by('-time')
            # Build the comment data structure
            self.__comments = []
            # For each top-level comment, collect its second-level replies and store them in the structure
            for comment in comments_level_1:
                self.__comments.append({})
                self.__comments[-1]['parent'] = comment
                self.__comments[-1]['children'] = []
                # Find the second-level replies under this comment
                children = Comment.objects.filter(is_child=True,
                                                  parent=comment.pk)
                # Store all replies in the structure
                for child in children:
                    self.__comments[-1]['children'].append(child)

            # Get the China timezone
            timezone_china = timezone(timedelta(hours=timezone_delta_hour))
            # Post-process the data, dropping fields that are not needed
            for comment_dict in self.__comments:
                # Process the top-level comment first
                obj1 = KUser.objects.get(pk=comment_dict['parent'].sender)
                new_parent = {
                    'sender': {
                        'user_type': obj1.user_type,
                        'nickname': obj1.nickname,
                        'uid': obj1.uid,
                        'avatar': obj1.avatar,
                        'is_admin': obj1.is_admin,
                        'pk': obj1.pk
                    },
                    'is_child': False,
                    'context': comment_dict['parent'].context,
                    'time': comment_dict['parent'].time.astimezone(timezone_china),
                    'pk': comment_dict['parent'].pk
                }
                comment_dict['parent'] = new_parent
                # Then handle its replies
                new_children = []
                for child in comment_dict['children']:
                    obj1 = KUser.objects.get(pk=child.sender)
                    obj2 = KUser.objects.get(pk=child.receiver)
                    new_children.append({
                        'sender': {
                            'user_type': obj1.user_type,
                            'nickname': obj1.nickname,
                            'uid': obj1.uid,
                            'avatar': obj1.avatar,
                            'is_admin': obj1.is_admin,
                            'pk': obj1.pk
                        },
                        'receiver': {
                            'user_type': obj2.user_type,
                            'nickname': obj2.nickname,
                            'uid': obj2.uid,
                            'avatar': obj2.avatar,
                            'is_admin': obj2.is_admin,
                            'pk': obj2.pk
                        },
                        'is_child': True,
                        'context': child.context,
                        'parent': child.parent,
                        'time': child.time.astimezone(timezone_china),
                        'pk': child.pk
                    })
                comment_dict['children'] = new_children
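# For reference, a sketch (as comments) of the shape self.__comments takes after the
# loops above -- one dict per top-level comment, carrying its replies. The values
# shown are placeholders, not real data.
#
# self.__comments = [
#     {
#         'parent': {
#             'sender': {'user_type': ..., 'nickname': ..., 'uid': ...,
#                        'avatar': ..., 'is_admin': ..., 'pk': ...},
#             'is_child': False,
#             'context': 'comment text',
#             'time': <datetime in the China timezone>,
#             'pk': <comment pk>,
#         },
#         'children': [
#             {'sender': {...}, 'receiver': {...}, 'is_child': True,
#              'context': 'reply text', 'parent': <parent comment pk>,
#              'time': <datetime in the China timezone>, 'pk': <reply pk>},
#         ],
#     },
# ]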
Example #49
0
 def description_html(self):
     return markdown.markdown(self.desc)
Example #50
0
    def get(self, request, anc, *args, **kwargs):
        # Get the ANC and static metadata from ancs.json.
        anc = anc.upper()
        try:
            info = anc_data[anc[0]]["ancs"][anc[1]]
        except KeyError:
            raise Http404()

        # For each SMD, pull in data from our database.
        smds = []
        for smd in info['smds']:
            smddict = copy.deepcopy(info['smds'][smd])
            smddict.update(CommissionerInfo.get_all(anc, smd))
            smds.append(smddict)
        smds.sort(key=lambda x: x['smd'])

        # Get the committees.
        committees = CommissionerInfo.get(anc, None, 'committees')
        if committees is not None:
            committees = re.sub(r"(^|\n)# ", r"\1### ", committees)
            committees = markdown.markdown(committees)

        # Find the next meeting and the most recent two meetings so we can
        # display related documents for those meetings.
        now = datetime.datetime.now()
        all_meetings = meeting_data.get(anc, {}).get("meetings", {})
        all_meetings = sorted([
            datetime.datetime.strptime(m, "%Y-%m-%dT%H:%M:%S")
            for m in all_meetings.keys()
            if all_meetings[m].get("status") != "invalid"
        ])
        next_meeting = None
        for m in all_meetings:
            if m > now:
                next_meeting = m  # this is the first meeting in the future (i.e. the next meeting)
                break
        i = (all_meetings.index(next_meeting)
             if next_meeting is not None else len(all_meetings))
        previous_meetings = all_meetings[i - 2:i]

        prep_hoods(info, True)
        for smd in info["smds"].values():
            prep_hoods(smd, False)

        census_stats = [
            {
                "key": "P0180002",
                "label": "families",
                "details":
                "A group of two or more related people residing together",
                "credit": "US Census"
            },
            {
                "key": "P0180001",
                "label": "households",
                "details":
                "A house, apartment, or room intended as separate living quarters",
                "credit": "US Census"
            },
            {
                "key": "P0010001",
                "label": "residents",
                "details": "The total population of the ANC",
                "credit": "US Census"
            },
            {
                "key": "H0050001_PCT",
                "label": "vacant homes",
                "details": "Vacant housing units out of all housing units",
                "credit": "US Census",
                "is_percent": True
            },
            {
                "key": "B07001_001E_PCT",
                "label": "new residents",
                "details": "Residents who moved into DC in the last year",
                "credit": "US Census",
                "is_percent": True
            },
            {
                "key": "B01002_001E",
                "label": "median age",
                "details": "The median age of all residents in the ANC",
                "credit": "US Census"
            },
            {
                "key": "B19019_001E",
                "label": "median household income",
                "details": "The median income of households in the ANC",
                "credit": "US Census",
                "is_dollars": True
            },
            {
                "key": "POP_DENSITY",
                "label": u"density (pop/mi\u00B2)",
                "details": "Total population divided by the area of the ANC",
                "credit": "US Census"
            },
            {
                "key": "liquor_licenses",
                "label": "liquor licenses",
                "details":
                "Liquor licenses granted by ABRA held by bars and restaurants in the area",
                "credit": "ABRA"
            },
            {
                "key": "building_permits",
                "label": "building permits",
                "details":
                "Permits granted by DCRA for construction or alteration in the area",
                "credit": "DCRA"
            },
            {
                "key": "311_requests",
                "label": "311 requests",
                "details": "Requests to the 311 hotline from this area",
                "credit": "SeeClickFix"
            },
        ]
        for s in census_stats:
            try:
                s["value"] = info["census"][s["key"]]["value"]
            except KeyError:
                s["value"] = 0
            s["grid"] = census_grids[s["key"]]

        # recent ANC documents
        recent_documents = Document.objects.filter(
            anc=anc).order_by('-created')[0:10]

        # documents that *should* exist
        highlight_documents = []
        for mtg in previous_meetings + ([next_meeting]
                                        if next_meeting else []):
            hd_mtg = (mtg, [])
            highlight_documents.append(hd_mtg)

            has_doc = False
            for doc_type_id, doc_type_name in [(14, "Summary"), (2, "Minutes"),
                                               (1, "Agenda")]:
                # If minutes or a summary exist for a meeting, don't bother displaying an
                # agenda (or ask to upload an agenda) for the meeting.
                if has_doc and doc_type_id == 1:
                    continue

                # in case there are two documents of the same type, just get the first
                def first(qs):
                    if qs.count() == 0: raise Document.DoesNotExist()
                    return qs[0]

                # find the document
                try:
                    doc = first(
                        Document.objects.filter(anc=anc,
                                                doc_type=doc_type_id,
                                                meeting_date=mtg))
                    has_doc = True
                except Document.DoesNotExist:
                    doc = None

                # for meetings that aren't behind us, if a summary isn't available
                # don't bother asking the user to upload one.
                if not doc and doc_type_id == 14 and mtg >= now:
                    continue

                # for ANCs that have never had a summary posted, don't ask for one either
                if not doc and doc_type_id == 14 and not Document.objects.filter(
                        anc=anc, doc_type=doc_type_id).exists():
                    continue

                # for meetings that aren't two weeks behind us, if minutes aren't
                # available, don't bother asking for them because they are almost
                # certainly not available yet
                if not doc and doc_type_id == 2 and (now - mtg).days < 14:
                    continue

                hd_mtg[1].insert(0, (doc_type_id, doc_type_name, doc))

        return render(
            request, self.template_name, {
                'anc': anc,
                'info': info,
                'smds': smds,
                'committees': committees,
                'documents': recent_documents,
                'highlight_documents': highlight_documents,
                'census_stats': census_stats,
                'next_meeting': next_meeting,
            })
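# A small hedged sketch of the committees handling in the view above: top-level
# Markdown headings are demoted to level 3 before rendering so they nest under the
# page's own headings. The sample text is an assumption.
import re
import markdown

committees_md = "# Transportation Committee\n\nMeets monthly.\n\n# Zoning Committee\n\nReviews permit requests."
demoted = re.sub(r"(^|\n)# ", r"\1### ", committees_md)
html = markdown.markdown(demoted)
# html now contains <h3>Transportation Committee</h3> ... <h3>Zoning Committee</h3>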
Example #51
0
    # Typography filter
    for field in '''
        contributors 
        image_caption 
        image_source 
        short_write_up
        title 
    '''.split():
        record[field] = typography(record[field])

    # Markdown filter
    for field in '''
        short_write_up
    '''.split():
        record[field] = markdown(record[field])
        # JSON needs \" so Jekyll needs \\ followed by \"
        record[field] = record[field].replace('"', r'\\\"')

    # Semicolon-delimited --> YAML lists filter
    for field in '''
        contributors 
        related_solutions 
        related_stories 
        related_theories 
        tags 
        values
    '''.split():
        value = record[field]
        if value.strip():
            record[field] = '\n' + '\n'.join(u'- "{}"'.format(i)
                                             for i in value.split(';'))
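# A hedged, standalone sketch of the semicolon-delimited -> YAML list conversion
# above; the split-on-';' step follows the filter's own comment and the sample value
# is an assumption.
value = "housing;zoning;transit"
yaml_list = '\n' + '\n'.join(u'- "{}"'.format(i) for i in value.split(';'))
# yaml_list renders as:
# - "housing"
# - "zoning"
# - "transit"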
Example #52
0
 def make_html(self):
     self.content = markdown.markdown(
         open(self.base_path).read(),
         extensions=['fenced_code', 'headerid'])
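# A hedged usage sketch for the fenced_code extension used above; only fenced_code is
# exercised here, and the sample Markdown is an assumption.
import markdown

source = "Example:\n\n```\nprint('hello')\n```\n"
html = markdown.markdown(source, extensions=['fenced_code'])
# html wraps the fenced section in a <pre><code> block.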
Example #53
0
def md(value):
    return markdown.markdown(value)
Example #54
0
def main(source_file, output_file):
    # First, extract two numbers that we need from the source docx file.
    # This is rather incredible but we do need them.
    src_dir = os.path.dirname(__file__)
    blank_docx_file = os.path.join(src_dir, "blank.docx")
    zf = zipfile.ZipFile(blank_docx_file, "r")
    f = zf.open("word/numbering.xml")
    numbering_xml_bytes = f.read()
    numbering_etree = ET.fromstring(numbering_xml_bytes)
    zf.close()

    def w(name):
        return u"{" + w_ns + u"}" + name

    first_numId = max(
        int(num.get(w(u"numId")))
        for num in numbering_etree.findall(w(u"num"))) + 1
    first_abstractNumId = max(
        int(num.get(w(u"abstractNumId")))
        for num in numbering_etree.findall(w(u"abstractNum"))) + 1

    # Load the file, stripping out everything not prefixed with "//>".
    # Treat as bytes; it works because UTF-8 is nice.
    lines = []
    with codecs.open(source_file, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            # TODO: run the pipeline separately for each sequence of //> lines
            # so as not to require extra "blank" //> lines to separate paragraphs.
            if line.startswith(u"//>"):
                line = line[3:]
                if line.startswith(u' '):
                    line = line[1:]
                lines.append(line + u"\n")

    source = u"".join(lines)
    source = preprocess(source)

    # Render from markdown to html to OOXML.
    html = markdown.markdown(source)
    #print(html)
    html_element = html5lib.parse(html, treebuilder="etree")
    word_element, num_pairs = html_to_ooxml(html_element, first_numId,
                                            first_abstractNumId)

    # Word refuses to open the file if we re-serialize the XML, even though the
    # infoset hasn't changed. Classy. I don't have the patience to figure out
    # which xmlns:wtf attribute I need to add. So hack the raw XML bytes
    # instead. (Works.)
    def insert_after(s, pattern, extra):
        # Use rfind (returns -1 when missing) so the not-found check below is reachable
        i = s.rfind(pattern)
        if i == -1:
            raise ValueError("insert_after: pattern not found")
        i += len(pattern)
        return s[:i] + extra + s[i:]

    numbering_xml_bytes = insert_after(
        numbering_xml_bytes, "</w:num>", "".join(
            '<w:num w:numId="{}"><w:abstractNumId w:val="{}"/></w:num>'.format(
                k, v) for k, v in num_pairs))
    numbering_xml_bytes = insert_after(
        numbering_xml_bytes, "</w:abstractNum>",
        "".join('<w:abstractNum w:abstractNumId="{}">'
                '<w:multiLevelType w:val="multilevel"/>'
                '<w:numStyleLink w:val="ag3"/>'
                '</w:abstractNum>'.format(v) for k, v in num_pairs
                if v > 1000))

    # Generate output.
    temp_dir = tempfile.mkdtemp()
    output_docx = "modules.docx"
    temp_output_docx = os.path.join(temp_dir, output_docx)
    try:
        shutil.copy(blank_docx_file, temp_output_docx)
        os.mkdir(os.path.join(temp_dir, "word"))
        with open(os.path.join(temp_dir, "word", "document.xml"), "wb") as f:
            f.write(ET.tostring(word_element, encoding="UTF-8"))
        with open(os.path.join(temp_dir, "word", "numbering.xml"), "wb") as f:
            f.write(numbering_xml_bytes)
        subprocess.check_call([
            "zip", "-u", output_docx, "word/document.xml", "word/numbering.xml"
        ],
                              cwd=temp_dir)
        if os.path.exists(output_docx):
            os.remove(output_docx)
        shutil.move(temp_output_docx, output_file)
    finally:
        shutil.rmtree(temp_dir)
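# A minimal, hedged sketch of the markdown -> HTML -> element-tree step that main()
# performs before the OOXML conversion, isolated from the docx packaging. The sample
# source text is an assumption.
import markdown
import html5lib

source = u"# Module 1\n\nSome *body* text.\n"
html = markdown.markdown(source)
# html5lib builds an xml.etree tree with elements in the XHTML namespace.
html_element = html5lib.parse(html, treebuilder="etree")
body = html_element.find("{http://www.w3.org/1999/xhtml}body")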
Example #55
0
def send_password_reset_link(email, user, secret):
    msg = Message(subject="Reset your password",
        recipients=[email])
    msg.body = render_template("emailreset.md", user=user, secret=secret)
    msg.html = markdown(msg.body)
    mail.send(msg)
Example #56
0
def index():
    """Present some documentation"""
    with open(os.path.dirname(app.root_path) + '/README.md', 'r') as markdown_file:
        content = markdown_file.read()
        return markdown.markdown(content)
Example #57
0
def parse_markdown(value):
    return markdown.markdown(value)
Example #58
0
    def handle(self, *args, **options):

        confirmed_keynotes = []
        confirmed_talks = []
        confirmed_workshops = []

        import dceu2019

        hugo_site_path = os.path.join(
            os.path.dirname(
                os.path.dirname(
                    os.path.dirname(os.path.dirname(dceu2019.__file__)))),
            "hugo_site",
        )

        talk_details_pages_path = os.path.join(
            hugo_site_path,
            "content",
            "talks",
        )

        for submission in submission_models.Submission.objects.filter(
                state=submission_models.SubmissionStates.CONFIRMED):

            # Create extra properties
            props, __ = models.TalkExtraProperties.objects.get_or_create(
                submission=submission)

            props.generate_images()
            self.stdout.write(
                self.style.SUCCESS("Generated new SOME preview images"))

            if not props.slug:
                props.slug = slugify(submission.title)[:50]
                props.save()

            slug = props.slug

            speakers = list(submission.speakers.all())
            speaker_names = ", ".join(
                [person.get_display_name() for person in speakers])

            images = {}

            if not props.published:
                self.stdout.write(
                    self.style.WARNING("Skipping unpublished talk"))
                continue

            for speaker in speakers:
                self.stdout.write(
                    self.style.SUCCESS("Adding confirmed speaker {}".format(
                        speaker.get_display_name())))
                if speaker.avatar:
                    im = get_thumbnail(speaker.avatar,
                                       '200x200',
                                       crop='center',
                                       quality=80)
                    images[speaker] = "https://members.2019.djangocon.eu" + im.url
                elif speaker.get_gravatar:
                    images[speaker] = "https://www.gravatar.com/avatar/" + speaker.gravatar_parameter
                else:
                    images[speaker] = None

            submission_json = {
                'title': submission.title,
                'abstract': submission.abstract,
                'speakers': speaker_names,
                'speaker_image': images[speakers[0]],
                'slug': slug,
                'speaker_twitter': props.speaker_twitter_handle,
                'keynote': 1 if props.keynote else 0,
                'workshop': 1 if props.workshop else 0,
            }

            if props.keynote:
                confirmed_keynotes.append(submission_json)
            elif props.workshop:
                confirmed_workshops.append(submission_json)
            else:
                confirmed_talks.append(submission_json)

            with timezone.override("Europe/Copenhagen"):
                start_time = timezone.localtime(
                    props.submission.slot.start).strftime("%A %H:%M")
                end_time = timezone.localtime(
                    props.submission.slot.end).strftime("%H:%M")

            talk_detail_page_content = TALK_PAGE_HTML.format(
                title=escape(submission.title),
                description=escape(strip_tags(markdown(submission.abstract))),
                speaker=speaker_names,
                speaker_image=images[speakers[0]],
                talk_title=submission.title,
                talk_date=str(datetime.now()),
                talk_abstract=submission.abstract,
                talk_description=submission.description,
                speaker_twitter=props.speaker_twitter_handle or "",
                employer_attribution=props.employer_attribution or "",
                employer_url=props.employer_url or "",
                keynote='true' if props.keynote else 'false',
                workshop='true' if props.workshop else 'false',
                twitter_card='https://members.2019.djangocon.eu' +
                props.twitter_card_image.url,
                room=props.submission.slot.room,
                timeslot=start_time + "-" + end_time,
            )

            talk_page_file = os.path.join(talk_details_pages_path,
                                          slug + ".md")
            open(talk_page_file, "w").write(talk_detail_page_content)

        json_path = os.path.join(hugo_site_path, "data", "talks.json")

        json.dump(confirmed_talks, open(json_path, "w"))

        json_path = os.path.join(hugo_site_path, "data", "keynotes.json")

        json.dump(confirmed_keynotes, open(json_path, "w"))

        json_path = os.path.join(hugo_site_path, "data", "workshops.json")

        json.dump(confirmed_workshops, open(json_path, "w"))
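# A hedged sketch of how the talk page's meta description above is derived: the
# Markdown abstract is rendered to HTML, stripped back to plain text, and escaped
# before being substituted into the page template. The sample abstract is an assumption.
from markdown import markdown
from django.utils.html import escape, strip_tags

abstract = "A talk about **Django** deployments."
description = escape(strip_tags(markdown(abstract)))
# description == "A talk about Django deployments."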
Example #59
0
def test_trailing_whitespace():
    fig_data = ext.draw_aafig(BASIC_BLOCK_TXT)

    trailing_space_result = md.markdown(BASIC_BLOCK_TXT + "  ", extensions=['markdown_aafigure'])
    assert fig_data in trailing_space_result
    assert "```" not in trailing_space_result
Example #60
0
 def body_to_markdown(self):
     return markdown.markdown(self.content,
                              extensions=[
                                  'markdown.extensions.extra',
                                  'markdown.extensions.codehilite',
                              ])