示例#1
0
    def read(self, filename):
        """Read *filename* with the parent Markdown reader, then rewrite
        template/save_as/url metadata for content that lives in a subsite
        directory (a top-level content folder whose name starts with "_").
        """
        content, metadata = super(BetterMarkdownReader, self).read(filename)
        content_prefix = os.path.join(os.getcwd(), "content", "")
        rel_path = filename.replace(content_prefix, "")
        if rel_path[0] == "_":
            parts = rel_path.split(os.sep)
            basename = parts[-1].replace(".md", "")
            subsite = parts[0][1:]  # drop the leading underscore
            page_type = parts[1]
            metadata["ispage"] = page_type == "pages"

            if metadata["ispage"]:
                # fixed, very old date so pages sort behind articles
                metadata["date"] = datetime.datetime.strptime("2000-01-01", "%Y-%m-%d")
                template = os.path.join("subsites", subsite, "page")
                target = os.path.join(subsite, "pages", slugify(basename) + ".html")
            else:
                template = os.path.join("subsites", subsite, "article")
                target = os.path.join(subsite, "articles", slugify(basename) + ".html")

            # the subsite landing page gets its own template and slug
            if "".join(parts[1:]) == "index.md":
                template = os.path.join("subsites", subsite, "index")
                target = os.path.join(subsite, "index.html")
                metadata["slug"] = slugify(subsite + "-" + basename)

            metadata["template"] = template
            metadata["save_as"] = target
            metadata["subsite"] = subsite
            metadata["url"] = target

        return content, metadata
示例#2
0
        def generate_urls(article):
            """Return *article* with its url/save_as attributes rewritten.

            With the CLEAN_URLS_NO_PROXY setting enabled, urls become
            directory-style (``.../`` serving ``index.html``); otherwise
            they keep the flat ``.html`` form.
            """

            if self.settings.get('CLEAN_URLS_NO_PROXY'):
                # cleaning urls: strip the extension, expose the directory
                # as the url and save the content as index.html inside it
                article.save_as = os.path.splitext(article.save_as)[0]
                article.url = article.save_as + '/'
                article.save_as = os.path.join(article.save_as, 'index.html')
                article.author_url = 'author/%s/' % slugify(article.author)
                article.author_save_as = \
                        os.path.join(article.author_url, 'index.html')
                # NOTE(review): category_save_as keeps a '%s' placeholder;
                # callers are expected to interpolate the category name.
                article.category_save_as = 'category/%s/index.html'
                article.category_url = \
                        os.path.dirname(
                                article.category_save_as % article.category
                                ) + '/'

                # saving url for each tag
                article.tag_url = 'tag/%s/'
                article.tag_save_as = os.path.join(article.tag_url, 'index.html')
                if hasattr(article, 'tags'):
                    article.tags_data = {}
                    for tag in article.tags:
                        # NOTE(review): unlike the non-proxy branch below,
                        # the tag is NOT slugified here -- confirm whether
                        # that asymmetry is intentional.
                        article.tag_url = 'tag/%s/' % tag
                        article.tags_data[tag] = article.tag_url

                # cleaning drafts url too
                article.drafts_url = 'drafts/%s/'
                article.drafts_save_as = \
                        os.path.join(article.drafts_url, 'index.html')
            else:
                # flat .html urls; save_as and url coincide in this branch
                article.author_save_as = \
                        'author/%s.html' % slugify(article.author)
                article.author_url = article.author_save_as
                article.category_save_as = 'category/%s.html'
                article.category_url = \
                        article.category_save_as % article.category

                # saving url for each tag
                article.tag_url = 'tag/%s.html'
                article.tag_save_as = article.tag_url
                if hasattr(article, 'tags'):
                    article.tags_data = {}
                    for tag in article.tags:
                        article.tag_url = 'tag/%s.html' % slugify(tag)
                        article.tags_data[tag] = article.tag_url

                # cleaning drafts url too
                article.drafts_url = 'drafts/%s.html'
                article.drafts_save_as = article.drafts_url

            return article
示例#3
0
 def slug(self):
     """Lazily compute and cache the slug for ``self.name``.

     A per-class ``<CLASSNAME>_REGEX_SUBSTITUTIONS`` setting takes
     precedence; otherwise the global ``SLUG_REGEX_SUBSTITUTIONS``
     (defaulting to an empty list) is used.
     """
     if self._slug is not None:
         return self._slug
     class_key = '{}_REGEX_SUBSTITUTIONS'.format(
         self.__class__.__name__.upper())
     try:
         subs = self.settings[class_key]
     except KeyError:
         subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
     self._slug = slugify(self.name, regex_subs=subs)
     return self._slug
示例#4
0
    def test_slugify(self):
        """slugify normalises whitespace, dashes, arrows and unicode;
        with allow_non_ascii=True the input passes through unchanged."""
        samples = [
            ('this is a test', 'this-is-a-test'),
            ('this        is a test', 'this-is-a-test'),
            ('this → is ← a ↑ test', 'this-is-a-test'),
            ('this--is---a test', 'this-is-a-test'),
            ('unicode測試許功蓋,你看到了嗎?',
             'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'),
            ('大飯原発4号機、18日夜起動へ',
             'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),
        ]

        for value, expected in samples:
            self.assertEqual(utils.slugify(value), expected)
            # nothing is changed when allow_non_ascii is True
            self.assertEqual(utils.slugify(value, allow_non_ascii=True), value)
示例#5
0
    def write_output(self):
        """Write the rendered rST document to disk and validate it.

        The category subdirectory is created on demand. Raises
        RstValidationError when docutils reports a system message in the
        generated document.
        """
        # make category dir if necessary
        subdirectory = get_subdirectory_from_path(self.json_file_path)
        sub_dir_path = os.path.join('content', subdirectory)

        # ``with self.lock:`` releases the lock even when the guarded
        # block raises; the previous acquire()/release() pair leaked the
        # lock on any exception in between.
        with self.lock:
            if not os.path.exists(sub_dir_path):
                os.mkdir(sub_dir_path)

        name = slugify(self.data.get('title').strip()) + '.rst'
        path = os.path.join(sub_dir_path, name)
        with self.lock:
            with open(path, 'w') as fp:
                fp.write(self.output)

        # Validate rST: docutils embeds "system-message" markup in the
        # rendered output when the source fails to parse.
        content, metadata = RstReader(DEFAULT_SETTINGS).read(path)
        if all(marker in content for marker in ('system-message', 'docutils')):
            start = content.find('<p class="system-message')
            end = content.find('</p>', start)
            snippet = content[start:end]
            msg_template = 'Unable to parse rST document generated from: {}\n'
            msg_template += '{}'
            msg = msg_template.format(self.title, snippet)
            raise RstValidationError(msg)
示例#6
0
    def __init__(self, *args, **kwargs):
        """Initialise the session: parse the duration ("<n> minutes"),
        localise start/end times, and register a stub Bio for every
        speaker that does not already have one."""
        super(Session, self).__init__(*args, **kwargs)

        has_duration = hasattr(self, 'duration')
        has_start_date = hasattr(self, 'start_date')

        if has_duration:
            # expected format: "<number> minutes"
            d = self.duration.split(' ')
            # ``!=`` replaces the legacy ``<>`` operator, which is a
            # SyntaxError on Python 3.
            if len(d) != 2 or d[1] != "minutes":
                logger.error("Unknown duration format: %s", self.duration)
            self.duration = timedelta(minutes=int(d[0]))

        if has_start_date:
            self.start_date = get_date(self.start_date)
            self.locale_start_date = strftime(self.start_date, "%A %d")
            self.locale_start_time = strftime(self.start_date, "%H:%M")

        if has_duration and has_start_date:
            self.end_date = self.start_date + self.duration
            self.locale_end_time = strftime(self.end_date, "%H:%M")


        if not hasattr(self, 'bios'):
            # collect speaker slugs; unknown speakers get an empty Bio
            bios = conference.bios.by_role_and_slug['speaker']
            self.bios = []
            for speaker in self.speakers:
                slug = slugify(speaker)
                self.bios.append(slug)
                if slug not in bios:
                    bio = Bio("", {'title': speaker}, settings=self.settings,
                                source_path="", context=self._context)
                    conference.add_bio(bio)
示例#7
0
 def __init__(self, name, settings):
     """Store *settings* and *name*, deriving the slug from the name."""
     self.settings = settings
     # assigning ``_name`` directly duplicates what the ``name`` property
     # setter does, but keeps the initialisation explicit
     self._name = name
     self.slug = slugify(name)
     self.name = name
示例#8
0
def posterous2fields(api_token, email, password):
    """Import posterous posts, yielding Pelican field tuples of the form
    (title, content, slug, date, author, [], tags, status, kind, markup).
    """
    import base64
    from datetime import timedelta

    try:
        # py3k import
        import json
    except ImportError:
        # py2 import
        import simplejson as json

    try:
        # py3k import
        import urllib.request as urllib_request
    except ImportError:
        # py2 import
        import urllib2 as urllib_request

    def get_posterous_posts(api_token, email, password, page=1):
        """Fetch one page of posts from the posterous v2 API."""
        # b64encode emits no trailing newline, so no stripping is needed.
        # The old ``encodestring(...).replace("\n", "")`` failed on
        # Python 3 (bytes object, str argument) and ``encodestring`` was
        # removed in Python 3.9.
        base64string = base64.b64encode(("%s:%s" % (email, password)).encode("utf-8"))
        url = ("http://posterous.com/api/v2/users/me/sites/primary/" "posts?api_token=%s&page=%d") % (api_token, page)
        request = urllib_request.Request(url)
        request.add_header("Authorization", "Basic %s" % base64string.decode())
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode("utf-8"))
        return posts

    page = 1
    posts = get_posterous_posts(api_token, email, password, page)
    while len(posts) > 0:
        for post in posts:
            slug = post.get("slug")
            if not slug:
                slug = slugify(post.get("title"))
            tags = [tag.get("name") for tag in post.get("tags")]
            raw_date = post.get("display_date")
            date_object = SafeDatetime.strptime(raw_date[:-6], "%Y/%m/%d %H:%M:%S")
            # display_date carries a trailing "+HHMM" offset; subtract it
            offset = int(raw_date[-5:])
            delta = timedelta(hours=(offset / 100))
            date_object -= delta
            date = date_object.strftime("%Y-%m-%d %H:%M")
            kind = "article"  # TODO: Recognise pages
            status = "published"  # TODO: Find a way for draft posts

            yield (
                post.get("title"),
                post.get("body_cleaned"),
                slug,
                date,
                post.get("user").get("display_name"),
                [],
                tags,
                status,
                kind,
                "html",
            )

        # the old loop fetched page 1 twice; fetch each page exactly once
        page += 1
        posts = get_posterous_posts(api_token, email, password, page)
示例#9
0
    def add(self, new_header, ids):
        """Insert *new_header* into the tree at the appropriate depth and
        return the ``(node, header)`` pair for the created node (or
        delegate to an ancestor when the header is shallower)."""
        new_level = new_header.name
        new_string = new_header.string
        new_id = new_header.attrs.get('id')

        if not new_string:
            # concatenate every non-comment text fragment of the header
            fragments = new_header.find_all(
                text=lambda t: not isinstance(t, Comment),
                recursive=True)
            new_string = "".join(fragments)

        if not new_id:
            new_id = slugify(new_string, ())

        new_id = unique(new_id, ids)  # make sure id is unique
        new_header.attrs['id'] = new_id

        if self.level < new_level:
            child = HtmlTreeNode(self, new_string, new_level, new_id,
                                 self.include_title)
            self.children += [child]
            return child, new_header
        if self.level == new_level:
            sibling = HtmlTreeNode(self.parent, new_string, new_level, new_id,
                                   self.include_title)
            self.parent.children += [sibling]
            return sibling, new_header
        # self.level > new_level: the header belongs to an ancestor
        return self.parent.add(new_header, ids)
示例#10
0
def add_ical(generator, metadata):
    """If the page metadata names a ``calendar`` file, parse its VEVENT
    entries and store them in ``generator.context['events']`` under the
    slugified page title."""
    if 'calendar' not in metadata:
        return
    summ = []
    path = metadata['calendar']
    if not os.path.isabs(path):
        path = os.path.abspath(path)
    # ``with`` closes the file promptly; the old code leaked the handle
    with open(path, 'rb') as fh:
        cal = Calendar.from_ical(fh.read())
    for element in cal.walk():
        if element.name != "VEVENT":
            continue
        eventdict = {}
        # plain text properties are copied as-is
        for key in ('summary', 'description', 'url'):
            value = element.get(key)
            if value is not None:
                eventdict[key] = value
        # date properties expose their datetime via ``.dt``
        for key in ('dtstart', 'dtend'):
            value = element.get(key)
            if value is not None:
                eventdict[key] = value.dt
        summ.append(eventdict)
    # the id of the calendar is the slugified name of the page
    cal_id = utils.slugify(metadata['title'])
    generator.context['events'][cal_id] = summ
示例#11
0
 def __init__(self, name, settings, setting_var=None):
     """Keep *name* and *settings*, derive the slug, and default the
     settings variable name to the upper-cased class name."""
     self.name = name
     self.slug = slugify(self.name)
     self.settings = settings
     # fall back to e.g. "TAG" / "CATEGORY" when no explicit var is given
     self.setting_var = setting_var or self.__class__.__name__.upper()
def make_video_file(videos_dir, video_data):
    """Serialise one veyepar video record to ``<videos_dir>/<slug>.json``.

    Records whose ``veyepar_state`` is below 11 are skipped entirely.
    """
    if video_data['veyepar_state'] < 11:
        return

    related_urls = []
    videos = []
    for item in video_data['videos']:
        item_type, item_url = item['type'], item['url']
        if item_type == 'conf':
            related_urls.append(item_url)
        elif item_type == 'archive':
            videos.append(dict(type=item_type, url=item_url))
        elif item_type == 'host':
            # 'host' entries are recorded with type 'youtube'
            videos.append(dict(type='youtube', url=item_url))

    # the last path segment of source_url is used as the YouTube id
    youtube_id = video_data['source_url'].split('/')[-1]
    file_data = {
        'description': video_data['description'],
        'title': video_data['title'],
        'speakers': video_data['speakers'],
        'recorded': video_data['recorded'].split('T')[0],
        'videos': videos,
        'related_urls': related_urls,
        'thumbnail_url':
            'https://i.ytimg.com/vi/{}/hqdefault.jpg'.format(youtube_id),
        'duration': video_data['duration'],
    }

    path = os.path.join(videos_dir, slugify(file_data['title']) + '.json')
    with open(path, 'w') as fp:
        json.dump(file_data, fp, **JSON_FORMAT_KWARGS)
示例#13
0
 def __init__(self, name, settings):
     """Record *settings* and *name*; derive the slug using the
     configured SLUG_SUBSTITUTIONS."""
     self.settings = settings
     # assigning ``_name`` duplicates the ``name`` property setter on
     # purpose, for clarity
     self._name = name
     subs = self.settings.get('SLUG_SUBSTITUTIONS', ())
     self.slug = slugify(name, subs)
     self.name = name
示例#14
0
 def slug(self):
     """Compute the slug once, combining the global SLUG_SUBSTITUTIONS
     with the category-specific CATEGORY_SUBSTITUTIONS, and cache it."""
     if self._slug is None:
         subs = tuple(self.settings.get('SLUG_SUBSTITUTIONS', ()))
         subs += tuple(self.settings.get('CATEGORY_SUBSTITUTIONS', ()))
         self._slug = slugify(self.name, subs)
     return self._slug
示例#15
0
    def generate_pages(self, writer):
        """Generate the pages on the disk: articles first, then the
        direct templates, then tag/category/author listings and drafts."""

        write = partial(
            writer.write_file,
            relative_urls = self.settings.get('RELATIVE_URLS')
        )

        # to minimize the number of relative path stuff modification
        # in writer, articles pass first
        article_template = self.get_template('article')
        for article in chain(self.translations, self.articles):
            write(article.save_as,
                          article_template, self.context, article=article,
                          category=article.category)

        PAGINATED_TEMPLATES = self.settings.get('PAGINATED_DIRECT_TEMPLATES')
        for template in self.settings.get('DIRECT_TEMPLATES'):
            paginated = {}
            if template in PAGINATED_TEMPLATES:
                paginated = {'articles': self.articles, 'dates': self.dates}

            template_save_as = '%s.html' % template
            if self.settings.get('CLEAN_URLS_NO_PROXY'):
                # also, cleaning archives, categories and tags urls
                if template not in ('index',):
                    template_save_as = '%s/index.html' % template

            write(template_save_as, self.get_template(template), self.context,
                    blog=True, paginated=paginated, page_name=template)

        # and subfolders after that
        tag_template = self.get_template('tag')
        for tag, articles in self.tags.items():
            articles.sort(key=attrgetter('date'), reverse=True)
            dates = [article for article in self.dates if article in articles]
            # NOTE(review): ``article`` below is the loop variable leaked
            # from the articles loop above -- the tag pages reuse the LAST
            # article's tag_save_as pattern. Confirm this is intentional.
            write(article.tag_save_as % slugify(tag), tag_template, self.context, tag=tag,
                articles=articles, dates=dates,
                paginated={'articles': articles, 'dates': dates},
                page_name='tag/%s' % tag)

        category_template = self.get_template('category')
        for cat, articles in self.categories:
            dates = [article for article in self.dates if article in articles]
            # NOTE(review): same leaked ``article`` variable as above.
            write(article.category_save_as % cat, category_template, self.context,
                category=cat, articles=articles, dates=dates,
                paginated={'articles': articles, 'dates': dates},
                page_name='category/%s' % cat)

        author_template = self.get_template('author')
        for aut, articles in self.authors:
            dates = [article for article in self.dates if article in articles]
            # NOTE(review): author_save_as is not parameterised by ``aut``,
            # so every author page appears to target the last article's
            # author path -- verify against how author_save_as is set.
            write(article.author_save_as, author_template, self.context,
                author=aut, articles=articles, dates=dates,
                paginated={'articles': articles, 'dates': dates},
                page_name='author/%s' % aut)

        for article in self.drafts:
            write(article.drafts_save_as % article.slug, article_template, self.context,
                    article=article, category=article.category)
示例#16
0
 def newpage(self, title, markup='md'):
     """
     Create a new page ("appendix") with the specified title. Returns
     the path to the page file.

     Raises KeyError for an unknown *markup* and ScribblerError when a
     file with the resulting slug already exists.
     """
     MARKUP_OPTIONS = {
         'md': "Title: {0}\n\n",
         'rst': "{0}\n###################\n\n",
         'html': "<html>\n\t<head>\n\t\t<title>{0}</title>\n\t</head>"
                 "\n\t<body>\n\t\t\n\t</body>\n</html>\n",
     }
     # Look the template up first, so an unknown markup raises KeyError
     # before any file is created (the old code created the file and then
     # had to delete it again on failure).
     template = MARKUP_OPTIONS[markup]
     name = slugify(title)
     for ext in MARKUP_OPTIONS:
         if os.path.isfile(os.path.join(self.location, self.APPE_DIR,
                           name + '.' + ext)):
             raise ScribblerError('Date and name produce slug the '
                                  'same as existing file ' + name +
                                  '.' + ext)
     basename = name + '.' + markup
     path = os.path.join(self.location, self.APPE_DIR, basename)
     # ``with`` closes the handle even if the write raises
     with open(path, 'w') as out:
         out.write(template.format(title))
     self.appendices[basename] = ScribblerContent(title, '????-??-??',
                                                  os.path.join(self.APPE_DIR,
                                                  basename), self)
     self.save(os.path.join(self.location, self.BACKUP_FILE))
     return path
def get_out_filename(output_path, filename, ext, kind,
        dirpage, dircat, categories, wp_custpost):
    """Build the output path for an imported post, optionally nesting
    pages, WordPress custom post types and categories into
    subdirectories (created on demand)."""
    filename = os.path.basename(filename)

    # Enforce filename restrictions for various filesystems at once; see
    # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
    # we do not need to filter words because an extension will be appended
    filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename)  # invalid chars
    filename = filename.lstrip('.')  # should not start with a dot
    # fall back to '_' for an empty name; allow for 5 extra characters
    filename = (filename or '_')[:249]

    out_filename = os.path.join(output_path, filename + ext)

    if kind == 'page':
        # option to put page posts in a pages/ subdirectory
        if dirpage:
            pages_dir = os.path.join(output_path, 'pages')
            if not os.path.isdir(pages_dir):
                os.mkdir(pages_dir)
            out_filename = os.path.join(pages_dir, filename + ext)
    elif kind != 'article':
        # wp custom post types live in directories named after the type;
        # they may additionally be nested inside their first category
        typename = slugify(kind) if wp_custpost else ''
        if not wp_custpost:
            kind = 'article'
        catname = slugify(categories[0]) if dircat and categories else ''
        target_dir = os.path.join(output_path, typename, catname)
        out_filename = os.path.join(target_dir, filename + ext)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
    elif dircat and categories:
        # plain articles optionally nest inside their first category
        catname = slugify(categories[0])
        cat_dir = os.path.join(output_path, catname)
        out_filename = os.path.join(cat_dir, filename + ext)
        if not os.path.isdir(cat_dir):
            os.mkdir(cat_dir)

    return out_filename
示例#18
0
def fields2pelican(fields, out_markup, output_path, dircat=False):
    """Write imported *fields* out as Pelican source files.

    Each field tuple is (title, content, filename, date, author,
    categories, tags, in_markup). HTML input is converted to
    *out_markup* with pandoc; non-markdown input otherwise falls back
    to rST.
    """
    for title, content, filename, date, author, categories, tags, in_markup in fields:
        if (in_markup == "markdown") or (out_markup == "markdown"):
            ext = '.md'
            header = build_markdown_header(title, date, author, categories, tags)
        else:
            out_markup = "rst"
            ext = '.rst'
            header = build_header(title, date, author, categories, tags)

        filename = os.path.basename(filename)

        # option to put files in directories with categories names
        if dircat and (len(categories) == 1):
            catname = slugify(categories[0])
            out_filename = os.path.join(output_path, catname, filename+ext)
            if not os.path.isdir(os.path.join(output_path, catname)):
                os.mkdir(os.path.join(output_path, catname))
        else:
            out_filename = os.path.join(output_path, filename+ext)

        print(out_filename)

        if in_markup == "html":
            html_filename = os.path.join(output_path, filename+'.html')

            with open(html_filename, 'w', encoding='utf-8') as fp:
                # Replace simple newlines with <br />+newline so that the HTML file
                # represents the original post more accurately
                content = content.replace("\n", "<br />\n")
                fp.write(content)

            cmd = 'pandoc --normalize --reference-links --from=html --to={0} -o "{1}" "{2}"'.format(
                out_markup, out_filename, html_filename)

            try:
                rc = subprocess.call(cmd, shell=True)
                if rc < 0:
                    print("Child was terminated by signal %d" % -rc)
                    exit()
                elif rc > 0:
                    print("Please, check your Pandoc installation.")
                    exit()
            except OSError as e:
                # ``except OSError, e`` was Python 2-only syntax (a
                # SyntaxError on Python 3); ``as`` works on both.
                print("Pandoc execution failed: %s" % e)
                exit()

            os.remove(html_filename)

            with open(out_filename, 'r', encoding='utf-8') as fs:
                content = fs.read()
                if out_markup == "markdown":
                    # In markdown, to insert a <br />, end a line with two or more spaces & then a end-of-line
                    content = content.replace("\\\n ", "  \n")
                    content = content.replace("\\\n", "  \n")

        with open(out_filename, 'w', encoding='utf-8') as fs:
            fs.write(header + content)
示例#19
0
    def generate_context(self):
        """Populate the generator context from the on-disk articles.

        Reads every file under ``self.path`` (excluding ``pages``),
        derives a category from the folder name when none is set,
        optionally builds a permalink prefix, and sorts articles into
        published/draft/noindex buckets.
        """
        # return the list of files to use
        files = self.get_files(self.path, exclude=['pages',])
        all_articles = []
        for f in files:

            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception as e:
                # ``except Exception, e`` was Python 2-only syntax (a
                # SyntaxError on Python 3); the ``as`` form works on both.
                warning(u'Could not process %s\n%s' % (f, str(e)))
                continue

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata.keys():

                if os.path.dirname(f) == self.path:
                    category = self.settings['DEFAULT_CATEGORY']
                else:
                    category = os.path.basename(os.path.dirname(f)).decode('utf-8')

                if category != '':
                    metadata['category'] = unicode(category)

            if 'date' not in metadata.keys()\
                and self.settings['FALLBACK_ON_FS_DATE']:
                    metadata['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)

            article = Article(content, metadata, settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            add_to_url = u''
            if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings:
                article_permalink_structure = self.settings['ARTICLE_PERMALINK_STRUCTURE']
                # escape %(...) so strftime does not consume the mapping keys
                article_permalink_structure = article_permalink_structure.lstrip('/').replace('%(', "%%(")

                # try to substitute any python datetime directive
                add_to_url = article.date.strftime(article_permalink_structure)
                # try to substitute any article metadata in rest file
                add_to_url = add_to_url % article.__dict__
                add_to_url = [slugify(i) for i in add_to_url.split('/')]
                add_to_url = os.path.join(*add_to_url)

            article.url = urlparse.urljoin(add_to_url, article.url)
            article.save_as = urlparse.urljoin(add_to_url, article.save_as)

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
            elif article.status == "noindex":
                self.noindex.append(article)
示例#20
0
    def test_slugify(self):
        """slugify should collapse whitespace/dashes and drop arrows."""
        samples = (('this is a test', 'this-is-a-test'),
                   ('this        is a test', 'this-is-a-test'),
                   (u'this → is ← a ↑ test', 'this-is-a-test'),
                   ('this--is---a test', 'this-is-a-test'))

        for value, expected in samples:
            # assertEqual: assertEquals is a deprecated alias, removed in
            # Python 3.12
            self.assertEqual(utils.slugify(value), expected)
示例#21
0
    def generate_context(self):
        """Group published articles carrying a ``series`` attribute by the
        slugified series name, remembering each series' slug."""
        subs = self.settings.get('SLUG_SUBSTITUTIONS', ())
        for article in self.context['articles']:
            if article.status.lower() != "published":
                continue
            if not hasattr(article, 'series'):
                continue
            slug = slugify(article.series, subs)
            self.series[slug].append(article)

            # remember the slug chosen for each raw series name
            if article.series not in self.series_slug:
                self.series_slug[article.series] = slug
示例#22
0
 def set_extra_info(article):
     """Attach url and comment-count information to *article* and
     return it."""
     article['url'] = article_url.format(
         date=article['metadata']['date'],
         slug=slugify(article['title']))
     # the comment uuid is the article path relative to the content root
     article_filename = os.path.join(cat_path, article['filename'])
     article_uuid = article_filename[len(content_root) + 1:]
     comments = comments_database.get_comments(article_uuid=article_uuid)
     article['comments_count'] = len(list(comments))
     return article
示例#23
0
 def name_to_filename(name):
     """Return the filename under which the pickled notebook for *name*
     is stored: the slugified name plus a ``.pkl`` suffix."""
     return slugify(name) + '.pkl'
示例#24
0
File: gitoyen.py  Project: sbadia/site
def new_post(config, title):
    '''Create a new blog entry from the new_post template.

    The file is named ``<ISO date>.<slug>.md`` inside ``config.blog``.
    '''
    today = datetime.date.today().isoformat()
    filename = config.blog / '.'.join([today, utils.slugify(title), 'md'])
    click.echo('Create new post: %s' % click.style(filename, fg='green'))

    template = config.jinja.get_template('new_post.tplt')
    template.stream(title=title).dump(filename, 'utf8')
示例#25
0
    def test_slugify_substitute(self):
        """Custom substitutions are applied before slugification."""
        subs = (('C++', 'CPP'), ('C#', 'C-SHARP'))
        samples = [
            ('C++ is based on C', 'cpp-is-based-on-c'),
            ('C+++ test C+ test', 'cpp-test-c-test'),
            ('c++, c#, C#, C++', 'cpp-c-sharp-c-sharp-cpp'),
            ('c++-streams', 'cpp-streams'),
        ]
        for value, expected in samples:
            self.assertEqual(utils.slugify(value, subs), expected)
示例#26
0
def posterous2fields(api_token, email, password):
    """Import posterous posts, yielding Pelican field tuples of the form
    (title, content, slug, date, author, [], tags, status, kind, markup).
    """
    import base64
    from datetime import timedelta
    try:
        # py3k import
        import json
    except ImportError:
        # py2 import
        import simplejson as json

    try:
        # py3k import
        import urllib.request as urllib_request
    except ImportError:
        # py2 import
        import urllib2 as urllib_request

    def get_posterous_posts(api_token, email, password, page=1):
        """Fetch one page of posts from the posterous v2 API."""
        # b64encode emits no trailing newline, so no stripping is needed.
        # The old ``encodestring(...).replace('\n', '')`` failed on
        # Python 3 (bytes object, str argument) and ``encodestring`` was
        # removed in Python 3.9.
        base64string = base64.b64encode(
            ("%s:%s" % (email, password)).encode('utf-8'))
        url = ("http://posterous.com/api/v2/users/me/sites/primary/"
               "posts?api_token=%s&page=%d") % (api_token, page)
        request = urllib_request.Request(url)
        request.add_header('Authorization', 'Basic %s' % base64string.decode())
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))
        return posts

    page = 1
    posts = get_posterous_posts(api_token, email, password, page)
    settings = read_settings()
    subs = settings['SLUG_REGEX_SUBSTITUTIONS']
    while len(posts) > 0:
        for post in posts:
            slug = post.get('slug')
            if not slug:
                slug = slugify(post.get('title'), regex_subs=subs)
            tags = [tag.get('name') for tag in post.get('tags')]
            raw_date = post.get('display_date')
            date_object = SafeDatetime.strptime(
                raw_date[:-6], '%Y/%m/%d %H:%M:%S')
            # display_date carries a trailing "+HHMM" offset; subtract it
            offset = int(raw_date[-5:])
            delta = timedelta(hours=(offset / 100))
            date_object -= delta
            date = date_object.strftime('%Y-%m-%d %H:%M')
            kind = 'article'      # TODO: Recognise pages
            status = 'published'  # TODO: Find a way for draft posts

            yield (post.get('title'), post.get('body_cleaned'),
                   slug, date, post.get('user').get('display_name'),
                   [], tags, status, kind, 'html')

        # the old loop fetched page 1 twice; fetch each page exactly once
        page += 1
        posts = get_posterous_posts(api_token, email, password, page)
示例#27
0
    def test_slugify(self):
        """slugify collapses whitespace/dashes, strips arrows and
        transliterates unicode text."""
        samples = (('this is a test', 'this-is-a-test'),
                   ('this        is a test', 'this-is-a-test'),
                   (u'this → is ← a ↑ test', 'this-is-a-test'),
                   ('this--is---a test', 'this-is-a-test'),
                   (u'unicode測試許功蓋,你看到了嗎?', 'unicodece-shi-xu-gong-gai-ni-kan-dao-liao-ma'),
                   (u'大飯原発4号機、18日夜起動へ', 'da-fan-yuan-fa-4hao-ji-18ri-ye-qi-dong-he'),)

        for value, expected in samples:
            # assertEqual: assertEquals is a deprecated alias, removed in
            # Python 3.12
            self.assertEqual(utils.slugify(value), expected)
示例#28
0
def create_file(docroot, filename, date_string, title, *args, **kwargs):
    """Create a content file with Date/Title/Slug headers and optional
    Tags and Category lines. Returns True on success."""
    path = os.path.join(docroot, filename)
    # ``with`` guarantees the handle is flushed and closed; the previous
    # code never closed the file
    with open(path, 'w') as new_file:
        new_file.write('Date: {0}\n'.format(date_string))
        new_file.write('Title: {0}\n'.format(title))
        new_file.write('Slug: {0}\n'.format(slugify(title)))
        if kwargs.get('tags'):
            new_file.write('Tags: {0}\n'.format(kwargs.get('tags')))
        if kwargs.get('category'):
            new_file.write('Category: {0}\n'.format(kwargs.get('category')))
    return True
示例#29
0
    def test_slugify_substitute_and_keeping_non_alphanum(self):
        """A substitution tuple with a third True element keeps the
        replacement's non-alphanumeric characters in the slug."""
        subs = (('Fedora QA', 'fedora.qa', True),
                ('c++', 'cpp'),)
        samples = [
            ('Fedora QA', 'fedora.qa'),
            ('C++ is used by Fedora QA', 'cpp is used by fedora.qa'),
            ('C++ is based on C', 'cpp-is-based-on-c'),
            ('C+++ test C+ test', 'cpp-test-c-test'),
        ]
        for value, expected in samples:
            self.assertEqual(utils.slugify(value, subs), expected)
示例#30
0
File: article.py  Project: AcrDijon/henet
def create_article_or_page():
    """Handle the "new article/page" form POST.

    Determines the target category or page from the submit-button name
    (``cat_add_<category>`` / ``page_add_<page>``), validates the title,
    writes the rendered article to a unique ``.rst`` file, emits the
    creation event and redirects to the new content.
    """
    data = dict(request.POST.decode())
    category = u'resultats'
    page = None

    # The pressed submit button encodes the destination in its name.
    for key in data:
        if key.startswith(u'cat_add_'):
            category = key[len(u'cat_add_'):]
            break
        if key.startswith(u'page_add_'):
            page = key[len(u'page_add_'):]
            break

    title = data.get('title', u'').strip()
    if len(title) == 0:
        # A title is mandatory; bounce back to the originating view.
        app.add_alert(_('A title is required.'))
        if page is None:
            redirect('/category/%s' % category)
        else:
            redirect('/page/%s' % page)
        return

    article = Article()
    article['title'] = data['title']
    article['body'] = data.get('content', DEFAULT_BODY)
    date = datetime.datetime.now()
    article.set_metadata('date', date)

    if page is None:
        # it's an article
        cat_info = dict(app.vars['categories'])[category]
        article.set_metadata('category', cat_info['title'])
        path = cat_info['path']
    else:
        # it's a page
        path = dict(app.vars['pages'])[page]['path']

    # XXX we might want to put it under the year directory
    # On a name clash, append an increasing numeric suffix.  The original
    # ``fullfilename += str(i)`` accumulated digits ("name1", "name12",
    # "name123", ...); rebuild from the base name on every attempt instead.
    base = os.path.join(path, slugify(article['title']))
    fullfilename = base
    suffix = 1
    while os.path.exists(fullfilename + '.rst'):
        fullfilename = base + str(suffix)
        suffix += 1

    with open(fullfilename + '.rst', 'w') as f:
        f.write(article.render().encode('utf8'))

    emit(EVENT_CREATED_CONTENT, article_path=fullfilename)
    # Redirect to the file that was actually written, including any
    # collision suffix (the original redirected to the unsuffixed name).
    filename = os.path.basename(fullfilename)
    if page is None:
        redirect('/category/%s/%s' % (category, filename + '.rst'))
    else:
        redirect('/page/%s/%s' % (page, filename + '.rst'))
示例#31
0
def tumblr2fields(api_key, blogname):
    """ Imports Tumblr posts (API v2)

    Yields (title, content, slug, date, author, categories, tags, kind,
    format) tuples for every post of *blogname*, paging through the API
    via ``offset`` until an empty page is returned.
    """
    from time import strftime, localtime
    try:
        # py3k import
        import json
    except ImportError:
        # py2 import
        import simplejson as json

    try:
        # py3k import
        import urllib.request as urllib_request
    except ImportError:
        # py2 import
        import urllib2 as urllib_request

    def get_tumblr_posts(api_key, blogname, offset=0):
        # filter=raw asks Tumblr for the author's original markup
        # instead of rendered HTML.
        url = "http://api.tumblr.com/v2/blog/%s.tumblr.com/posts?api_key=%s&offset=%d&filter=raw" % (
            blogname, api_key, offset)
        request = urllib_request.Request(url)
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))
        return posts.get('response').get('posts')

    offset = 0
    posts = get_tumblr_posts(api_key, blogname, offset)
    while len(posts) > 0:
        for post in posts:
            title = post.get('title') or post.get('source_title') or post.get(
                'type').capitalize()
            slug = post.get('slug') or slugify(title)
            tags = post.get('tags')
            timestamp = post.get('timestamp')
            date = strftime("%Y-%m-%d %H:%M:%S", localtime(int(timestamp)))
            # Prefix the slug with the post date to keep slugs unique.
            slug = strftime("%Y-%m-%d-", localtime(int(timestamp))) + slug
            format = post.get('format')
            content = post.get('body')
            type = post.get('type')
            if type == 'photo':
                if format == 'markdown':
                    fmtstr = '![%s](%s)'
                else:
                    fmtstr = '<img alt="%s" src="%s" />'
                content = '\n'.join(fmtstr %
                                    (photo.get('caption'),
                                     photo.get('original_size').get('url'))
                                    for photo in post.get('photos'))
                content += '\n\n' + post.get('caption')
            elif type == 'quote':
                if format == 'markdown':
                    fmtstr = '\n\n&mdash; %s'
                else:
                    fmtstr = '<p>&mdash; %s</p>'
                content = post.get('text') + fmtstr % post.get('source')
            elif type == 'link':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('url') + post.get('description')
            elif type == 'audio':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('source_url') + post.get(
                    'caption') + post.get('player')
            elif type == 'video':
                if format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('source_url') + post.get(
                    'caption') + '\n'.join(
                        player.get('embed_code')
                        for player in post.get('player'))
            elif type == 'answer':
                title = post.get('question')
                # Fix: the URL belongs in href and the asker's name in the
                # link text — the original interpolated them swapped.
                content = '<p><a href="%s" rel="external nofollow">%s</a>: %s</p>\n%s' % (
                    post.get('asking_url'), post.get('asking_name'),
                    post.get('question'), post.get('answer'))

            content = content.rstrip() + '\n'
            kind = 'article'
            yield (title, content, slug, date, post.get('blog_name'), [type],
                   tags, kind, format)

        offset += len(posts)
        posts = get_tumblr_posts(api_key, blogname, offset)
示例#32
0
def dc2fields(file):
    """Opens a Dotclear export file, and yield pelican fields.

    Each yielded tuple is (title, content, slug, date, author, categories,
    tags, kind, format).  The export format is a flat text dump whose rows
    are quote-delimited, '","'-separated fields grouped into [category ...]
    and [post ...] sections.
    """
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        error = (
            'Missing dependency '
            '"BeautifulSoup4" and "lxml" required to import Dotclear files.')
        sys.exit(error)

    in_cat = False
    in_post = False
    category_list = {}
    posts = []

    with open(file, 'r', encoding='utf-8') as f:

        # First pass: collect category id -> name mapping and buffer the raw
        # post rows.  A section ends at the first blank line after its header.
        for line in f:
            # remove final \n
            line = line[:-1]

            if line.startswith('[category'):
                in_cat = True
            elif line.startswith('[post'):
                in_post = True
            elif in_cat:
                fields = line.split('","')
                if not line:
                    in_cat = False
                else:
                    # remove 1st and last ""
                    fields[0] = fields[0][1:]
                    # fields[-1] = fields[-1][:-1]
                    # fields[0] is the category id, fields[2] its name
                    category_list[fields[0]] = fields[2]
            elif in_post:
                if not line:
                    in_post = False
                    break
                else:
                    posts.append(line)

    print("%i posts read." % len(posts))

    # Second pass: split each buffered row into its positional fields.
    # The commented assignments document the full Dotclear column layout.
    for post in posts:
        fields = post.split('","')

        # post_id = fields[0][1:]
        # blog_id = fields[1]
        # user_id = fields[2]
        cat_id = fields[3]
        # post_dt = fields[4]
        # post_tz = fields[5]
        post_creadt = fields[6]
        # post_upddt = fields[7]
        # post_password = fields[8]
        # post_type = fields[9]
        post_format = fields[10]
        # post_url = fields[11]
        # post_lang = fields[12]
        post_title = fields[13]
        post_excerpt = fields[14]
        post_excerpt_xhtml = fields[15]
        post_content = fields[16]
        post_content_xhtml = fields[17]
        # post_notes = fields[18]
        # post_words = fields[19]
        # post_status = fields[20]
        # post_selected = fields[21]
        # post_position = fields[22]
        # post_open_comment = fields[23]
        # post_open_tb = fields[24]
        # nb_comment = fields[25]
        # nb_trackback = fields[26]
        post_meta = fields[27]
        # redirect_url = fields[28][:-1]

        # remove seconds
        post_creadt = ':'.join(post_creadt.split(':')[0:2])

        author = ""
        categories = []
        tags = []

        if cat_id:
            # cat_id may hold several comma-separated category ids
            categories = [
                category_list[id].strip() for id in cat_id.split(',')
            ]

        # Get tags related to a post
        # NOTE(review): post_meta appears to be PHP-serialized data
        # (a:1:{s:3:"tag";a:N:{...}}); the replaces below strip the wrapper
        # so only the tag count and quoted names remain — confirm against a
        # real Dotclear export.
        tag = post_meta.replace('{', '').replace('}', '').replace(
            'a:1:s:3:\\"tag\\";a:', '').replace('a:0:', '')
        if len(tag) > 1:
            if int(tag[:1]) == 1:
                # single tag: it is the first quoted token
                newtag = tag.split('"')[1]
                tags.append(
                    BeautifulSoup(newtag, "xml")
                    # bs4 always outputs UTF-8
                    .decode('utf-8'))
            else:
                # multiple tags: quoted names sit at every other index
                i = 1
                j = 1
                while (i <= int(tag[:1])):
                    newtag = tag.split('"')[j].replace('\\', '')
                    tags.append(
                        BeautifulSoup(newtag, "xml")
                        # bs4 always outputs UTF-8
                        .decode('utf-8'))
                    i = i + 1
                    if j < int(tag[:1]) * 2:
                        j = j + 2
        """
        dotclear2 does not use markdown by default unless you use the markdown plugin
        Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown
        """
        if post_format == "markdown":
            content = post_excerpt + post_content
        else:
            content = post_excerpt_xhtml + post_content_xhtml
            content = content.replace('\\n', '')
            post_format = "html"

        kind = 'article'  # TODO: Recognise pages

        yield (post_title, content, slugify(post_title), post_creadt, author,
               categories, tags, kind, post_format)
示例#33
0
def fields2pelican(fields,
                   out_markup,
                   output_path,
                   dircat=False,
                   strip_raw=False,
                   disable_slugs=False):
    """Write one Pelican source file per imported item.

    *fields* yields (title, content, filename, date, author, categories,
    tags, in_markup) tuples.  Markdown input (or requested markdown output)
    is written as ``.md``; anything else is converted via pandoc to reST.

    dircat        -- place files in per-category subdirectories
    strip_raw     -- drop pandoc's --parse-raw pass-through of raw HTML
    disable_slugs -- omit the slug header entirely
    """
    for title, content, filename, date, author, categories, tags, in_markup in fields:
        slug = (filename or None) if not disable_slugs else None

        # Pick the output markup for THIS item only.  The original code
        # assigned the "rst" fallback to the out_markup parameter itself,
        # leaking it into every subsequent iteration of the loop.
        if (in_markup == "markdown") or (out_markup == "markdown"):
            item_markup = out_markup
            ext = '.md'
            header = build_markdown_header(title, date, author, categories,
                                           tags, slug)
        else:
            item_markup = "rst"
            ext = '.rst'
            header = build_header(title, date, author, categories, tags, slug)

        filename = os.path.basename(filename)

        # option to put files in directories with categories names
        if dircat and (len(categories) > 0):
            catname = slugify(categories[0])
            out_filename = os.path.join(output_path, catname, filename + ext)
            if not os.path.isdir(os.path.join(output_path, catname)):
                os.mkdir(os.path.join(output_path, catname))
        else:
            out_filename = os.path.join(output_path, filename + ext)

        print(out_filename)

        if in_markup == "html":
            html_filename = os.path.join(output_path, filename + '.html')

            with open(html_filename, 'w', encoding='utf-8') as fp:
                # Wrap every line in <p> so pandoc receives valid HTML.
                paragraphs = ['<p>{0}</p>'.format(p)
                              for p in content.splitlines()]
                fp.write(''.join(paragraphs))

            parse_raw = '--parse-raw' if not strip_raw else ''
            cmd = ('pandoc --normalize --reference-links {0} --from=html'
                   ' --to={1} -o "{2}" "{3}"').format(parse_raw, item_markup,
                                                      out_filename,
                                                      html_filename)

            try:
                rc = subprocess.call(cmd, shell=True)
                if rc < 0:
                    error = "Child was terminated by signal %d" % -rc
                    exit(error)
                elif rc > 0:
                    error = "Please, check your Pandoc installation."
                    exit(error)
            except OSError as e:
                error = "Pandoc execution failed: %s" % e
                exit(error)

            os.remove(html_filename)

            with open(out_filename, 'r', encoding='utf-8') as fs:
                content = fs.read()
                if item_markup == "markdown":
                    # In markdown a line ending in two spaces means <br/>;
                    # undo pandoc's backslash-escaped hard line breaks.
                    content = content.replace("\\\n ", "  \n")
                    content = content.replace("\\\n", "  \n")

        with open(out_filename, 'w', encoding='utf-8') as fs:
            fs.write(header + content)
示例#34
0
    def __init__(self,
                 content,
                 metadata=None,
                 settings=None,
                 source_path=None,
                 context=None):
        """Minimal content initialiser.

        Stores settings/content/context, exposes every metadata entry as a
        lowercase attribute, derives a slug from the source file's basename
        when none was supplied, and finally announces the new object via
        ``signals.content_object_init``.

        Most of the upstream pelican initialisation (authors, languages,
        date formats, timezone handling, status, summary) is intentionally
        disabled in this variant.
        """
        metadata = {} if metadata is None else metadata
        self.settings = (copy.deepcopy(DEFAULT_CONFIG)
                         if settings is None else settings)
        self.content = content
        self._context = {} if context is None else context
        self.translations = []

        # Expose metadata entries as attributes and keep the mapping
        # available for later inspection.
        merged = dict(metadata)
        for name, val in merged.items():
            setattr(self, name.lower(), val)
        self.metadata = merged

        # No explicit slug: build one from the source file's basename.
        if not hasattr(self, 'slug'):
            stem = os.path.basename(os.path.splitext(source_path)[0])
            self.slug = slugify(
                stem, self.settings.get('SLUG_SUBSTITUTIONS', ()))

        self.source_path = source_path

        signals.content_object_init.send(self)
示例#35
0
 def name(self, name):
     # Property setter: store the new name and keep the slug in sync with
     # it, honouring any configured SLUG_SUBSTITUTIONS.
     self._name = name
     self.slug = slugify(name, self.settings.get('SLUG_SUBSTITUTIONS', ()))
示例#36
0
文件: contents.py 项目: kmike/pelican
    def __init__(self, content, metadata=None, settings=None, filename=None):
        """Build a page/article object from raw content plus metadata.

        Metadata entries become lowercase attributes; author, language,
        slug, save_as/url, date format, summary and status are then filled
        in from the settings when missing.
        """
        # init parameters
        if not metadata:
            metadata = {}
        if not settings:
            settings = _DEFAULT_CONFIG

        self._content = content
        self.translations = []

        self.status = "published"  # default value

        local_metadata = dict(settings.get('DEFAULT_METADATA', ()))
        local_metadata.update(metadata)

        # set metadata as attributes
        for key, value in local_metadata.items():
            setattr(self, key.lower(), value)
        
        # default author to the one in settings if not defined
        if not hasattr(self, 'author'):
            if 'AUTHOR' in settings:
                self.author = settings['AUTHOR']

        # manage languages
        self.in_default_lang = True
        if 'DEFAULT_LANG' in settings:
            default_lang = settings['DEFAULT_LANG'].lower()
            if not hasattr(self, 'lang'):
                self.lang = default_lang

            self.in_default_lang = (self.lang == default_lang)

        # create the slug if not existing, from the title
        if not hasattr(self, 'slug') and hasattr(self, 'title'):
            self.slug = slugify(self.title)

        # create save_as from the slug (+lang)
        if not hasattr(self, 'save_as') and hasattr(self, 'slug'):
            if self.in_default_lang:
                self.save_as = '%s.html' % self.slug
                clean_url = '%s/' % self.slug
            else:
                self.save_as = '%s-%s.html' % (self.slug, self.lang)
                clean_url = '%s-%s/' % (self.slug, self.lang)

        # change the save_as regarding the settings
        # NOTE(review): clean_url is only bound inside the branch above; if
        # CLEAN_URLS is on and the object has neither save_as nor slug this
        # raises NameError — confirm whether that input can occur.
        if settings.get('CLEAN_URLS', False):
            self.url = clean_url
        elif hasattr(self, 'save_as'):
            self.url = self.save_as

        if filename:
            self.filename = filename

        # manage the date format
        if not hasattr(self, 'date_format'):
            if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
                self.date_format = settings['DATE_FORMATS'][self.lang]
            else:
                self.date_format = settings['DEFAULT_DATE_FORMAT']

        if hasattr(self, 'date'):
            # NOTE(review): Python 2 idiom — strftime over a byte-encoded
            # format then decode ('utf' aliases utf-8); on Python 3 str has
            # no decode() so this would fail — confirm target version.
            self.locale_date = self.date.strftime(self.date_format.encode('ascii','xmlcharrefreplace')).decode('utf')

        # manage summary
        if not hasattr(self, 'summary'):
            # lazily-evaluated summary: truncate the content to ~50 words
            # via a property descriptor bound to this single instance
            self.summary = property(lambda self: truncate_html_words(self.content, 50)).__get__(self, Page)

        # manage status
        if not hasattr(self, 'status'):
            self.status = settings['DEFAULT_STATUS']
    def __init__(self,
                 content,
                 metadata=None,
                 settings=None,
                 source_path=None,
                 context=None):
        """Build a content object from raw content and metadata.

        Metadata entries become lowercase attributes (save_as/url are
        renamed to override_*); author(s), language, slug, date format,
        locale dates, timezone and status are then derived from settings.
        Emits ``signals.content_object_init`` when fully initialised.
        """
        if metadata is None:
            metadata = {}
        if settings is None:
            settings = copy.deepcopy(DEFAULT_CONFIG)

        self.settings = settings
        self._content = content
        if context is None:
            context = {}
        self._context = context
        self.translations = []

        local_metadata = dict()
        local_metadata.update(metadata)

        # set metadata as attributes
        for key, value in local_metadata.items():
            # explicit save_as/url become override_* so generators can tell
            # user overrides apart from computed values
            if key in ('save_as', 'url'):
                key = 'override_' + key
            setattr(self, key.lower(), value)

        # also keep track of the metadata attributes available
        self.metadata = local_metadata

        # default template if it's not defined in page
        self.template = self._get_template()

        # First, read the authors from "authors", if not, fallback to "author"
        # and if not use the settings defined one, if any.
        if not hasattr(self, 'author'):
            if hasattr(self, 'authors'):
                self.author = self.authors[0]
            elif 'AUTHOR' in settings:
                self.author = Author(settings['AUTHOR'], settings)

        if not hasattr(self, 'authors') and hasattr(self, 'author'):
            self.authors = [self.author]

        # XXX Split all the following code into pieces, there is too much here.

        # manage languages
        self.in_default_lang = True
        if 'DEFAULT_LANG' in settings:
            default_lang = settings['DEFAULT_LANG'].lower()
            if not hasattr(self, 'lang'):
                self.lang = default_lang

            self.in_default_lang = (self.lang == default_lang)

        # create the slug if not existing, generate slug according to
        # setting of SLUG_ATTRIBUTE
        if not hasattr(self, 'slug'):
            if (settings['SLUGIFY_SOURCE'] == 'title'
                    and hasattr(self, 'title')):
                self.slug = slugify(self.title,
                                    settings.get('SLUG_SUBSTITUTIONS', ()))
            elif (settings['SLUGIFY_SOURCE'] == 'basename'
                  and source_path is not None):
                basename = os.path.basename(os.path.splitext(source_path)[0])
                self.slug = slugify(basename,
                                    settings.get('SLUG_SUBSTITUTIONS', ()))

        self.source_path = source_path

        # manage the date format
        if not hasattr(self, 'date_format'):
            if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
                self.date_format = settings['DATE_FORMATS'][self.lang]
            else:
                self.date_format = settings['DEFAULT_DATE_FORMAT']

        # a (locale, format) tuple selects the locale before formatting;
        # note this changes the process-wide locale as a side effect
        if isinstance(self.date_format, tuple):
            locale_string = self.date_format[0]
            if sys.version_info < (3, ) and isinstance(locale_string,
                                                       six.text_type):
                locale_string = locale_string.encode('ascii')
            locale.setlocale(locale.LC_ALL, locale_string)
            self.date_format = self.date_format[1]

        # manage timezone
        default_timezone = settings.get('TIMEZONE', 'UTC')
        timezone = getattr(self, 'timezone', default_timezone)

        if hasattr(self, 'date'):
            self.date = set_date_tzinfo(self.date, timezone)
            self.locale_date = strftime(self.date, self.date_format)

        if hasattr(self, 'modified'):
            self.modified = set_date_tzinfo(self.modified, timezone)
            self.locale_modified = strftime(self.modified, self.date_format)

        # manage status
        if not hasattr(self, 'status'):
            self.status = getattr(self, 'default_status', None)

        # store the summary metadata if it is set
        if 'summary' in metadata:
            self._summary = metadata['summary']

        signals.content_object_init.send(self)
示例#38
0
def main(group_id, location, time_boundary, event_status, pandoc):
    """Import meetup.com events for a group into per-event reST pages.

    Reads (or interactively prompts for) the meetup.com API key, fetches
    the group's events, and writes ``content/<location>/<date>-<slug>/index.rst``
    for every event not already present, converting the HTML description
    with pandoc or html2rest.
    """
    key_path = os.path.normpath(os.path.expanduser('~/.meetup.com-key'))
    # Fix: key was unbound (NameError on the first request) when the key
    # file did not exist yet.
    key = ''
    if os.path.exists(key_path):
        with open(key_path) as fh:
            key = fh.read().strip()

    cache = FileCache('.web_cache', forever=True)
    requests = CacheControl(
        Session(), cache,
        cache_etags=False,
        heuristic=ExpiresAfter(days=1)
    )

    while True:
        resp = requests.get('https://api.meetup.com/status', params=dict(key=key))
        if resp.status_code == 200:
            break
        elif resp.status_code == 401:
            click.echo('Your meetup.com key is required. You can get it from https://secure.meetup.com/meetup_api/key/\n')

            if click.confirm('Open https://secure.meetup.com/meetup_api/key/ in your web browser?'):
                click.launch('https://secure.meetup.com/meetup_api/key/')

            click.echo('')
            key = click.prompt('Key', hide_input=True)
        else:
            # NOTE(review): click has no `fail` helper — presumably this
            # should raise click.ClickException; confirm intent.
            click.fail('Failed to get meetup.com status. Response was {!r}'.format(resp.text))

    click.secho('For convenience your key is saved in `{}`.\n'.format(key_path), fg='magenta')
    with open(key_path, 'w') as fh:
        fh.write(key)

    while not location:
        location = location or get_input('Location: ', completer=WordCompleter(['cluj', 'iasi', 'timisoara'], ignore_case=True))

    while True:
        group_id = group_id or get_input('Group ID: ', completer=WordCompleter(['Cluj-py', 'RoPython-Timisoara'], ignore_case=True))

        resp = requests.get('https://api.meetup.com/2/events', params=dict(
            key=key,
            group_urlname=group_id,
            time=time_boundary,
            status=event_status,
        ))
        if resp.status_code == 200:
            json = resp.json()
            if json['results']:
                break
            else:
                click.secho('Invalid group `{}`. It has no events!'.format(group_id), fg='red')
                group_id = None
        # Fix: status_code is an int; comparing to the string '400' was
        # never true, so bad requests fell through to the generic branch.
        if resp.status_code == 400:
            click.fail('Failed to get make correct request. Response was {!r}'.format(resp.text))
        else:
            click.secho('Invalid group `{}`. Response was [{}] {!r}'.format(group_id, resp.status_code, resp.text), fg='red')

    # click.echo(pformat(dict(resp.headers)))

    for event in json['results']:
        dt = datetime.fromtimestamp(event['time']/1000)
        click.echo("{}: {}".format(
            dt.strftime('%Y-%m-%d %H:%M:%S'),
            event['name']
        ))
        existing_path = glob(os.path.join('content', '*', dt.strftime('%Y-%m-%d*'), 'index.rst'))
        if existing_path:
            if len(existing_path) > 1:
                click.secho('\tERROR: multiple paths matched: {}'.format(existing_path))
            else:
                click.secho('\t`{}` already exists. Not importing.'.format(*existing_path), fg='yellow')
        else:
            target_dir = os.path.join('content', location, '{}-{}'.format(dt.strftime('%Y-%m-%d'), slugify(event['name'])))
            target_path = os.path.join(target_dir, 'index.rst')
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)

            if pandoc:
                with tempfile.NamedTemporaryFile(delete=False) as fh:
                    fh.write(event['description'].encode('utf-8'))
                rst = subprocess.check_output(['pandoc', '--from=html', '--to=rst', fh.name]).decode('utf-8')
                # Fix: `print fh.name` was a Python 2 print statement — a
                # SyntaxError on Python 3.
                print(fh.name)
                #os.unlink(fh.name)
            else:
                stream = StringIO()
                html2rest(event['description'].encode('utf-8'), writer=stream)
                rst = stream.getvalue().decode('utf-8')

            with io.open(target_path, 'w', encoding='utf-8') as fh:
                fh.write('''{name}
###############################################################

:tags: unknown
:registration:
    meetup.com: {event_url}

{rst}'''.format(rst=rst, **event))
            click.secho('\tWrote `{}`.'.format(target_path), fg='green')
示例#39
0
    def _generate_mbox_articles(self, mboxPath, mboxCategory):
        """Turn every message of an mbox file into a pelican Article.

        Returns the list of Article objects (None implicitly on an
        unreadable mbox).  Each article gets its author from the From
        header, its date from the Date header, and a slug built from
        category, month-year and subject, de-duplicated with a counter.
        """
        baseReader = BaseReader(self.settings)
        category = baseReader.process_metadata('category', mboxCategory)

        # Complain if the mbox path does not exist and is not readable.
        try:
            if not os.path.exists(mboxPath):
                raise RuntimeError
            mbox = mailbox.mbox(mboxPath)
        except Exception:
            logger.error('Could not process mbox file %s', mboxPath)
            return

        # Retrieve some fields from the settings.
        authorString = self.settings.get('MBOX_AUTHOR_STRING')
        markdownify = self.settings.get('MBOX_MARKDOWNIFY')

        # Loop over all messages, turn them into article objects.
        all_articles = []
        slugs = []

        for message in mbox.itervalues():
            # Get author name.
            author = message['from']
            if author is None:
                author = 'Unknown'
            else:
                # Fix: the original `if '<' and '>' in author` only tested
                # for '>' ('<' is an always-truthy literal); require both
                # brackets before stripping the "Name <addr>" email part.
                if '<' in author and '>' in author:
                    author = author[:author.find(' <')]
                author = author.replace('"', '').replace("'", '')
            # As a hack to avoid dealing with the fact that names can collide.
            if authorString is not None and authorString != '':
                author += ' ' + authorString
            authorObject = baseReader.process_metadata('author', author)

            # Get date object, using python-dateutil as an easy hack.
            # If there is no date in the message, abort, we shouldn't bother.
            if message['date'] is None:
                continue
            if parser:
                date = parser.parse(message['date'])
            else:
                logger.error('No python-dateutil, we cannot continue as ' +
                             'date formats cannot be parsed. ')
                continue
            monthYear = date.strftime('%B-%Y').lower()

            # Get title and slug; build year + month into slug.
            subject = message['subject']
            slugSubject = slugify(subject)
            slug = os.path.join(slugify(mboxCategory), monthYear, slugSubject)

            # Hack to handle multiple messages with the same subject.
            if slug in slugs:
                slug += "_%d"
                count = 2
                testSlug = slug % count
                while testSlug in slugs:
                    count += 1
                    testSlug = slug % count
                slug = testSlug
            slugs.append(slug)

            # Code adapted from Stackoverflow for parsing email messages.
            # https://stackoverflow.com/questions/4824376/parse-multi-part-email-with-sub-parts-using-python
            # Code is clumsy, should be refactored.
            if message.is_multipart():
                plaintext = None
                html = None
                for part in message.get_payload():
                    payload = part.get_payload(decode=True)
                    if payload is not None:
                        for charset in message.get_charsets():
                            if charset is not None and charset != 'x-unknown':
                                # These probably shouldn't be 'ignore'.
                                if sys.version_info.major >= 3 and not isinstance(
                                        payload, str):
                                    payload = payload.decode(charset, "ignore")
                                elif sys.version_info.major <= 2:
                                    payload = unicode(payload, charset,
                                                      "ignore").encode(
                                                          "ascii", "replace")
                    if part.get_content_type() == 'text/plain':
                        plaintext = payload
                    if part.get_content_type() == 'text/html':
                        html = payload
                if plaintext is None and html is None:
                    continue
                elif plaintext is None:
                    content = html
                else:
                    # prefer the plaintext part when both are present
                    if sys.version_info.major >= 3 and isinstance(
                            plaintext, bytes):
                        plaintext = plaintext.decode("utf-8", "ignore")
                    content = plaintext_to_html(plaintext, markdownify)
            else:
                payload = message.get_payload(decode=True)
                for charset in message.get_charsets():
                    if charset is not None and charset != 'x-unknown':
                        if sys.version_info.major < 3:
                            payload = unicode(payload, charset,
                                              "ignore").encode(
                                                  "ascii", "replace")
                        else:
                            payload = payload.decode(charset)
                if sys.version_info.major >= 3 and isinstance(payload, bytes):
                    payload = payload.decode("utf-8", "ignore")
                content = plaintext_to_html(payload, markdownify)

            # On python 2, it seems that we need to do this final check of content.
            if sys.version_info.major <= 2:
                content = unicode(content, "us-ascii",
                                  "ignore").encode("ascii", "replace")

            metadata = {
                'title': subject,
                'date': date,
                'category': category,
                'authors': [authorObject],
                'slug': slug
            }

            article = Article(content=content,
                              metadata=metadata,
                              settings=self.settings,
                              source_path=mboxPath,
                              context=self.context)

            # This seems like it cannot happen... but it does without fail.
            article.author = article.authors[0]
            all_articles.append(article)

        return all_articles
示例#40
0
def tumblr2fields(api_key, blogname):
    """Import Tumblr posts (API v2) and yield Pelican field tuples.

    Pages through ``blogname``'s posts and yields, for each post, the
    tuple ``(title, content, slug, date, author, categories, tags,
    status, kind, format)`` until the API returns an empty batch.
    """
    import json
    import urllib.request as urllib_request

    def get_tumblr_posts(api_key, blogname, offset=0):
        # One API page; 'filter=raw' requests unrendered post bodies.
        url = ("https://api.tumblr.com/v2/blog/%s.tumblr.com/"
               "posts?api_key=%s&offset=%d&filter=raw") % (blogname, api_key,
                                                           offset)
        request = urllib_request.Request(url)
        handle = urllib_request.urlopen(request)
        posts = json.loads(handle.read().decode('utf-8'))
        return posts.get('response').get('posts')

    offset = 0
    posts = get_tumblr_posts(api_key, blogname, offset)
    settings = read_settings()
    subs = settings['SLUG_REGEX_SUBSTITUTIONS']
    while len(posts) > 0:
        for post in posts:
            title = \
                post.get('title') or \
                post.get('source_title') or \
                post.get('type').capitalize()
            slug = post.get('slug') or slugify(title, regex_subs=subs)
            tags = post.get('tags')
            timestamp = post.get('timestamp')
            date = SafeDatetime.fromtimestamp(
                int(timestamp)).strftime("%Y-%m-%d %H:%M:%S")
            # Prefix the slug with the post date to keep slugs unique.
            slug = SafeDatetime.fromtimestamp(
                int(timestamp)).strftime("%Y-%m-%d-") + slug
            # Renamed locals avoid shadowing the 'format'/'type' builtins.
            post_format = post.get('format')
            content = post.get('body')
            post_type = post.get('type')
            if post_type == 'photo':
                if post_format == 'markdown':
                    fmtstr = '![%s](%s)'
                else:
                    fmtstr = '<img alt="%s" src="%s" />'
                # BUG FIX: the old code ran '\n'.join() over the formatted
                # *string*, interleaving a newline between every character.
                # Emit one image tag per photo, newline-separated.
                content = '\n'.join(
                    fmtstr % (photo.get('caption'),
                              photo.get('original_size').get('url'))
                    for photo in post.get('photos'))
                content += '\n\n' + post.get('caption')
            elif post_type == 'quote':
                if post_format == 'markdown':
                    fmtstr = '\n\n&mdash; %s'
                else:
                    fmtstr = '<p>&mdash; %s</p>'
                content = post.get('text') + fmtstr % post.get('source')
            elif post_type == 'link':
                if post_format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('url') + post.get('description')
            elif post_type == 'audio':
                if post_format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                content = fmtstr % post.get('source_url') + \
                    post.get('caption') + \
                    post.get('player')
            elif post_type == 'video':
                if post_format == 'markdown':
                    fmtstr = '[via](%s)\n\n'
                else:
                    fmtstr = '<p><a href="%s">via</a></p>\n'
                source = fmtstr % post.get('source_url')
                caption = post.get('caption')
                players = '\n'.join(
                    player.get('embed_code') for player in post.get('player'))
                content = source + caption + players
            elif post_type == 'answer':
                title = post.get('question')
                # BUG FIX: the URL belongs in href and the asker's name in
                # the link text; the old code had the two arguments swapped.
                content = ('<p>'
                           '<a href="%s" rel="external nofollow">%s</a>'
                           ': %s'
                           '</p>\n'
                           ' %s' %
                           (post.get('asking_url'), post.get('asking_name'),
                            post.get('question'), post.get('answer')))

            content = content.rstrip() + '\n'
            kind = 'article'
            status = 'published'  # TODO: Find a way for draft posts

            yield (title, content, slug, date, post.get('blog_name'),
                   [post_type], tags, status, kind, post_format)

        offset += len(posts)
        posts = get_tumblr_posts(api_key, blogname, offset)
示例#41
0
    def generate_context(self):
        """Build the article context.

        Reads every content file (except those under ``pages``), turns
        each into an ``Article``, and sorts the results into
        ``self.tags`` (published) or ``self.drafts``.
        NOTE: legacy Python 2 code (``except Exception, e`` / ``unicode``).
        """

        # return the list of files to use
        files = self.get_files(self.path, exclude=[
            'pages',
        ])
        all_articles = []
        for f in files:

            try:
                content, metadata = read_file(f, settings=self.settings)
            except Exception, e:
                # Skip unreadable/unparsable files rather than aborting.
                warning(u'Could not process %s\n%s' % (f, str(e)))
                continue

            # if no category is set, use the name of the path as a category
            if 'category' not in metadata.keys():

                if os.path.dirname(f) == self.path:
                    # File sits directly in the content root: fall back
                    # to the configured default category.
                    category = self.settings['DEFAULT_CATEGORY']
                else:
                    category = os.path.basename(
                        os.path.dirname(f)).decode('utf-8')

                if category != '':
                    metadata['category'] = unicode(category)

            if 'date' not in metadata.keys()\
                and self.settings['FALLBACK_ON_FS_DATE']:
                # No explicit date: optionally use the file's ctime.
                metadata['date'] = datetime.datetime.fromtimestamp(
                    os.stat(f).st_ctime)

            article = Article(content,
                              metadata,
                              settings=self.settings,
                              filename=f)
            if not is_valid_content(article, f):
                continue

            add_to_url = u''
            if 'ARTICLE_PERMALINK_STRUCTURE' in self.settings:
                article_permalink_structure = self.settings[
                    'ARTICLE_PERMALINK_STRUCTURE']
                # Escape '%(' so strftime expansion runs first and the
                # metadata %-substitution applies afterwards.
                article_permalink_structure = article_permalink_structure.lstrip(
                    '/').replace('%(', "%%(")

                # try to substitute any python datetime directive
                add_to_url = article.date.strftime(article_permalink_structure)
                # try to substitute any article metadata in rest file
                add_to_url = add_to_url % article.__dict__
                add_to_url = [slugify(i) for i in add_to_url.split('/')]
                add_to_url = os.path.join(*add_to_url)

            article.url = urlparse.urljoin(add_to_url, article.url)
            article.save_as = urlparse.urljoin(add_to_url, article.save_as)

            if article.status == "published":
                if hasattr(article, 'tags'):
                    for tag in article.tags:
                        self.tags[tag].append(article)
                all_articles.append(article)
            elif article.status == "draft":
                self.drafts.append(article)
示例#42
0
    def read(self, filename):
        """Read a JSON video description file.

        Returns a ``(content, metadata)`` pair for Pelican: content is
        the rendered summary/description HTML; metadata carries the
        video list, category, slug and related fields.
        """
        with open(filename, 'rt', encoding='UTF-8') as f:
            json_data = json.loads(f.read())

        # Path of this data file relative to the configured data root.
        data_path = str(pathlib.Path(filename).resolve().relative_to(self._absolute_data_path))
        videos = list()
        # iframe embeds vs. HTML5 <video> sources.
        iframe_types = ["youtube", "vimeo", "wistia"]
        html5_types = ["ogg", "mp4", "webm"]
        if 'videos' in json_data and isinstance(json_data['videos'], list) and len(json_data['videos']) > 0:
            for v in json_data['videos']:
                v_data = dict()
                v_data["type"] = v["type"]
                v_data['label'] = _convert_to_label(v['type'])
                v_data['icon'] = _convert_to_icon(v['type'])
                v_data["source_url"] = _get_and_check_none(v, 'url', '')
                v_data["media_url"] = _get_media_url(v["url"], media_type=v["type"])
                if v["type"] in iframe_types:
                    v_data["tag_type"] = "iframe"
                elif v["type"] in html5_types:
                    v_data["tag_type"] = "html5"
                # NOTE(review): a type outside both lists leaves
                # "tag_type" unset — confirm templates tolerate that.

                videos.append(v_data)
        else:
            # Handle data which doesn't have the videos list
            v_data = dict()
            v_data["source_url"] = _get_and_check_none(json_data, 'source_url', '')
            v_data["media_url"] = _get_media_url(v_data['source_url'])
            if "youtube" in v_data["media_url"]:
                v_data["type"] = "youtube"
                v_data["tag_type"] = "iframe"
            elif "vimeo" in v_data["media_url"]:
                v_data["type"] = "vimeo"
                v_data["tag_type"] = "iframe"
            elif v_data["media_url"].endswith(".mp4"):
                v_data["type"] = "mp4"
                v_data["tag_type"] = "html5"
            # NOTE(review): if none of the branches above matched, v_data
            # has no "type" key and the next line raises KeyError — confirm.
            v_data['label'] = _convert_to_label(v_data['type'])
            v_data['icon'] = _convert_to_icon(v_data['type'])
            videos.append(v_data)

        # Category: per-directory category file wins over the JSON field,
        # which in turn falls back to the configured default.
        category_data = _get_category_data(filename)
        category = _get_and_check_none(category_data, 'title')
        if not category:
            category = _get_and_check_none(
                json_data,
                'category',
                self.settings['DEFAULT_CATEGORY']
            )

        title = _get_and_check_none(json_data, 'title', 'Title')
        slug = _get_and_check_none(json_data, 'slug')
        if slug is None:
            slug = slugify(title)

        metadata = {
            'title': title,
            'category': category,
            'tags': _get_and_check_none(json_data, 'tags', []),
            'date': _get_and_check_none(json_data, 'recorded', '1990-01-01'),
            'slug': slug,
            'authors': _get_and_check_none(json_data, 'speakers', []),
            'videos': videos,
            'data_path': data_path,
            'media_url': _get_media_url(_get_and_check_none(json_data, 'source_url', '')),
            'thumbnail_url': _get_and_check_none(json_data, 'thumbnail_url', '/images/default_thumbnail_url.png'),
            'language': _get_and_check_none(json_data, 'language', ''),
            'related_urls': map_related_urls(_get_and_check_none(json_data, 'related_urls', [])),
        }

        # Aliases get a trailing slash so redirect targets are directories.
        alias = _get_and_check_none(json_data, 'alias')
        if alias:
            if not alias.endswith('/'):
                alias += '/'
            metadata['alias'] = alias

        # Send metadata through pelican parser to check pelican required formats
        metadata = {key: self.process_metadata(key, value) for key, value in metadata.items()}

        content = []
        for part in ['summary', 'description']:
            json_part = json_data.get(part)
            if json_part:
                content.append('<h3>{}</h3>'.format(part.title()))
                try:
                    publisher = self._get_publisher(json_part, filename)
                    content.append(publisher.writer.parts.get('body'))
                except SystemExit:
                    # docutils exits on severe ReST errors; degrade to
                    # preformatted text instead of aborting the build.
                    log.warn(
                        "Invalid ReST markup in %s['%s']. Rendering as plain text.",
                        filename.replace(self.settings.get('PATH'), "").strip("/"),
                        part
                    )
                    content.append('<pre>{}</pre>'.format(json_part))

        return "".join(content), metadata
示例#43
0
def fields2pelican(fields,
                   out_markup,
                   output_path,
                   dircat=False,
                   strip_raw=False,
                   disable_slugs=False,
                   dirpage=False,
                   filename_template=None,
                   filter_author=None):
    """Write imported posts out as Pelican source files.

    fields        -- iterable of (title, content, filename, date, author,
                     categories, tags, kind, in_markup) tuples
    out_markup    -- 'markdown' or 'rst' output format
    output_path   -- directory the generated files are written to
    dircat        -- put posts into per-category subdirectories
    strip_raw     -- drop raw blocks when converting HTML with pandoc
    disable_slugs -- omit the slug field from generated headers
    dirpage       -- write 'page' posts into a pages/ subdirectory
    filename_template -- unused here; kept for interface compatibility
    filter_author -- if set, only posts by this author are written
    """
    for (title, content, filename, date, author, categories, tags, kind,
         in_markup) in fields:
        if filter_author and filter_author != author:
            continue
        # Replaces the fragile `cond and a or b` hack with an explicit
        # conditional (same result: None when disabled or filename falsy).
        slug = (filename or None) if not disable_slugs else None
        if (in_markup == "markdown") or (out_markup == "markdown"):
            ext = '.md'
            header = build_markdown_header(title, date, author, categories,
                                           tags, slug)
        else:
            out_markup = "rst"
            ext = '.rst'
            header = build_header(title, date, author, categories, tags, slug)

        filename = os.path.basename(filename)

        # Enforce filename restrictions for various filesystems at once; see
        # http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
        # we do not need to filter words because an extension will be appended
        filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename)  # invalid chars
        filename = filename.lstrip('.')  # should not start with a dot
        if not filename:
            filename = '_'
        filename = filename[:249]  # allow for 5 extra characters

        # option to put page posts in pages/ subdirectory
        if dirpage and kind == 'page':
            pages_dir = os.path.join(output_path, 'pages')
            # makedirs(exist_ok=True) avoids the isdir/mkdir race.
            os.makedirs(pages_dir, exist_ok=True)
            out_filename = os.path.join(pages_dir, filename + ext)
        # option to put files in directories with categories names
        elif dircat and (len(categories) > 0):
            catname = slugify(categories[0])
            cat_dir = os.path.join(output_path, catname)
            out_filename = os.path.join(cat_dir, filename + ext)
            os.makedirs(cat_dir, exist_ok=True)
        else:
            out_filename = os.path.join(output_path, filename + ext)

        print(out_filename)

        if in_markup in ("html", "wp-html"):
            html_filename = os.path.join(output_path, filename + '.html')

            with open(html_filename, 'w', encoding='utf-8') as fp:
                # Replace newlines with paragraphs wrapped with <p> so
                # HTML is valid before conversion
                if in_markup == "wp-html":
                    new_content = decode_wp_content(content)
                else:
                    paragraphs = content.splitlines()
                    paragraphs = ['<p>{0}</p>'.format(p) for p in paragraphs]
                    new_content = ''.join(paragraphs)

                fp.write(new_content)

            # Build the pandoc invocation as an argument list (no shell),
            # so special characters in paths cannot break the command.
            cmd = ['pandoc', '--normalize', '--reference-links']
            if not strip_raw:
                cmd.append('--parse-raw')
            cmd.extend(['--from=html', '--to={0}'.format(out_markup),
                        '-o', out_filename, html_filename])

            try:
                rc = subprocess.call(cmd)
                if rc < 0:
                    error = "Child was terminated by signal %d" % -rc
                    exit(error)

                elif rc > 0:
                    error = "Please, check your Pandoc installation."
                    exit(error)
            except OSError as e:
                error = "Pandoc execution failed: %s" % e
                exit(error)

            os.remove(html_filename)

            with open(out_filename, 'r', encoding='utf-8') as fs:
                content = fs.read()
                if out_markup == "markdown":
                    # In markdown, to insert a <br />, end a line with two
                    # or more spaces followed by an end-of-line.
                    content = content.replace("\\\n ", "  \n")
                    content = content.replace("\\\n", "  \n")

        with open(out_filename, 'w', encoding='utf-8') as fs:
            fs.write(header + content)
示例#44
0
 def slug(self):
     """Return the slug for ``self.name``, computing and caching it lazily."""
     if self._slug is None:
         regex_subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
         self._slug = slugify(self.name, regex_subs=regex_subs)
     return self._slug
示例#45
0
def main():
    """Interactively create a new Pelican markdown post.

    Prompts for the post's location, title, slug, date, tags, related
    posts, number, note and first sentence, then renders the markdown
    template into the destination file.
    """

    print('''Welcome to pelican-post v{v}.

This script will help you create a new pelican post.

Please answer the following questions so this script can generate the post.
    '''.format(v=__version__))

    CONF = {}

    # where to save post
    cwd = os.path.dirname(os.path.abspath(__file__))
    dirpath = os.path.join(cwd, 'content', 'posts')
    rel = os.path.relpath(dirpath, cwd)
    # print(f'{cwd} \n {dirpath} \n {rel}')

    CONF['basedir'] = os.path.abspath(
        os.path.expanduser(
            ask('Where do you want to save the markdown file?',
                answer=str,
                default=rel)))

    CONF['title'] = ask('What will be the title of this post?',
                        answer=str,
                        default='NEW TITLE')

    default_slug = slugify(CONF['title'],
                           DEFAULT_CONFIG['SLUG_REGEX_SUBSTITUTIONS'])

    CONF['slug'] = ask('What will be the slug of this post?',
                       answer=str,
                       default=default_slug)

    extension = '.md'
    dest_file = os.path.join(CONF['basedir'], CONF['slug'] + extension)

    # Avoid clobbering an existing post: de-duplicate the slug with a
    # short random suffix.
    if os.path.exists(dest_file):
        random_chars = uuid.uuid4().hex.upper()[0:6]
        print(
            f'{CONF["slug"]} file already exists, \
                add a random string {random_chars} to slug'
        )
        CONF['slug'] = CONF['slug'] + random_chars
        dest_file = os.path.join(CONF['basedir'], CONF['slug'] + extension)

    time_format = "%Y-%m-%d %H:%M"
    time_string = datetime.datetime.now().strftime(time_format)

    CONF['date'] = ask('What will be the date/time of the post?',
                       answer=str,
                       default=time_string)

    CONF['modified'] = CONF['date']

    # try reading tags, wrap in try block
    # need markdown package in order to read markdown ###
    settings = read_settings('pelicanconf.py', override={})
    pelican = Pelican(settings)
    context = settings.copy()

    context['generated_content'] = {}
    context['static_links'] = set()
    context['static_content'] = {}
    context['localsiteurl'] = settings['SITEURL']

    # Run every generator's generate_context so existing articles and
    # tags are available for the prompts below.
    generators = [
        cls(
            context=context,
            settings=pelican.settings,
            path=pelican.path,
            theme=pelican.theme,
            output_path=pelican.output_path,
        ) for cls in pelican.get_generator_classes()
    ]

    for p in generators:
        if hasattr(p, 'generate_context'):
            p.generate_context()

    tags = context['tags']
    tag_names = [t.name for t, _ in tags]
    tag_names = sorted(tag_names)

    print('> Tags already used in other posts (choose one or more): ')
    for no, tag in enumerate(tag_names, start=1):
        print(f'    {no} -> {tag}')

    rv_tags = ask(
        'Choose one or more tags (e.g. 1, 4)'
        ' 0 to enter new tag(s)',
        answer=str,
        default='0')

    input_tags = False
    if rv_tags == '0':
        tags_str = ask('Enter new tag(s), (e.g. pelican, python)',
                       answer=str,
                       default='')
        tags_str = tags_str.lower()
        input_tags = True
    else:
        # Map the 1-based menu numbers back to tag names.
        index_tags = rv_tags.split(',')
        index_tags = [int(i) - 1 for i in index_tags]
        tags_value = [tag_names[ind] for ind in index_tags]
        tags_str = ', '.join(tags_value)

    CONF['tags'] = tags_str


    CONF['hasrp'] = ask('Do you want to add related posts?',
                        answer=bool,
                        default=False)

    if CONF['hasrp']:
        if input_tags:
            print('Tags are new input, enter related posts in text editor...')
            CONF['related_posts'] = ' '
        else:
            # Collect (de-duplicated) articles sharing any chosen tag.
            all_articles = []
            for tag_name in tags_value:
                for t, articles in tags:
                    if t.name == tag_name:
                        all_articles = all_articles + articles
            all_articles = list(dict.fromkeys(all_articles))
            print('> Posts with same tags (choose one or more) ')
            for no, art in enumerate(all_articles, start=1):
                print(f'    {no} -> {art.title}')

            rv_art = ask(
                'Choose one or more articles (e.g. 1, 3)'
                ' 0 to cancel',
                answer=str,
                default='0')

            if rv_art == '0':
                CONF['hasrp'] = False
            else:
                index_art = rv_art.split(',')
                index_art = [int(i) - 1 for i in index_art]
                related = [all_articles[ind].slug for ind in index_art]
                CONF['related_posts'] = ', '.join(related)

    # Default the post number to one past the highest existing 'no'.
    top_no = 0
    for art in context['articles']:
        try:
            if int(art.no) > top_no:
                top_no = int(art.no)
        except AttributeError as e:
            # articles without a 'no' attribute are simply skipped
            # print('Warning: {0}'.format(e))
            pass
    top_no = top_no + 1
    CONF['no'] = ask('Add a number for the post (start from 1)',
                     answer=int,
                     default=top_no)

    CONF['note'] = ask('Add a short note to yourself for this post?',
                       answer=str,
                       default='note to be added')

    CONF['first_sentence'] = ask('Write the first sentence of the article',
                                 answer=str,
                                 default='This is the first sentence ...')

    # Render the markdown template with everything collected above.
    try:
        with open(dest_file, 'w') as fd:
            _template = Environment(
                loader=BaseLoader).from_string(markdown_temp)
            fd.write(_template.render(**CONF))
    except OSError as e:
        print('Error: {0}'.format(e))

    print('Done. Your new post is available at %s' % rel)
    # import pprint
    # pprint.pprint(CONF)
    # pprint.pprint(tags_str)
    # pprint.pprint(context)

    return
示例#46
0
 def _normalize_key(self, key):
     """Return *key* slugified with the configured regex substitutions."""
     regex_subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
     return six.text_type(slugify(key, regex_subs=regex_subs))
示例#47
0
    def __init__(self, content, metadata=None, settings=None,
                 source_path=None, context=None):
        """Initialize a content object.

        content     -- raw (unrendered) body of the page/article
        metadata    -- dict of metadata parsed from the source file
        settings    -- pelican settings dict (deep-copied default if None)
        source_path -- path of the originating source file
        context     -- shared generation context
        """
        if metadata is None:
            metadata = {}
        if settings is None:
            settings = copy.deepcopy(DEFAULT_CONFIG)

        self.settings = settings
        self._content = content
        if context is None:
            context = {}
        self._context = context
        self.translations = []

        local_metadata = dict(settings['DEFAULT_METADATA'])
        local_metadata.update(metadata)

        # set metadata as attributes
        for key, value in local_metadata.items():
            # save_as/url from metadata become override_* attributes so
            # they take precedence over the computed values.
            if key in ('save_as', 'url'):
                key = 'override_' + key
            setattr(self, key.lower(), value)

        # also keep track of the metadata attributes available
        self.metadata = local_metadata

        # default template if it's not defined in page
        self.template = self._get_template()

        # default author to the one in settings if not defined
        if not hasattr(self, 'author'):
            if 'AUTHOR' in settings:
                self.author = Author(settings['AUTHOR'], settings)

        # XXX Split all the following code into pieces, there is too much here.

        # manage languages
        self.in_default_lang = True
        if 'DEFAULT_LANG' in settings:
            default_lang = settings['DEFAULT_LANG'].lower()
            if not hasattr(self, 'lang'):
                self.lang = default_lang

            self.in_default_lang = (self.lang == default_lang)

        # create the slug if not existing, from the title
        if not hasattr(self, 'slug') and hasattr(self, 'title'):
            self.slug = slugify(self.title,
                                settings.get('SLUG_SUBSTITUTIONS', ()))

        self.source_path = source_path

        # manage the date format
        if not hasattr(self, 'date_format'):
            if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
                self.date_format = settings['DATE_FORMATS'][self.lang]
            else:
                self.date_format = settings['DEFAULT_DATE_FORMAT']

        # a (locale, format) tuple selects the locale before formatting
        if isinstance(self.date_format, tuple):
            locale_string = self.date_format[0]
            # py2 setlocale wants a byte string, not unicode
            if sys.version_info < (3, ) and isinstance(locale_string,
                                                       six.text_type):
                locale_string = locale_string.encode('ascii')
            locale.setlocale(locale.LC_ALL, locale_string)
            self.date_format = self.date_format[1]

        if hasattr(self, 'date'):
            self.locale_date = strftime(self.date, self.date_format)

        # manage status
        if not hasattr(self, 'status'):
            self.status = settings['DEFAULT_STATUS']
            if not settings['WITH_FUTURE_DATES']:
                # future-dated content is demoted to draft unless allowed
                if hasattr(self, 'date') and self.date > datetime.now():
                    self.status = 'draft'

        # store the summary metadata if it is set
        if 'summary' in metadata:
            self._summary = metadata['summary']

        signals.content_object_init.send(self)
示例#48
0
    def __init__(self, content, metadata=None, settings=None, filename=None):
        """Initialize a content object (legacy Python 2 code path).

        content  -- raw body of the page/article
        metadata -- dict of metadata parsed from the source file
        settings -- pelican settings dict (module default if None)
        filename -- originating file path, stored on the instance
        """
        # init parameters
        if not metadata:
            metadata = {}
        if not settings:
            settings = _DEFAULT_CONFIG

        self._content = content
        self.translations = []

        local_metadata = dict(settings.get('DEFAULT_METADATA', ()))
        local_metadata.update(metadata)

        # set metadata as attributes
        for key, value in local_metadata.items():
            setattr(self, key.lower(), value)

        # default author to the one in settings if not defined
        if not hasattr(self, 'author'):
            if 'AUTHOR' in settings:
                self.author = settings['AUTHOR']
            else:
                # last resort: fall back to the OS user name
                self.author = getenv('USER', 'John Doe')
                warning(u"Author of `{0}' unknow, assuming that his name is `{1}'".format(filename or self.title, self.author))

        # manage languages
        self.in_default_lang = True
        if 'DEFAULT_LANG' in settings:
            default_lang = settings['DEFAULT_LANG'].lower()
            if not hasattr(self, 'lang'):
                self.lang = default_lang

            self.in_default_lang = (self.lang == default_lang)

        # create the slug if not existing, from the title
        if not hasattr(self, 'slug') and hasattr(self, 'title'):
            self.slug = slugify(self.title)

        # create save_as from the slug (+lang)
        if not hasattr(self, 'save_as') and hasattr(self, 'slug'):
            if self.in_default_lang:
                if settings.get('CLEAN_URLS', False):
                    self.save_as = '%s/index.html' % self.slug
                else:
                    self.save_as = '%s.html' % self.slug

                clean_url = '%s/' % self.slug
            else:
                if settings.get('CLEAN_URLS', False):
                    self.save_as = '%s-%s/index.html' % (self.slug, self.lang)
                else:
                    self.save_as = '%s-%s.html' % (self.slug, self.lang)

                clean_url = '%s-%s/' % (self.slug, self.lang)

        # change the save_as regarding the settings
        # NOTE(review): if 'save_as' arrived via metadata, clean_url is
        # never assigned above and this branch raises NameError when
        # CLEAN_URLS is enabled -- confirm and guard if so.
        if settings.get('CLEAN_URLS', False):
            self.url = clean_url
        elif hasattr(self, 'save_as'):
            self.url = self.save_as

        if filename:
            self.filename = filename

        # manage the date format
        if not hasattr(self, 'date_format'):
            if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
                self.date_format = settings['DATE_FORMATS'][self.lang]
            else:
                self.date_format = settings['DEFAULT_DATE_FORMAT']

        if hasattr(self, 'date'):
            # encode/decode dance works around py2 strftime unicode issues
            if platform == 'win32':
                self.locale_date = self.date.strftime(self.date_format.encode('ascii','xmlcharrefreplace')).decode(stdin.encoding)
            else:
                self.locale_date = self.date.strftime(self.date_format.encode('ascii','xmlcharrefreplace')).decode('utf')

        # manage status
        if not hasattr(self, 'status'):
            self.status = settings['DEFAULT_STATUS']
            if not settings['WITH_FUTURE_DATES']:
                if hasattr(self, 'date') and self.date > datetime.now():
                    self.status = 'draft'

        # set summary
        if not hasattr(self, 'summary'):
            self.summary = truncate_html_words(self.content, 50)
示例#49
0
    def slug(self):
        """Return the explicit slug metadata, or one slugified from the title."""
        explicit = self.get('slug')
        if explicit:
            return explicit
        return slugify(self.get('title', 'missing-title'))
示例#50
0
    def read(self, filename):
        """Parse a Gwent deck file into ``('', parsed)`` for Pelican.

        File format: '//'-prefixed metadata lines, card lines
        ('<count> <name>'), and an optional description introduced by a
        '---' line that runs to end of file.
        """
        metadata = {
            "category": "Gwent_Deck",
            "date": "2020-04-13",
            "template": "gwent_deck",
        }

        deck_data = []
        description = []
        leader = None
        stratagem = None

        with open(filename, "r", encoding="utf-8") as fin:
            for line in fin:
                if line.startswith("//"):
                    tag, value = parse_meta(line)
                    metadata[tag.lower()] = value
                elif line.startswith("---"):
                    # This is the description, read until end of file
                    for dl in fin:
                        description.append(dl.strip())
                elif line.strip() != "":
                    card_count, card_name = parse_card_line(line)
                    card_version = metadata["gwent_version"]
                    # Fetch (and cache) card data for this game version.
                    self.add_card_data(card_name, card_version)

                    card_data = {
                        "name": card_name,
                        "count": card_count,
                        "data": self.cached_data[card_version][card_name],
                    }

                    # Leader and stratagem cards live outside the deck list.
                    if (self.cached_data[card_version][card_name]["category"]
                            == "Leader"):
                        leader = card_data
                    elif (self.cached_data[card_version][card_name]["type"] ==
                          "stratagem"):
                        stratagem = card_data
                    else:
                        deck_data.append(card_data)

        self.write_cache()

        metadata["title"] = metadata["name"]
        metadata["slug"] = slugify(
            metadata["title"] + "_" + metadata["gwent_version"],
            regex_subs=self.settings.get("SLUG_REGEX_SUBSTITUTIONS", []),
        )
        metadata["description"] = "\n".join(description)
        metadata[
            "url"] = f"gwent/{metadata['gwent_version']}/{metadata['slug']}/"
        metadata["save_as"] = f"{metadata['url']}index.html"

        # Aggregate deck statistics; the stratagem counts toward totals.
        parsed = {
            "provisions": 0,
            "units": 0,
            "scraps": 0,
            "cards": sum([c["count"] for c in deck_data]),
        }

        for card in deck_data + [stratagem]:
            parsed["provisions"] += card["data"]["provision"] * card["count"]
            if card["data"]["type"] == "unit":
                parsed["units"] += card["count"]
            # Crafting cost in scraps, by card rarity.
            if card["data"]["rarity"] == "legendary":
                parsed["scraps"] += 800 * card["count"]
            elif card["data"]["rarity"] == "epic":
                parsed["scraps"] += 200 * card["count"]
            elif card["data"]["rarity"] == "rare":
                parsed["scraps"] += 80 * card["count"]
            else:
                parsed["scraps"] += 30 * card["count"]

        for key, value in metadata.items():
            parsed[key] = self.process_metadata(key, value)

        parsed["deck"] = deck_data
        parsed["leader"] = leader
        parsed["stratagem"] = stratagem

        # Chart data: per-copy provision costs sorted ascending, with
        # colors and labels aligned index-for-index.
        parsed["stats"] = {
            "provisions": [],
            "cumulative_provisions": [],
            "card_colors": [],
            "labels": [],
        }

        for card in sorted(deck_data, key=lambda x: x["data"]["provision"]):
            for _ in range(card["count"]):
                parsed["stats"]["provisions"].append(
                    int(card["data"]["provision"]))
                parsed["stats"]["card_colors"].append(card["data"]["color"])
                parsed["stats"]["labels"].append(card["data"]["name"])

        parsed["stats"]["cumulative_provisions"] = list(
            accumulate(parsed["stats"]["provisions"]))

        return "", parsed
示例#51
0
 def __init__(self, name, settings):
     """Store *name* and *settings*, pre-computing the slug from the name."""
     self.name = name
     self.settings = settings
     self.slug = slugify(name)
示例#52
0
    def __init__(self, content, metadata=None, settings=None, filename=None):
        """Initialize a content object.

        content  -- raw body of the page/article
        metadata -- dict of metadata parsed from the source file
        settings -- pelican settings dict (deep-copied default if None)
        filename -- originating file path, stored on the instance
        """
        # init parameters
        if not metadata:
            metadata = {}
        if not settings:
            settings = copy.deepcopy(_DEFAULT_CONFIG)

        self.settings = settings
        self._content = content
        self.translations = []

        local_metadata = dict(settings.get('DEFAULT_METADATA', ()))
        local_metadata.update(metadata)

        # set metadata as attributes
        for key, value in local_metadata.items():
            setattr(self, key.lower(), value)

        # also keep track of the metadata attributes available
        self.metadata = local_metadata

        # default template if it's not defined in page
        self.template = self._get_template()

        # default author to the one in settings if not defined
        if not hasattr(self, 'author'):
            if 'AUTHOR' in settings:
                self.author = Author(settings['AUTHOR'], settings)
            else:
                # last resort: fall back to the OS user name
                title = filename.decode('utf-8') if filename else self.title
                self.author = Author(getenv('USER', 'John Doe'), settings)
                logger.warning(
                    u"Author of `{0}' unknown, assuming that his name is "
                    "`{1}'".format(title, self.author))

        # manage languages
        self.in_default_lang = True
        if 'DEFAULT_LANG' in settings:
            default_lang = settings['DEFAULT_LANG'].lower()
            if not hasattr(self, 'lang'):
                self.lang = default_lang

            self.in_default_lang = (self.lang == default_lang)

        # create the slug if not existing, from the title
        if not hasattr(self, 'slug') and hasattr(self, 'title'):
            self.slug = slugify(self.title)

        if filename:
            self.filename = filename

        # manage the date format
        if not hasattr(self, 'date_format'):
            if hasattr(self, 'lang') and self.lang in settings['DATE_FORMATS']:
                self.date_format = settings['DATE_FORMATS'][self.lang]
            else:
                self.date_format = settings['DEFAULT_DATE_FORMAT']

        # a (locale, format) tuple selects the locale before formatting
        if isinstance(self.date_format, tuple):
            locale.setlocale(locale.LC_ALL, self.date_format[0])
            self.date_format = self.date_format[1]

        if hasattr(self, 'date'):
            # encode/decode dance works around strftime unicode handling
            encoded_date = self.date.strftime(
                self.date_format.encode('ascii', 'xmlcharrefreplace'))

            if platform == 'win32':
                self.locale_date = encoded_date.decode(stdin.encoding)
            else:
                self.locale_date = encoded_date.decode('utf')

        # manage status
        if not hasattr(self, 'status'):
            self.status = settings['DEFAULT_STATUS']
            if not settings['WITH_FUTURE_DATES']:
                if hasattr(self, 'date') and self.date > datetime.now():
                    self.status = 'draft'

        # store the summary metadata if it is set
        if 'summary' in metadata:
            self._summary = metadata['summary']
示例#53
0
 def _normalize_key(self, key):
     """Slugify *key* with the configured substitutions and return it as text."""
     substitutions = self.settings.get('SLUG_SUBSTITUTIONS', ())
     return six.text_type(slugify(key, substitutions))
示例#54
0
def main(group_id, location, time_boundary, event_status, pandoc, force):
    """Import meetup.com events for a RoPython group as Pelican articles.

    Validates (prompting if necessary) the meetup.com API key cached in
    ``~/.meetup.com-key``, asks for the location and group, fetches the
    group's events and writes one ``index.rst`` per event under
    ``content/<location>/<date>-<slug>/``.  ``force`` overwrites existing
    event directories; ``pandoc`` selects pandoc (instead of html2rest)
    for the HTML -> reST conversion of the event description.
    """
    # Reuse a previously saved API key, if any.
    key_path = os.path.normpath(os.path.expanduser('~/.meetup.com-key'))
    if os.path.exists(key_path):
        with io.open(key_path, encoding='utf8') as fh:
            key = fh.read().strip()
    else:
        key = None
    # Cache HTTP responses on disk for a day to spare the API.
    cache = FileCache('.web_cache', forever=True)
    requests = CacheControl(Session(),
                            cache,
                            cache_etags=False,
                            heuristic=ExpiresAfter(days=1))

    # Loop until the key validates; re-prompt on auth failure.
    while True:
        resp = requests.get('https://api.meetup.com/status',
                            params=dict(key=key))
        if resp.status_code == 200 and resp.json().get('status') == 'ok':
            break
        elif resp.status_code == 200 and any(
                'auth_fail' == e.code for e in resp.json().get('errors', [])):
            click.echo(
                'Your meetup.com key is required. You can get it from https://secure.meetup.com/meetup_api/key/\n'
            )

            if click.confirm(
                    'Open https://secure.meetup.com/meetup_api/key/ in your web browser?'
            ):
                click.launch('https://secure.meetup.com/meetup_api/key/')

            click.echo('')
            key = click.prompt('Key', hide_input=True)
        else:
            raise click.ClickException(
                'Failed to get meetup.com status. Response was {!r} {!r}'.
                format(resp.status_code, resp.text))

    click.secho(
        'For convenience your key is saved in `{}`.\n'.format(key_path),
        fg='magenta')
    with open(key_path, 'w') as fh:
        fh.write(key)

    while not location:
        location = location or get_input(
            u'Location: ',
            completer=WordCompleter(
                [u'cluj', u'iasi', u'timisoara', u'bucuresti'],
                ignore_case=True))

    # Loop until a group with at least one matching event is selected.
    while True:
        group_id = group_id or get_input(
            u'Group ID: ',
            completer=WordCompleter([
                u'RoPython-Bucuresti', u'RoPython-Cluj', u'RoPython_Iasi',
                u'RoPython-Timisoara'
            ],
                                    ignore_case=True))

        resp = requests.get('https://api.meetup.com/2/events',
                            params=dict(
                                key=key,
                                group_urlname=group_id,
                                time=time_boundary,
                                status=event_status,
                            ))
        if resp.status_code == 200:
            json = resp.json()
            if json['results']:
                break
            else:
                click.secho(
                    'Invalid group `{}`. It has no events!'.format(group_id),
                    fg='red')
                group_id = None
        # BUG FIX: was `if resp.status_code == '400'` (int vs str compare,
        # never true) calling the nonexistent `click.fail`; the bare `if`
        # also let the 200-but-no-events case fall through to the error
        # branch below.
        elif resp.status_code == 400:
            raise click.ClickException(
                'Failed to get make correct request. Response was {!r}'.format(
                    resp.text))
        else:
            click.secho('Invalid group `{}`. Response was [{}] {!r}'.format(
                group_id, resp.status_code, resp.text),
                        fg='red')
            # BUG FIX: without resetting, the loop retried the same bad
            # group forever instead of prompting again.
            group_id = None

    # click.echo(pformat(dict(resp.headers)))

    for event in json['results']:
        # API timestamps and durations are in milliseconds.
        dt = datetime.fromtimestamp(event['time'] / 1000)
        event['duration'] = format_duration(
            event.get('duration', 3600000) / 1000)
        event['time'] = dt.strftime('%Y-%m-%d %H:%M')
        # Collect address parts as a tuple so venue info can be appended.
        if 'how_to_find_us' in event:
            address = event['how_to_find_us'],
        else:
            address = ()
        if 'venue' in event:
            address_1 = event['venue'].get('address_1')
            if address_1:
                address += address_1,
            event['venue']['address_1'] = ', '.join(address)
        else:
            # BUG FIX: previously stored the raw tuple as address_1 and
            # omitted the keys the reST template formats, which would
            # render a tuple repr and raise KeyError on venue-less events.
            event['venue'] = {
                'address_1': ', '.join(address),
                'city': '',
                'localized_country_name': '',
            }
        click.echo("{time}: {name}".format(**event))
        click.echo("\t{}".format(pformat(event)))
        existing_path = glob(
            os.path.join('content', '*', dt.strftime('%Y-%m-%d*'),
                         'index.rst'))
        if existing_path and not force:
            if len(existing_path) > 1:
                click.secho('\tERROR: multiple paths matched: {}'.format(
                    existing_path))
            else:
                click.secho('\t`{}` already exists. Not importing.'.format(
                    *existing_path),
                            fg='yellow')
        else:
            target_dir = os.path.join(
                'content', location, '{}-{}'.format(dt.strftime('%Y-%m-%d'),
                                                    slugify(event['name'])))
            target_path = os.path.join(target_dir, 'index.rst')
            if not os.path.exists(target_dir):
                os.makedirs(target_dir)

            if pandoc:
                # The temp file must outlive the `with` (delete=False) so
                # pandoc can read it; unlink manually afterwards.
                with tempfile.NamedTemporaryFile(delete=False) as fh:
                    fh.write(event['description'].encode('utf-8'))
                rst = subprocess.check_output(
                    ['pandoc', '--from=html', '--to=rst',
                     fh.name]).decode('utf-8')
                os.unlink(fh.name)
            else:
                rst = html2rest(event['description'])

            doc = u'''{name}
###############################################################

:tags: prezentari
:registration:
    meetup.com: {event_url}
:start: {time}
:duration: {duration}
:location: {venue[address_1]}, {venue[city]}, {venue[localized_country_name]}

{rst}'''.format(rst=rst, **event)
            with io.open(target_path, 'w', encoding='utf-8') as fh:
                fh.write(doc)
            click.secho('\tWrote `{}`.'.format(target_path), fg='green')
示例#55
0
 def slug(self):
     """Lazily derive the slug from ``self.name`` and memoize it."""
     if self._slug is None:
         substitutions = self.settings.get('SLUG_SUBSTITUTIONS', ())
         self._slug = slugify(self.name, substitutions)
     return self._slug
示例#56
0
 def name(self, name):
     """Store the new name and refresh the slug derived from it."""
     self._name, self.slug = name, slugify(name)
示例#57
0
 def _normalize_key(self, key):
     """Return *key* slugified and coerced to a text string."""
     normalized = slugify(key)
     return six.text_type(normalized)
示例#58
0
 def __init__(self, name, settings):
     # Coerce the name to text and pre-compute its slug.
     # NOTE(review): `unicode` is Python-2 only — this snippet will raise
     # NameError on Python 3; confirm the target interpreter.
     self.name = unicode(name)
     self.slug = slugify(self.name)
     # Kept for later use (e.g. slug substitution settings elsewhere).
     self.settings = settings
示例#59
0
    def read(self, filename):
        """Parse an MTG deck file into ``("", metadata)`` for Pelican.

        File format (inferred from the branches below — confirm against
        real deck files):
          * lines starting with ``//`` carry metadata (via ``parse_meta``);
          * a line starting with ``---`` begins the free-text description,
            which runs to end of file;
          * any other non-blank line is a card entry, parsed by
            ``parse_card_line`` into (sideboard flag, set, count, name).

        Returns an empty content string; everything of interest (including
        the assembled ``deck`` dict) is in the metadata.
        """
        # Fixed defaults; "name" and "format" are expected to arrive via
        # the `//` metadata lines (both are read unconditionally below).
        metadata = {
            "category": "MTG_Deck",
            "date": "2020-04-13",
            "template": "mtg_deck",
        }

        deck_data = {"main": [], "sideboard": [], "colors": []}
        description = []

        # Converted-mana-cost samples per color bucket (single colors plus
        # "A" for colorless and "M" for multicolor, see below).
        cmc_per_color = defaultdict(list)

        with open(filename, "r") as fin:
            for line in fin:
                if line.startswith("//"):
                    tag, value = parse_meta(line)
                    metadata[tag.lower()] = value
                elif line.startswith("---"):
                    # This is the description, read until end of file
                    for dl in fin:
                        description.append(dl.strip())
                elif line.strip() != "":
                    sideboard, card_set, card_count, card_name = parse_card_line(
                        line)
                    # Ensure self.cached_data[card_set][card_name] exists
                    # before the lookups below.
                    self.add_card_data(card_set, card_name)

                    card_data = {
                        "name":
                        card_name,
                        "count":
                        card_count,
                        "data":
                        self.cached_data[card_set][card_name],
                        "card_type":
                        parse_card_type(self.cached_data[card_set][card_name]
                                        ["type_line"]),
                    }

                    # Track the deck's color identity (unique, order of
                    # first appearance).
                    for color in self.cached_data[card_set][card_name][
                            "colors"]:
                        if color not in deck_data["colors"]:
                            deck_data["colors"].append(color)

                    if sideboard:
                        deck_data["sideboard"].append(card_data)
                    else:
                        deck_data["main"].append(card_data)

                        # Lands are excluded from the mana-curve stats;
                        # CMC is clamped to 11 so the histogram has a
                        # fixed 0..11 range.
                        if card_data["card_type"] != "land":
                            card_colors = self.cached_data[card_set][
                                card_name]["colors"]
                            num_colors = len(card_colors)
                            card_cmc = min(
                                11,
                                int(self.cached_data[card_set][card_name]
                                    ["cmc"]))

                            if num_colors == 1:
                                cmc_per_color[card_colors[0]].extend(
                                    [card_cmc] * card_count)
                            elif num_colors == 0:
                                # Artifact and devoid cards
                                cmc_per_color["A"].extend([card_cmc] *
                                                          card_count)
                            else:
                                # Multicolor and hybrid cards
                                cmc_per_color["M"].extend([card_cmc] *
                                                          card_count)

        # Per-color histogram of CMC values, then zero-fill every bucket
        # 0..11 so all colors have equal-length distributions.
        cmc_distribution = {
            k: dict(Counter(v))
            for k, v in cmc_per_color.items()
        }
        for k in cmc_distribution.keys():
            for i in range(0, 12):
                if i not in cmc_distribution[k].keys():
                    cmc_distribution[k][i] = 0

        # E.g. colors ["w", "u"] -> "{U}{W}" (mana-symbol notation).
        deck_data["colors_string"] = "".join(
            ["{" + str(c).upper() + "}" for c in sorted(deck_data["colors"])])
        # Collapse each histogram to a CMC-ordered list of counts.
        deck_data["cmc_distribution"] = {
            k: [a[1] for a in sorted(v.items())]
            for k, v in cmc_distribution.items()
        }
        # Module-level color palette for charting — defined elsewhere.
        deck_data["cmc_distribution_colors"] = cmc_distribution_colors

        # Persist any card data fetched during this read.
        self.write_cache()

        metadata["title"] = metadata["name"]
        metadata["slug"] = slugify(
            metadata["title"],
            regex_subs=self.settings.get("SLUG_REGEX_SUBSTITUTIONS", []),
        )

        metadata["description"] = "\n".join(description)

        metadata["url"] = f"mtg/{metadata['format']}/{metadata['slug']}/"
        metadata["save_as"] = f"{metadata['url']}index.html"

        # Run every value through Pelican's metadata processors.
        parsed = {}
        for key, value in metadata.items():
            parsed[key] = self.process_metadata(key, value)

        parsed["deck"] = deck_data

        return "", parsed
示例#60
0
def my_slugify(text):
    """Slugify *text* using the module-level SLUG_REGEX_SUBSTITUTIONS."""
    substitutions = SLUG_REGEX_SUBSTITUTIONS
    return slugify(text, substitutions)