def create_feed():
    return feedgenerator.Atom1Feed(
        title=SITE["name"],
        link=f'{SITE["url"]}/',
        feed_url=f'{SITE["url"]}/{SITE["feed_path"]}',
        description=f"Feed for {SITE['url']}",
    )
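
create_feed() only constructs an empty feed object; entries still have to be added and the result serialized. A minimal, hypothetical continuation of such a module, assuming a SITE dict along these lines (the values and paths below are illustrative, not from the original project):

import feedgenerator

# Assumed shape of the module-level SITE dict read by create_feed() above.
SITE = {"name": "Example Blog", "url": "https://example.com", "feed_path": "feed.xml"}

feed = create_feed()
feed.add_item(
    title="Hello world",
    link=f'{SITE["url"]}/posts/hello-world/',
    description="First post",
)
with open("feed.xml", "w", encoding="utf-8") as fh:
    feed.write(fh, "utf-8")
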
Example #2
 def test_atom1_mime_type(self):
     """
     Test to make sure Atom MIME type has UTF8 Charset parameter set
     """
     atom_feed = feedgenerator.Atom1Feed(title=u'title')
     self.assertEqual(atom_feed.mime_type,
                      "application/atom+xml; charset=utf-8")
Example #3
def generate_feed(
    articles,
    output_dir,
    base_url,
    blog_title,
    blog_description,
    blog_author,
):
    """Generate Atom feed.

    Parameters
    ----------
    articles : list[list[str, dict]]
        list of relative output path and article dictionary
    output_dir : str
        where the feed is stored
    base_url : str
        base url
    blog_title : str
        blog title
    blog_description : str
        blog description
    blog_author : str
        blog author

    """
    logger.info('Generating Atom feed.')
    feed = feedgenerator.Atom1Feed(
        link=base_url,
        title=blog_title,
        description=blog_description,
        feed_url=base_url + 'atom.xml',
    )

    for dst, context in articles:
        # if article has a description, use that. otherwise fall back to
        # the title
        description = context.get('description', context['title'])

        feed.add_item(
            title=context['title'],
            author_name=blog_author,
            link=base_url + dst,
            description=description,
            content=context['content'],
            pubdate=context['date'],
        )

    with open(f'{output_dir}/atom.xml', 'w', encoding='utf-8') as fh:
        feed.write(fh, encoding='utf8')
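
A hypothetical call to generate_feed(), showing the article structure the docstring describes: pairs of a relative output path and a context dict with title, content, date and an optional description. Everything below is made up for illustration; the function also assumes a module-level logger and the feedgenerator import, and output_dir must already exist.

import logging
import feedgenerator  # the function above expects this module-level import

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # ...and a module-level logger

from datetime import datetime, timezone

articles = [
    (
        "posts/hello-world.html",
        {
            "title": "Hello world",
            "content": "<p>First post.</p>",
            "date": datetime(2024, 1, 1, tzinfo=timezone.utc),
            # "description" is optional; the title is used as a fallback
        },
    ),
]

# "build" must already exist; generate_feed() writes build/atom.xml
generate_feed(
    articles,
    output_dir="build",
    base_url="https://example.com/",
    blog_title="Example Blog",
    blog_description="An example blog",
    blog_author="Jane Doe",
)
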
Example #4
def atom_feed(posts):
    feed = feedgenerator.Atom1Feed(title="Krzysztof Kowalczyk blog",
                                   link="http://blog.kowalczyk.info/feed/",
                                   description="Krzysztof Kowalczyk blog")

    posts = posts[-25:]
    posts.reverse()
    for p in posts:
        title = p["title"]
        link = "http://blog.kowalczyk.info/" + p["url"]
        description = get_post_html_content(p)
        pubdate = datetime.datetime.strptime(p["date"], "%Y-%m-%d %H:%M:%S")
        feed.add_item(title=title,
                      link=link,
                      description=description,
                      pubdate=pubdate)
    return feed
Example #5
 def createFeed(self):
     feed = feedgenerator.Atom1Feed(title=self.sitename, link=self.url, description='')
     articles = [i.replace(ARTICLE_PATH, '').replace('.md', '') for i in os.listdir(ARTICLE_PATH)]
     articles = sorted(articles, reverse=True)
     for i in articles:
         with open(ARTICLE_PATH + i + '.md', mode='r', encoding='utf_8') as f:
             text = f.read()
         md = markdown.Markdown(extensions=['meta'])
         md.convert(text)
         title = str(md.Meta['title'][0]).replace('"', '')
         link = self.url + '/' + ARTICLE_PATH + i
         year, month, day = map(int, str(md.Meta['date'][0]).split('-'))
         jst = timezone(timedelta(hours=+9), 'JST')
         date = datetime(year, month, day, tzinfo=jst)
         feed.add_item(title=title, link=link, description='', pubdate=date)
     with open(OUTPUT_PATH + self.name + '.xml', mode='w', encoding='utf_8') as f:
         f.write(feed.writeString('utf_8'))
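
createFeed() relies on Python-Markdown's meta extension, so each article file needs a meta header block with title and date fields. A standalone sketch of the format it parses (the article text is illustrative):

import markdown

# Illustrative article text in the format createFeed() expects:
# a meta block (title/date) followed by the body.
text = 'title: "Hello world"\ndate: 2024-01-01\n\nArticle body.\n'

md = markdown.Markdown(extensions=['meta'])
md.convert(text)
print(md.Meta['title'])  # ['"Hello world"'] -- the quotes are stripped by createFeed()
print(md.Meta['date'])   # ['2024-01-01']
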
Example #6
def generate(query):
    url = "https://derpibooru.org/search.json?filter_id=56027&" + query
    feedUrl = "https://derpibooru.org/search?filter_id=56027&" + query

    response = urlopen(url)
    jsonData = loads(response.read().decode("utf-8"))

    feedName = "Derpibooru Search: " + query

    feed = feedgenerator.Atom1Feed(
            title=feedName,
            description=query,
            link=feedUrl)

    for post in jsonData["search"]:
        # Generate description/post contents
        descript = '<p><a href="' + post["image"] + '"><img src="' +\
            post["representations"]["medium"] + '"></a></p>'

        # Add original post description (if any)
        descript += "<p>" + post["description"] + "</p>"

        # Add size information
        size = str(post["width"]) + "x" + str(post["height"])
        descript += "<b>Size:</b> " + size + "<br>"

        # Add a link to the source url of the post
        if post["source_url"]:
            descript += '<b>Source:</b> <a href="' + post["source_url"] + '">' +\
                post["source_url"] + '</a><br>'

        postUrl = "https://derpibooru.org/" + str(post["id"])
        date = datetime.strptime(post["created_at"], '%Y-%m-%dT%H:%M:%S.%fZ')
        tags = post["tags"].split(sep=', ')

        feed.add_item(
            title=str(post["id"]),
            link=postUrl,
            pubdate=date,
            unique_id=str(post["id"]),
            description=descript,
            categories=tags)

    return feed.writeString("utf-8")
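
generate() already returns the serialized Atom XML, so a hypothetical caller only needs to URL-encode the search terms and store the result. The query and filename below are illustrative, and calling generate() performs a real HTTP request to derpibooru.org:

from urllib.parse import urlencode

query = urlencode({"q": "cute", "page": 1})  # becomes "q=cute&page=1"
xml = generate(query)
with open("derpibooru_search.atom.xml", "w", encoding="utf-8") as fh:
    fh.write(xml)
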
Example #7
 def feed(cls,
          q,
          feed_type,
          title,
          link,
          description,
          since=None,
          until=None,
          page=None,
          limit=None):
     "Produces feedgenerator Feed"
     d = dict(title=title,
              link=h.absurl(h.urlquote(link)),
              description=description,
              language='en',
              feed_url=request.url)
     if feed_type == 'atom':
         feed = FG.Atom1Feed(**d)
     elif feed_type == 'rss':
         feed = RssFeed(**d)
     limit, page = h.paging_sanitizer(limit or 10, page)
     query = defaultdict(dict)
     if callable(q):
         q = q(since, until, page, limit)
     query.update(q)
     if since is not None:
         query['pubdate']['$gte'] = since
     if until is not None:
         query['pubdate']['$lte'] = until
     cur = cls.query.find(query)
     cur = cur.sort('pubdate', pymongo.DESCENDING)
     cur = cur.limit(limit)
     cur = cur.skip(limit * page)
     for r in cur:
         feed.add_item(title=r.title,
                       link=h.absurl(h.urlquote_path_only(r.link)),
                       pubdate=r.pubdate,
                       description=r.description,
                       unique_id=h.absurl(r.unique_id),
                       author_name=r.author_name,
                       author_link=h.absurl(r.author_link))
     return feed
Example #8
    def feed(self) -> feedgenerator.SyndicationFeed:
        feed = feedgenerator.Atom1Feed(
            title="De dagelijke Dilbert",
            link="http://dilbert.com/",
            feed_url="{}/{}".format(self.baseurl, self.feedname),
            description="Daily dilbert in a nice feed",
            language="en",
        )

        for comic in self.comics(10):
            _debug("Adding comic for {}".format(comic.pubdate))
            feed.add_item(
                unique_id=comic.url,
                title="{} — {}".format(comic.pubdate, comic.title),
                author_name="Scott Adams",
                link="http://dilbert.com/strip/{}".format(comic.pubdate),
                updateddate=comic.updated,
                pubdate=datetime.combine(comic.pubdate, datetime.min.time()),
                description=comic.tag(baseurl=self.baseurl),
            )
        return feed
Example #9
 def feed(self, **kw):
     data = self._get_activities_data(**kw)
     response.headers['Content-Type'] = str('')
     response.content_type = str('application/xml')
     d = {
         'title':
         'Activity for %s' % data['followee'].activity_name,
         'link':
         h.absurl(self.app.url),
         'description':
         'Recent activity for %s' % (data['followee'].activity_name),
         'language':
         'en',
     }
     if request.environ['PATH_INFO'].endswith(str('.atom')):
         feed = FG.Atom1Feed(**d)
     else:
         feed = FG.Rss201rev2Feed(**d)
     for t in data['timeline']:
         url_id = h.absurl(
             t.obj.activity_url
         )  # try to keep this consistent over time (not url-quoted)
         url = h.absurl(h.urlquote_path_only(t.obj.activity_url))
         feed.add_item(title='%s %s %s%s' % (
             t.actor.activity_name,
             t.verb,
             t.obj.activity_name,
             ' on %s' %
             t.target.activity_name if t.target.activity_name else '',
         ),
                       link=url,
                       pubdate=t.published,
                       description=t.obj.activity_extras.get('summary'),
                       unique_id=url_id,
                       author_name=t.actor.activity_name,
                       author_link=h.absurl(t.actor.activity_url))
     return feed.writeString('utf-8')
Example #10
 def test_003_string_results_atom(self):
     #import ipdb; ipdb.set_trace()
     feed = feedgenerator.Atom1Feed(**FIXT_FEED)
     feed.add_item(**FIXT_ITEM)
     result = feed.writeString(ENCODING)
     if six.PY3:
         # On Python 3, result of feedgenerator is a unicode string!
         # So do not encode our expected_result.
         expected_result = build_expected_atom_result(
             feed, EXPECTED_RESULT_ATOM, None)
     else:
         # On Python 2, result of feedgenerator is a str string!
         # Expected_result must therefore encoded likewise.
         expected_result = build_expected_atom_result(
             feed, EXPECTED_RESULT_ATOM, ENCODING)
     # The different result types of Python 2 (str=bytes) and Python 3
     # (str=text=unicode) stems from a different implementation of StringIO.
     # As I understand it, getvalue() in Python 2 returns the type you
     # originally wrote into the buffer. In Python 3 getvalue() always
     # returns a str (=text=unicode).
     # See other test: test_stringio.py
     #print type(result), type(expected_result)
     self.assertEqual(type(result), type(expected_result))
     self.assertEqual(result, expected_result)
Example #11
    return caption


''' create a list of images to display '''
dirList = os.listdir("b/")
imagelist = []
for fname in dirList:
    if fname.endswith(".jpg"):
        imagelist.append(fname)
imagelist.sort(reverse=True)

feed = feedgenerator.Atom1Feed(
    title="Une petite faim ?",
    link="https://nicolas.legaillart.fr/miam",
    description="C'est pas tr&egrave;s joli, mais en tout cas c'est bon",
    language="fr",
    feed_url="https://nicolas.legaillart.fr/miam/feed",
)
''' gemini page '''
print("creating gemini page")
h = open('header.gmi', 'r').read()
f = open('miam.gmi', 'w')
f.write(h)

for item in imagelist:
    ''' build page structure '''
    imageline = "=> b/%s" % item
    caption = getcaption(item)
    if caption:
        imageline += " %s\n" % html.unescape(caption)
            FEED_ITEM = dict(
                title='Version {} is available!'.format(version),
                link='https://community.teamspeak.com/c/announcements/7',
                description='Version {} was released'.format(version),
                content=
                """Version {} was released, get more information at <a href=\"https://forum.teamspeak.com/forums/91-Latest-News\">the forums</a>.<br>Download here: <a href=\"{}\">{}</a><br>SHA256: {}"""
                .format(version, downloadlink, downloadlink, checksum),
                pubdate=timestamp,
                unique_id=guid)

            if args.verbose:
                print("[INFO] FEED: {}".format(FEED))
                print("[INFO] ITEM: {}".format(FEED_ITEM))
            import feedgenerator
            atomfeed = feedgenerator.Atom1Feed(**FEED)
            rssfeed = feedgenerator.Rss201rev2Feed(**FEED)

            atomfeed.add_item(**FEED_ITEM)
            rssfeed.add_item(**FEED_ITEM)

            with open('{}{}_{}.atom'.format(storedir, platform, arch),
                      'w') as f:
                result = atomfeed.write(f, 'utf-8')
            with open('{}{}_{}.rss'.format(storedir, platform, arch),
                      'w') as f:
                result = rssfeed.write(f, 'utf-8')

    with open('{}/index.html'.format(storedir), 'w') as f:
        f.write("""
<html>