Code Example #1
class TestExtensionDc(unittest.TestCase):

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('dc')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_entryLoadExtension(self):
        fe = self.fg.add_item()
        try:
            fe.load_extension('dc')
        except ImportError:
            pass  # Extension already loaded

    def test_elements(self):
        for method in dir(self.fg.dc):
            if method.startswith('dc_'):
                m = getattr(self.fg.dc, method)
                m(method)
                assert m() == [method]

        self.fg.id('123')
        assert self.fg.atom_str()
        assert self.fg.rss_str()
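For reference, a minimal standalone sketch (not taken from the test suite above) of how the 'dc' extension exercised here is used in ordinary code; the creator value and URLs below are placeholders.

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('dc')
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')
fg.id('http://example.com/feed')   # Atom output additionally requires an id
fg.dc.dc_creator('Jane Doe')       # adds a Dublin Core creator to the channel
print(fg.rss_str(pretty=True).decode('utf-8'))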
Code Example #2
def get_weekly_jobs_rss():
    redis_url = current_app.config.get("CACHE_REDIS_URL")
    jobs_weekly_email_key = current_app.config.get(
        "WEEKLY_JOBS_EMAIL_REDIS_KEY")

    redis = StrictRedis.from_url(redis_url)

    raw_email_entry = redis.hgetall(jobs_weekly_email_key)
    title = raw_email_entry[b"title"].decode("UTF-8")
    content = raw_email_entry[b"html"].decode("UTF-8")
    timestamp = float(raw_email_entry[b"timestamp"])
    date = datetime.fromtimestamp(timestamp, tz=pytz.UTC)

    feed = FeedGenerator()
    feed.link(href=request.url_root)
    feed.title("INSPIRE Weekly HEP Jobs")
    feed.author({"name": "inspirehep.net"})
    feed.description("Feed for weekly HEP jobs from INSPIRE")
    feed.pubDate(date)
    feed.lastBuildDate(date)

    entry = feed.add_entry()
    entry.id(str(timestamp))
    entry.title(title)
    entry.content(content)
    entry.published(date)

    return Response(response=feed.rss_str(), mimetype="application/rss+xml")
Code Example #3
def gen_feed(title,
             author,
             feed_url,
             url,
             subtitle,
             logo,
             categories=None,
             album=False,
             licenses=False):
    fg = FeedGenerator()
    fg.load_extension("podcast")

    fg.id(feed_url)
    fg.title(title)
    fg.author(author)
    fg.link(href=url)
    fg.link(href=feed_url, rel="self")
    fg.logo(logo)
    fg.subtitle(subtitle)
    fg.language("en")
    fg.generator(generator="reel2bits",
                 uri=f"https://{current_app.config['AP_DOMAIN']}",
                 version=g.cfg["REEL2BITS_VERSION"])

    if album and categories:
        fg.podcast.itunes_category(categories[0])
        fg.category([{"term": c, "label": c} for c in categories])

    if licenses:
        fg.rights("See individual tracks: " + ", ".join(licenses))

    return fg
Code Example #4
def feed(request):
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')

    blog = address_info['blog']
    root = address_info['root']

    all_posts = blog.post_set.filter(publish=True,
                                     is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'http://{root}/')
    fg.author({'name': blog.subdomain, 'email': blog.user.email})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"http://{root}/", rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"http://{root}/{post.slug}/")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': blog.user.email})
        fe.link(href=f"http://{root}/feed/")
        fe.content(unmark(post.content))
        fe.updated(post.published_date)

    if request.GET.get('type') == 'rss':
        fg.link(href=f"http://{root}/feed/?type=rss", rel='self')
        rssfeed = fg.rss_str(pretty=True)
        return HttpResponse(rssfeed, content_type='application/rss+xml')
    else:
        fg.link(href=f"http://{root}/feed/", rel='self')
        atomfeed = fg.atom_str(pretty=True)
        return HttpResponse(atomfeed, content_type='application/atom+xml')
Code Example #5
def serialize_category_atom(category, url, user, event_filter):
    """Export the events in a category to Atom.

    :param category: The category to export
    :param url: The URL of the feed
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLalchemy criterion to restrict which
                         events will be returned.  Usually something
                         involving the start/end date of the event.
    """
    query = (Event.query
             .filter(Event.category_chain_overlaps(category.id),
                     ~Event.is_deleted,
                     event_filter)
             .options(load_only('id', 'category_id', 'start_dt', 'title', 'description', 'protection_mode',
                                'access_key'),
                      subqueryload('acl_entries'))
             .order_by(Event.start_dt))
    events = [e for e in query if e.can_access(user)]

    feed = FeedGenerator()
    feed.id(url)
    feed.title(f'Indico Feed [{category.title}]')
    feed.link(href=url, rel='self')

    for event in events:
        entry = feed.add_entry(order='append')
        entry.id(event.external_url)
        entry.title(event.title)
        entry.summary(sanitize_html(str(event.description)) or None, type='html')
        entry.link(href=event.external_url)
        entry.updated(event.start_dt)
    return BytesIO(feed.atom_str(pretty=True))
Code Example #6
def create_rss_feed(poemlink):
    """
    Takes the link that we fetched
    and then writes it to an xml file for a feed reader to fetch

    :param poemlink: url
    :type poemlink: string
    """

    # Create a feedgen feed instance and populate it with my details
    poemfeed = FeedGenerator()
    poemfeed.title("Jason's PF feed")
    poemfeed.link(href=poemlink)
    poemfeed.description("Poem of the day")
    poemfeed.lastBuildDate(datetime.now(pytz.timezone('Asia/Kolkata')))

    # Create an rss entry with the url we scraped and parsed
    pf_current_entry = poemfeed.add_entry()
    pf_current_entry.title(f"Poem for {date.today()}")
    pf_current_entry.link(href=poemlink)
    pf_current_entry.guid(f"Poem for {date.today()}")
    pf_current_entry.pubDate(datetime.now(pytz.timezone('Asia/Kolkata')))

    # Write the feed
    poemfeed.rss_file('poem.xml')
Code Example #7
def write_feed(posts):
    fg = FeedGenerator()
    fg.id(f"{BASE_URL}/updates/")
    fg.title("St Columbas's - Updates")
    fg.author(name="St Columba's Free Church", email="*****@*****.**")
    fg.link(href=f"{BASE_URL}/updates")
    fg.logo(f"{BASE_URL}/static/images/stcolumbas-logo-small.png")
    fg.language("en")
    fg.description("St Columba's Free Church Updates")

    for p in posts:
        fe = fg.add_entry()
        fe.id(f'{BASE_URL}/{p["path"]}')
        fe.title(p["title"])
        fe.link(href=f'{BASE_URL}/{p["path"]}')
        fe.author(name=p["author"])
        fe.summary(p["intro"])
        fe.content(markdown(p["body"]))

    rss_path = os.path.join(DIST_DIR, "updates", "rss.xml")
    atom_path = os.path.join(DIST_DIR, "updates", "atom.xml")
    print(rss_path)
    fg.rss_file(rss_path)
    print(atom_path)
    fg.atom_file(atom_path)
Code Example #8
File: feed.py, Project: d2m/pub-dartlang
 def generate_feed(page=1):
     feed = FeedGenerator()
     feed.id("https://pub.dartlang.org/feed.atom")
     feed.title("Pub Packages for Dart")
     feed.link(href="https://pub.dartlang.org/", rel="alternate")
     feed.link(href="https://pub.dartlang.org/feed.atom", rel="self")
     feed.description("Last Updated Packages")
     feed.author({"name": "Dart Team"})
     i = 1
     pager = QueryPager(int(page), "/feed.atom?page=%d",
                        Package.all().order('-updated'),
                        per_page=10)
     for item in pager.get_items():
         i += 1
         entry = feed.add_entry()
         for author in item.latest_version.pubspec.authors:
             entry.author({"name": author[0]})
         entry.title("v" + item.latest_version.pubspec.get("version") +\
             " of " + item.name)
         entry.link(link={"href": item.url, "rel": "alternate",
             "title": item.name})
         entry.id(
             "https://pub.dartlang.org/packages/" + item.name + "#" +\
             item.latest_version.pubspec.get("version"))
         entry.description(
             item.latest_version.pubspec
             .get("description", "Not Available"))
         readme = item.latest_version.readme
         if not readme is None:
             entry.content(item.latest_version.readme.render(), type='html')
         else:
             entry.content("<p>No README Found</p>", type='html')
     return feed
Code Example #9
File: ming.py, Project: adow/ming
 def render_atom(self):
     fg = FeedGenerator()
     fg.id(self.site_url)
     fg.title(self.site_title)
     fg.link(href = self.site_url,rel = 'alternate')
     fg.link(href = self.site_url + 'atom.xml',rel = 'self')
     fg.language('zh-cn')
     link_list = ArticleManager.sharedManager().link_list()
     for link in link_list:
         article = ArticleManager.sharedManager().article_for_link(link)
         if not article:
             continue
         fe = fg.add_entry()
         fe.id(article.article_link)
         fe.link(link = {'href':self.site_url + article.article_link})
         fe.title(article.article_title)
         fe.description(article.article_subtitle or '')
         fe.author(name = article.author or '',
                 email = article.author_email or '')
         d = datetime.strptime(article.article_publish_date,'%Y-%m-%d') 
         pubdate = datetime(year = d.year, month = d.month, day = d.day,tzinfo = UTC(8))
         fe.pubdate(pubdate) 
         article.render_content_html()
         fe.content(content = article._content_html,
                 type = 'html')
     atom_feed = fg.atom_str(pretty = True)
     return atom_feed
Code Example #10
File: controller.py, Project: alexandreblin/tvshows
def latestRss(userID):
    userID = userID.lower()

    shows = {}
    episodes = []
    today = date.today().strftime('%Y-%m-%d')
    for showID in series.getUserShowList(userID):
        shows[showID] = series.getShowInfo(userID, showID, withEpisodes=True, onlyUnseen=True)
        episodes.extend((showID, episode) for episode in shows[showID]['episodes'] if episode['airdate'] and airdateKey(episode['airdate']) < today)

    episodes.sort(key=episodeAirdateKey, reverse=True)

    feed = FeedGenerator()
    feed.id(userID)
    feed.title('%s\'s shows' % userID)
    feed.description('Unseen episodes')
    feed.link(href=request.url_root)
    feed.language('en')

    for showID, episode in episodes:
        entry = feed.add_entry()
        entry.id('%s/%s' % (showID, episode['episode_id']))
        entry.title('%s S%02dE%02d: %s' % (shows[showID]['name'], episode['season'], episode['episode'], episode['title']))

    return feed.rss_str(pretty=True)
Code Example #11
File: views.py, Project: jniebuhr/WatchPeopleCode
def podcast_feed():
    logo_url = url_for("static", filename="wpclogo_big.png", _external=True)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_image(logo_url)
    fg.author({'name': 'Nathan Kellert', 'email': '*****@*****.**'})
    fg.link(href='http://watchpeoplecode.com/podcast_feed.xml', rel='self')
    fg.title('WPC Coders Podcast')
    fg.description('WPC Coders Podcast is a weekly peek into the lives of developers and the WatchPeopleCode community. Our goal is to keep our listeners entertained by giving them new and interesting insights into our industry as well as awesome things happening within our own community. Here, you can expect hear about some of the latest news, tools, and opportunities for developers in nearly every aread of our industry. Most importantly, we hope to have some fun and a few laughs in ways only other nerds know how.')  # NOQA

    episodes = [('ep1.mp3', 'Episode 1', datetime(2015, 2, 21, 23), 'Learn all about the WPC hosts, and where we came from in Episode 1!'),
                ('ep2.mp3', 'Episode 2', datetime(2015, 2, 28, 23), 'This week we cover your news, topics and questions in episode 2!'),
                ('ep3.mp3', 'Episode 3', datetime(2015, 3, 7, 23), "On todays podcast we talk to WatchPeopleCode's founder Alex Putilin. Hear about how the reddit search engine thousands watched him write. Also, hear the inside scoop of how WatchPeopleCode got started!"),  # NOQA
                ('ep4.mp3', 'Episode 4', datetime(2015, 3, 14, 23), "This week we talk to FreeCodeCamps Quincy Larson(http://www.freecodecamp.com) about their project that combines teaching new developers how to code and completing projects for non-profits! Lets find out how this group of streamers code with a cause!")]  # NOQA

    for epfile, eptitle, epdate, epdescription in episodes[::-1]:
        epurl = "https://s3.amazonaws.com/wpcpodcast/{}".format(epfile)
        fe = fg.add_entry()
        fe.id(epurl)
        fe.title(eptitle)
        fe.description(epdescription)
        fe.podcast.itunes_image(logo_url)
        fe.pubdate(epdate.replace(tzinfo=pytz.UTC))
        fe.enclosure(epurl, 0, 'audio/mpeg')

    return Response(response=fg.rss_str(pretty=True),
                    status=200,
                    mimetype='application/rss+xml')
Code Example #12
File: rc2pc.py, Project: dmascialino/rc2pc
def write_podcast(show, podcast_dir, base_public_url, showlocal_tz):
    """Create the podcast file."""
    fg = FeedGenerator()
    fg.load_extension('podcast')

    url = "{}{}.xml".format(base_public_url, show.id)
    fg.id(url.split('.')[0])
    fg.title(show.name)
    fg.image(show.image_url)
    fg.description(show.description)
    fg.link(href=url, rel='self')

    # collect all mp3s for the given show
    all_mp3s = glob.glob(os.path.join(podcast_dir, "{}_*.mp3".format(show.id)))

    for filepath in all_mp3s:
        filename = os.path.basename(filepath)
        mp3_date = _get_date_from_mp3_path(filepath, showlocal_tz)
        mp3_size = os.stat(filepath).st_size
        mp3_url = base_public_url + filename
        mp3_id = filename.split('.')[0]
        title = "Programa del {0:%d}/{0:%m}/{0:%Y}".format(mp3_date)

        # build the rss entry
        fe = fg.add_entry()
        fe.id(mp3_id)
        fe.pubdate(mp3_date)
        fe.title(title)
        fe.enclosure(mp3_url, str(mp3_size), 'audio/mpeg')

    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(podcast_dir, '{}.xml'.format(show.id)))
Code Example #13
File: feeder.py, Project: moridius/feeder
class Feeder():
    def __init__( self, url, title='', feedURL='' ):
        scraper = None
        if url.startswith( "https://twitter.com/" ):
            scraper = TwitterScraper( url )
            if title == '':
                title = "Twitter: @" + url.split('/')[3]
        elif url.startswith( "http://www.lindwurm-linden.de/termine" ):
            scraper = LindwurmScraper( url )
            if title == '':
                title = "Lindwurm: Termine"
        else:
            raise UnsupportedService( "No scraper found for this URL." )

        self.feed = FeedGenerator()        
        self.feed.id( url )
        self.feed.title( title )
        self.feed.author( { "name": url } )

        if feedURL != '':
            self.feed.link( href=feedURL, rel='self' )

        for entry in scraper.entries:
            fe = self.feed.add_entry()
            fe.id( entry['url'] )
            fe.title( entry['title'] )
            fe.link( href=entry['url'], rel='alternate' )
            fe.content( entry['text'] )

    def GetAtom( self ):
        return self.feed.atom_str( pretty=True ).decode()
Code Example #14
class TestExtensionTorrent(unittest.TestCase):

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('torrent')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_podcastEntryItems(self):
        fe = self.fg.add_item()
        fe.title('y')
        fe.torrent.filename('file.xy')
        fe.torrent.infohash('123')
        fe.torrent.contentlength('23')
        fe.torrent.seeds('1')
        fe.torrent.peers('2')
        fe.torrent.verified('1')
        assert fe.torrent.filename() == 'file.xy'
        assert fe.torrent.infohash() == '123'
        assert fe.torrent.contentlength() == '23'
        assert fe.torrent.seeds() == '1'
        assert fe.torrent.peers() == '2'
        assert fe.torrent.verified() == '1'

        # Check that we have the item in the resulting XML
        ns = {'torrent': 'http://xmlns.ezrss.it/0.1/dtd/'}
        root = etree.fromstring(self.fg.rss_str())
        filename = root.xpath('/rss/channel/item/torrent:filename/text()',
                              namespaces=ns)
        assert filename == ['file.xy']
Code Example #15
def cache_rss_latest(user_slug):

    articles = data.userDocumentLastChanged_list(user_slug)
    netloc = bottle.request.urlparts.netloc

    fg = FeedGenerator()
    fg.id(abs_url(bottle.request, '/user/%s' % user_slug))
    fg.title('Nigel Chapman (%s)' % netloc)
    fg.subtitle('Long reads on Christian thought')  # <-- Set METADATA for this
    # fg.author( {'name':'Nigel Chapman','email':'*****@*****.**'} )
    fg.logo('https://%s/static/site-image.png' % (netloc))
    fg.link(href='https://%s' % netloc, rel='self')
    # fg.link(href='https://%s/rss/%s.xml' % (netloc, user_slug), rel='self')
    fg.language('en')
    fg.ttl(24 * 3600)

    for a in articles:
        fe = fg.add_entry()
        article_uri = 'read/%s/%s' % (a['user'], a['slug'])
        fe.id(abs_url(bottle.request, article_uri))
        fe.title(a['title'])
        fe.description(a['summary'])
        fe.link(href=abs_url(bottle.request, article_uri))
        fe.author(name=a['email'], email=a['author'])  # <-- Weirdly backwards
        fe.published(a['published_time'])

    feed_xml = fg.rss_str(pretty=True)
    return feed_xml
Code Example #16
File: youtube_feed.py, Project: gju/youtube-podcast
class YoutubeFeed:  
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': '%(id)s.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }]
    }

    def __init__(self, name):
        self.name = name
        self.ydl = youtube_dl.YoutubeDL(self.ydl_opts)

        self.fg = FeedGenerator()
        self.fg.title(name)
        self.fg.author({"name": "Youtube Audio Feed", "email": ""})
        self.fg.link(href="http://www.foo.bar.baz.com", rel="alternate")
        self.fg.description("Personalized Youtube audio feed")
        self.fg.generator("")
        self.fg.docs("")

    def add_video(self, url):
        info = self.ydl.extract_info(url, download=True)
        entry = self.fg.add_entry()
        entry.id(info['id'])
        entry.title(info['title'])
        entry.description(info['description'])
        entry.enclosure(info['id'] + ".mp3", str(info['duration']), 'audio/mpeg')

    def save(self):
        self.fg.rss_file(self.name + '.xml')
Code Example #17
File: conf.py, Project: zephyr/manual
def _build_feed(changelog_entries, format):
    from feedgen.feed import FeedGenerator
    from datetime import datetime
    import pytz

    tz = pytz.timezone('Europe/Berlin')

    BASE_URL = 'https://manual.uberspace.de/en/changelog.'
    HTML_URL = BASE_URL + 'html'

    fg = FeedGenerator()
    fg.id(HTML_URL)
    fg.title('Uberspace 7 Updates')
    fg.link(href=HTML_URL, rel='alternate')
    fg.link(href=BASE_URL + format, rel='self')
    fg.language('en')

    for entry in changelog_entries:
        deeplink = '{}#v{}'.format(HTML_URL,
                                   entry['version'].replace('.', '-'))
        date = tz.localize(datetime.strptime(entry['date'], '%Y-%m-%d'))
        fe = fg.add_entry()
        fe.id(deeplink)
        fe.title('[{}] - {}'.format(entry['version'], entry['date']))
        fe.link(href=deeplink)
        fe.updated(date)
        fe.content(entry['text'].replace('\n', '<br>'))

    if format == 'atom':
        return fg.atom_str(pretty=True)
    else:
        raise Exception('unknown feed format: ' + str(format))
Code Example #18
File: blog.py, Project: matrufsc2/matrufsc2
def get_feed(atom=False):
    fg = FeedGenerator()
    domain = get_domain()
    items = get_posts({"limit": "10"}, full=True)["results"]
    fg.id("http://%s/"%domain)
    fg.title("Blog do MatrUFSC2")
    fg.description("Feed do blog do MatrUFSC2, onde noticias e novos recursos sao anunciados primeiro!")
    fg.language('pt-BR')
    fg.link({"href":"/blog/feed","rel":"self"})
    fg.updated(items[0]["posted_at"].replace(tzinfo=pytz.UTC))
    for item in items:
        entry = fg.add_entry()
        entry.title(item["title"])

        tree = html.fromstring(item["summary"])
        cleaner = Cleaner(allow_tags=[])
        tree = cleaner.clean_html(tree)

        text = tree.text_content()
        entry.description(text, True)
        entry.link({"href":item["link"],"rel":"self"})
        entry.content(item["body"])
        entry.published(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.updated(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.category({"label": item["category"]["title"], "term": item["category"]["slug"]})
        entry.id(item["id"])
    if atom:
        return fg.atom_str(pretty=True)
    else:
        return fg.rss_str(pretty=True)
Code Example #19
class TestExtensionSyndication(unittest.TestCase):

    SYN_NS = {'sy': 'http://purl.org/rss/1.0/modules/syndication/'}

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('syndication')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_update_period(self):
        for period_type in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
            self.fg.syndication.update_period(period_type)
            root = etree.fromstring(self.fg.rss_str())
            a = root.xpath('/rss/channel/sy:UpdatePeriod',
                           namespaces=self.SYN_NS)
            assert a[0].text == period_type

    def test_update_frequency(self):
        for frequency in (1, 100, 2000, 100000):
            self.fg.syndication.update_frequency(frequency)
            root = etree.fromstring(self.fg.rss_str())
            a = root.xpath('/rss/channel/sy:UpdateFrequency',
                           namespaces=self.SYN_NS)
            assert a[0].text == str(frequency)

    def test_update_base(self):
        base = '2000-01-01T12:00+00:00'
        self.fg.syndication.update_base(base)
        root = etree.fromstring(self.fg.rss_str())
        a = root.xpath('/rss/channel/sy:UpdateBase', namespaces=self.SYN_NS)
        assert a[0].text == base
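Outside the parameterized loops of the test above, typical channel-level usage of the syndication extension is just three calls; a minimal standalone sketch with illustrative values:

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('syndication')
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')
fg.syndication.update_period('daily')                 # the period the schedule is based on
fg.syndication.update_frequency(2)                    # updates twice per period
fg.syndication.update_base('2000-01-01T12:00+00:00')  # reference point for the schedule
print(fg.rss_str(pretty=True).decode('utf-8'))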
Code Example #20
File: bassdrive.py, Project: bspeice/elektricity
    def build_feed(self):
        "Build the feed given our existing URL"
        # Get all the episodes
        page_content = str(requests.get(self.url).content)
        parser = BassdriveParser()
        parser.feed(page_content)
        links = parser.get_links()

        # And turn them into something usable
        fg = FeedGenerator()
        fg.id(self.url)
        fg.title(self.title)
        fg.description(self.title)
        fg.author({'name': self.dj})
        fg.language('en')
        fg.link({'href': self.url, 'rel': 'alternate'})
        fg.logo(self.logo)

        for link in links:
            fe = fg.add_entry()
            fe.author({'name': self.dj})
            fe.title(link[0])
            fe.description(link[0])
            fe.enclosure(self.url + link[1], 0, 'audio/mpeg')

            # Bassdrive always uses date strings of
            # [yyyy.mm.dd] with 0 padding on days and months,
            # so that makes our lives easy
            date_start = link[0].find('[')
            date_str = link[0][date_start:date_start+12]
            published = datetime.strptime(date_str, '[%Y.%m.%d]')
            fe.pubdate(UTC.localize(published))
            fe.guid((link[0]))

        return fg
Code Example #21
def feed(request):
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')

    blog = address_info['blog']
    root = address_info['root']

    all_posts = blog.post_set.filter(publish=True,
                                     is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'{root}/')
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"{root}/feed/", rel='self')
    fg.link(href=root, rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{root}/{post.slug}")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        fe.link(href=f"{root}/feed")
        fe.content(unmark(post.content))

    atomfeed = fg.atom_str(pretty=True)
    return HttpResponse(atomfeed, content_type='application/atom+xml')
Code Example #22
File: rssbook.py, Project: histrio/rssutils
def run(folder, url):
    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()

    head, tail = os.path.split(folder)

    title = tail.decode("utf-8")
    fg.id(str(uuid.uuid4()))
    fg.title(title)

    fg.link(href="{0}/rss.xml".format(url), rel="self")
    fg.description(u"Audiobook `{0}` generated with rssbook".format(title))

    fg.load_extension("podcast")
    for item in sorted(os.listdir(folder)):
        if os.path.splitext(item)[1] != ".mp3":
            continue
        get_node(os.path.join(folder, item))

        fullpath = os.path.join(folder, item)
        fe = fg.add_entry()
        fe.id(str(uuid.uuid4()))
        fe.title(title)
        fe.description(item)

        fe.link(
            href="{0}/{1}".format(url, item), rel="enclosure", type="audio/mpeg", length=str(os.stat(fullpath).st_size)
        )

    fg.rss_file(os.path.join(folder, "rss.xml"))
Code Example #23
File: hypecast.py, Project: blackmad/hypecast
  def makeRss(self):
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine Robot Radio: ' + self.mode)
    fg.author( {'name':'David Blackmad','email':'*****@*****.**'} )
    fg.logo('http://dump.blackmad.com/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine Robot Radio: ' + self.mode)

    description = ' <br/>'.join(['%s. %s' % (index + 1, self.mk_song_id(s)) for index, s in enumerate(self.songs)])

    fe = fg.add_entry()
    fe.title(self.track_name)
    fe.description(description)
    fe.id(self.filename)
    # add length
    print(self.relative_dir)
    print(self.filename)
    fe.enclosure(url = 'http://hypecast.blackmad.com/%s' % (self.filename), type="audio/mpeg")

    rss_str = fg.rss_str()
    newItem = ET.fromstring(rss_str)[0].find('item')
    out = open(self.get_filename('xml'), 'w')
    out.write(ET.tostring(newItem))
    out.close()
    self.updateRss()
Code Example #24
File: blog.py, Project: ivanm11/dataflytest
def rss():    
    config = public_app.config['feed']
    fg = FeedGenerator()
    fg.id('%s/blog' % Config.BASE_URL)
    fg.title(config['title'])
    fg.author( {'name': config['author'],'email': config['email']} )
    fg.description(config['desc'])
    fg.link( href=Config.BASE_URL, rel='alternate' )
    query = {
        'id': { '$regex': 'blog' },
        'current': True,
        'meta.hide': { '$ne': True }
    }
    posts = db.pages.find(query).sort('meta.created', -1)[:20]
    for post in posts:
        fe = fg.add_entry()
        fe.title(post['meta']['title'])
        if 'author' in post['meta']:
            fe.author( {'name': post['meta']['author'],'email': config['email']} )
        else:
            fe.author( {'name': config['author'],'email': config['email']} )
        fe.description(do_truncate(post['content'], 300))
        fe.link(href="%s/%s" % (Config.BASE_URL, post['id']), rel='alternate')
        fe.pubdate(utc.localize(post['meta']['created']))
        fe.content(post['content'])    
    response.headers['Content-Type'] = 'application/rss+xml'
    return fg.rss_str(pretty=True)
Code Example #25
File: build.py, Project: adelevie/crs-reports-website
def create_feed(reports, title, fn):
    # The feed is a notice of new (versions of) reports, so collect the
    # most recent report-versions.
    feeditems = []
    for i, report in enumerate(reports):
        for j, version in enumerate(report['versions']):
            feeditems.append((version['date'], i, j))
    feeditems.sort(reverse=True)
    feeditems = feeditems[0:75]

    # Create a feed.
    from feedgen.feed import FeedGenerator
    feed = FeedGenerator()
    feed.id(SITE_URL)
    feed.title(SITE_NAME + ' - ' + title)
    feed.link(href=SITE_URL, rel='alternate')
    feed.language('en')
    feed.description(
        description="New Congressional Research Service reports tracked by " +
        SITE_NAME + ".")
    for _, report_index, version_index in feeditems:
        report = reports[report_index]
        version = report["versions"][version_index]
        fe = feed.add_entry()
        fe.id(SITE_URL + "/" + get_report_url_path(report, '.html'))
        fe.title(version["title"])
        fe.description(description=version["summary"])
        fe.link(href=SITE_URL + "/" + get_report_url_path(report, '.html'))
        fe.pubdate(version["date"])
    feed.rss_file(os.path.join(BUILD_DIR, fn))
Code Example #26
def feed(column_id):
    api = Api(column_id)

    with request.urlopen(api.info) as stream:
        result = stream.read().decode('utf-8')

    if not result:
        return '', 404

    info = json.loads(result)

    with request.urlopen(api.posts) as stream:
        result = stream.read().decode('utf-8')
        entries = json.loads(result)

    fg = FeedGenerator()
    fg.id(str(entries[0]['slug']))
    fg.title(info['name'])
    fg.language('zh_CN')
    fg.icon(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 's'))
    fg.logo(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 'l'))
    fg.description(info['intro'])
    fg.author(dict(name=info['creator']['name']))
    fg.link(href=api.base_url + info['url'], rel='alternate')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry['url'])
        fe.title(entry['title'])
        fe.published(entry['publishedTime'])
        fe.updated(entry['publishedTime'])
        fe.author(dict(name=entry['author']['name']))
        fe.link(href=api.base_url + entry['url'], rel='alternate')
        fe.content(entry['content'])

    return fg.atom_str(pretty=True)
Code Example #27
def create_feed():
    """RSS 피드 생성하기"""

    # 피드 데이터 저장 전용 객체
    fg = FeedGenerator()

    # 사용자 정의 네임 스페이스를 등록하고
    # 이전에 만들었던 클래스 적용하기
    fg.register_extension(
        'book',
        extension_class_feed=BookFeedExtension,
        extension_class_entry=BookEntryExtension,
    )

    # <channel><title> 요소
    fg.title("위키북스의 도서 목록")
    # <channel><link> 요소: <link> 태그의 내용은 href 속성으로 지정
    fg.link(href="http://example.com")
    # <channel><description> 요소
    fg.description("설명을 입력했다고 가정합니다.")

    # <channel><item> 요소
    fe = fg.add_entry()
    # <channel><item><title> 요소
    fe.title("파이썬을 이용한 머신러닝, 딥러닝 실전 앱 개발")
    # <channel><item><link> 요소
    fe.link(href="http://example.com")
    # <channel><item><description> 요소
    fe.description('<a href="http://example.com">이스케이프 처리 확인 전용 링크</a>'
                   "설명을 입력했다고 가정합니다.")
    # <channel><item><book:writer> 요소(사용자 정의 네임 스페이스를 사용하는 요소)
    fe.book.publisher({'name': "위키북스", 'id': "1"})  # 값은 딕셔너리 자료형으로 전달합니다.

    # 피드를 RSS 형식으로 변환(pretty=True로 들여쓰기 적용)
    return fg.rss_str(pretty=True)
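The BookFeedExtension and BookEntryExtension classes registered above are defined elsewhere and not shown. The sketch below is only a rough guess at how they might look, assuming feedgen's BaseExtension/BaseEntryExtension hooks (extend_ns, extend_rss); the 'book' namespace URI is a placeholder.

from lxml import etree
from feedgen.ext.base import BaseExtension, BaseEntryExtension

BOOK_NS = 'http://example.com/xmlns/book'   # placeholder namespace URI


class BookFeedExtension(BaseExtension):
    """Feed-level part: only declares the xmlns:book namespace."""

    def extend_ns(self):
        return {'book': BOOK_NS}


class BookEntryExtension(BaseEntryExtension):
    """Entry-level part: writes a <book:writer> child into each <item>."""

    def __init__(self):
        self.__publisher = None

    def publisher(self, publisher=None):
        # combined setter/getter in the usual feedgen style
        if publisher is not None:
            self.__publisher = publisher
        return self.__publisher

    def extend_rss(self, entry):
        # 'entry' is the lxml <item> element being serialized
        if self.__publisher:
            writer = etree.SubElement(entry, '{%s}writer' % BOOK_NS)
            writer.text = self.__publisher.get('name')
            if self.__publisher.get('id'):
                writer.set('id', self.__publisher['id'])
        return entry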
Code Example #28
File: test_entry.py, Project: lkiesow/python-feedgen
    def setUp(self):
        fg = FeedGenerator()
        self.feedId = 'http://example.com'
        self.title = 'Some Testfeed'

        fg.id(self.feedId)
        fg.title(self.title)
        fg.link(href='http://lkiesow.de', rel='alternate')[0]
        fg.description('...')

        fe = fg.add_entry()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The First Episode')
        fe.content(u'…')

        # Use also the different name add_item
        fe = fg.add_item()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The Second Episode')
        fe.content(u'…')

        fe = fg.add_entry()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The Third Episode')
        fe.content(u'…')

        self.fg = fg
Code Example #29
File: using_api.py, Project: WarmongeR1/vk2rss
def main():
    session = vk.Session()
    api = vk.API(session)

    group_id = '96469126'

    group_info = api.groups.getById(group_ids=group_id, fields=['description', 'site', 'name', 'photo', 'gid'])

    assert len(group_info) == 1
    group_info = group_info[0]

    url = 'http://vk.com/club{}'.format(group_info['gid'])
    # a = api.wall.get(owner_id=-1 * group_info['gid'])
    #
    # with open('out', 'wb') as fio:
    #     pickle.dump(a, fio)

    with open('out', 'rb') as fio:
        data = pickle.loads(fio.read())

    assert len(data) > 1

    fg = FeedGenerator()
    fg.id(url)
    fg.title(_(group_info['name']))
    fg.description(_(group_info['description']))
    fg.logo(group_info['photo'])
    site_url = group_info.get('site', url) if group_info.get('site', url) else url
    fg.link(href=_(site_url))
    fg.link(href=_(site_url), rel='self')
    fg.link(href=_(site_url), rel='alternate')
    fg.author({'name': 'Alexander Sapronov', 'email': '*****@*****.**'})
    fg.webMaster('[email protected] (Alexander Sapronov)')

    pat = re.compile(r"#(\w+)")

    for x in data[1:]:
        post_link = "{}?w=wall-{}_{}".format(url, group_info['gid'], x['id'])
        e = fg.add_entry()
        # text = x.get('text', '').replace('<br>', '\n')
        text = x.get('text', '')

        e.description(_(text))
        e.author({'name': _(get_author_name(api, x.get('from_id')))})
        e.id(post_link)
        e.link(href=_(post_link))
        e.link(href=_(post_link), rel='alternate')

        tags = pat.findall(text)

        title = x.get('text', '')
        for tag in tags:
            e.category(term=_(tag))
            title = title.replace('#{}'.format(tag), '')

        title = re.sub('<[^<]+?>', ' ', title)
        title = textwrap.wrap(title, width=80)[0]
        e.title(_(title.strip()))

    fg.rss_file('rss.xml')
Code Example #30
File: feed.py, Project: dyeray/podtube
def get_feed(query, title, description, link, image):
    """Get an RSS feed from the results of a query to the YouTube API."""
    service = _get_youtube_client()
    videos = service.search().list(part='snippet', **query, order='date',
                                   type='video', safeSearch='none').execute()
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(title)
    fg.description(description)
    fg.link(href=link, rel='alternate')
    fg.image(image)
    youtube_plugin = get_plugin_from_settings()

    for video in videos['items']:
        try:
            video_url = youtube_plugin.extract_link(
                "https://www.youtube.com/watch?v=" + video['id']['videoId'])
        except PluginException:
            continue
        fe = fg.add_entry()
        fe.id(video['id']['videoId'])
        fe.title(video['snippet']['title'])
        fe.description(video['snippet']['description'])
        fe.pubdate(dateutil.parser.parse(video['snippet']['publishedAt']))
        fe.podcast.itunes_image(video['snippet']['thumbnails']['high']['url'])
        video_info = requests.head(video_url)
        fe.enclosure(video_url, video_info.headers['Content-Length'],
                     video_info.headers['Content-Type'])
    return fg.rss_str(pretty=True)
Code Example #32
File: generate.py, Project: aaearon/lebatard-show-rss
def generate_feed(output_file, exclude_highlights=True):
    # Parse RSS feed
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:

        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            pass
        else:
            fe = fg.add_entry()

            fe.id(e.id)
            fe.title(e.title)
            fe.description(e.description)
            fe.enclosure(url=e.enclosures[0]["href"], length=e.enclosures[0]["length"], type=e.enclosures[0]["type"])

            fe.podcast.itunes_summary(e.description)
            fe.podcast.itunes_subtitle(e.description)
            fe.podcast.itunes_duration(e["itunes_duration"])

            dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
            date = tz.localize(dt)

            # Local hour
            if "Show: " in e.title:
                fe.published(date)
            elif "Hour 1" in e.title:
                fe.published(date + timedelta(hours=1))
            elif "Hour 2" in e.title:
                fe.published(date + timedelta(hours=2))
            elif "Hour 3" in e.title:
                fe.published(date + timedelta(hours=3))
            else:
                fe.published(date + timedelta(hours=-1))

    fg.rss_str(pretty=True)
    fg.rss_file(output_file)
Code Example #33
def _filter_fb_rss_feeed(url):
    parsed_feed = feedparser.parse(url)
    filtered_entries = filter(
        lambda x: ' shared a link: "' in x.title, parsed_feed.entries)

    fg = FeedGenerator()
    fg.id('https://fb-notifications-to-pocket.herokuapp.com/')
    fg.title('Facebook Notifications to Pocket')
    fg.author({'name': 'Pankaj Singh', 'email': '*****@*****.**'})
    fg.description(
        '''Filter FB notifications which contain a link and generate a new rss feed which will be used by IFTTT''')
    fg.link(href='https://fb-notifications-to-pocket.herokuapp.com/')

    for entry in filtered_entries:
        root = etree.HTML(entry.summary_detail.value)
        title = entry.title.split(" shared a link: ")[1].strip()[1:-2]
        author_name = entry.title.split(" shared a link: ")[0].strip()
        url = urlparse.parse_qs(
            urlparse.urlparse(root.findall(".//a")[-1].attrib["href"]).query)["u"][0]

        title = get_title_for_url(url) or title

        fe = fg.add_entry()
        fe.id(entry.id)
        fe.link(href=url)
        fe.published(entry.published)
        fe.author({'name': author_name})
        fe.title(title)

    return fg.atom_str(pretty=True)
Code Example #34
File: handler.py, Project: BlogTANG/blog-a
def feed():
    """
    Generate atom feed
    """
    entries = parse_posts(0, C.feed_count)
    fg = FeedGenerator()
    fg.id(str(len(entries)))
    fg.title(C.title)
    fg.subtitle(C.subtitle)
    fg.language(C.language)
    fg.author(dict(name=C.author, email=C.email))
    fg.link(href=C.root_url, rel='alternate')
    fg.link(href=make_abs_url(C.root_url, 'feed'), rel='self')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry.get('url'))
        fe.title(entry.get('title'))
        fe.published(entry.get('date'))
        fe.updated(entry.get('updated') or entry.get('date'))
        fe.link(href=make_abs_url(C.root_url, entry.get('url')), rel='alternate')
        fe.author(dict(name=entry.get('author'), email=entry.get('email')))
        fe.content(entry.get('body'))

    atom_feed = fg.atom_str(pretty=True)
    return atom_feed
Code Example #35
def generate_rss(show_info, show_uri, country_code):
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.description(show_info['description'])
    fg.author({'name': show_info['publisher']})
    fg.title(show_info['name'])
    fg.link({'href': show_info['external_urls']['spotify']})
    fg.id(show_uri)
    fg.image(show_info.get('images')[0]['url'])
    total_episodes = show_info['episodes']['total']
    added_episodes = 0
    while added_episodes != total_episodes:
        episodes = sp.show_episodes(show_id=show_uri,
                                    limit=50,
                                    offset=added_episodes,
                                    market=country_code)
        for episode in episodes['items']:
            ent = fg.add_entry()
            ent.podcast.itunes_duration(int(episode['duration_ms'] / 1000))
            ent.title(episode.get('name'))
            ent.guid(episode['uri'])
            ent.published(
                datetime.strptime(episode['release_date'],
                                  '%Y-%m-%d').replace(tzinfo=timezone.utc))
            ent.description(episode['description'])
            ent.id(episode['uri'])
            ent.enclosure(
                url=
                f"https://anon-podcast.scdn.co/{episode['audio_preview_url'].split('/')[-1]}",
                length=0,
                type='audio/mpeg')
            added_episodes += 1
    return fg.rss_str().decode('utf-8')
Code Example #36
File: main.py, Project: dgomes/imap2rss
	def GET(self):
		cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
		fg = FeedGenerator()
		#TODO create icon
		# fg.icon('http://www.det.ua.pt')
		fg.id(config.get('rss','id'))
		fg.title(config.get('rss','title'))
		fg.subtitle(config.get('rss','subtitle'))
		fg.description(config.get('rss','description'))
		fg.author({'name': config.get('rss','author_name'), 'email':config.get('rss','author_email')})
		fg.language(config.get('rss','language'))
		fg.link(href=config.get('rss','href'), rel='related')

		client = EmailClient()

		for msgn in reversed(client.listBox(config.get('imap','mailbox'))[:config.getint('rss','maxitems')]):
			cherrypy.log("RSS Entry: "+msgn)
			em = client.getEMail(msgn)
			entry = fg.add_entry()
			entry.title(em['subject'])
			entry.author({'name': em['From']['name'], 'email': em['From']['email']})
			entry.guid(config.get("main","baseurl")+'news/'+msgn)
			entry.link({'href':config.get("main","baseurl")+'news/'+msgn, 'rel':'alternate'})
			entry.pubdate(em['date'])
			entry.content(em['body'])
		return	fg.rss_str(pretty=True)
Code Example #37
File: rss.py, Project: JLDevOps/RSS-Cart
def create_rss_feed(filename='rss-new.xml'):
    fg = FeedGenerator()
    fg.id(config.get("DEFAULT", "WEBSITE"))
    fg.title(config.get("DEFAULT", "RSSTITLE"))
    fg.description(config.get("DEFAULT", "RSSDESCRIPTION"))
    
    fg.author(
        {
            "name": config.get("DEFAULT", "AUTHOR"),
            "email": config.get("DEFAULT", "EMAIL")
        }
    )

    fg.link(href=config.get("DEFAULT", "RSSLINK"), rel='self' )
    fg.subtitle(config.get("DEFAULT", "RSSSUBTITLE"))
    fg.language(config.get("DEFAULT", "LANGUAGE"))
    feed = get_feed_list()

    for entry in feed:
        fe = fg.add_entry()
        fe.id (entry.id)
        fe.title(entry.title)
        fe.description(entry.description)
        fe.published(entry.published)
        fe.pubDate(entry.published)
        fe.link(href=entry.id)
    
    # Uncomment the bottom if you want an Atom RSS file created instead of an rss.xml
    # atomfeed = fg.atom_str(pretty=True) # Get the ATOM feed as string
    # fg.atom_file('atom-new.xml') # Write the ATOM feed to a file

    rssfeed  = fg.rss_str(pretty=True) # Get the RSS feed as string
    fg.rss_file(filename) # Write the RSS feed to a file
    return fg
Code Example #38
File: feed.py, Project: majestrate/pytracker
def generate(app, category, torrents):
    """
    generate an rss feed from category with torrents as results
    if category is None this feed is for all categories
    """
    feed = FeedGenerator()
    if category:
        url = util.fullSiteURL(app, 'feed', '{}.rss'.format(category))
    else:
        url = util.fullSiteURL(app, 'feed', 'all.rss')
    feed.link(href=url, rel="self")
    feed.id(url)
    if category:
        title = "new {} torrents on index ex invisibilis".format(category)
    else:
        title = "new torrents on index ex invisibilis"
    feed.title(title)
    feed.description(title)
    feed.author({"name": "anonymous"})
    feed.language("en")
    for torrent in torrents:
        item = feed.add_entry()
        url = util.fullSiteURL(app, torrent.downloadURL())
        item.id(torrent.infohash)
        item.link(href=url)
        item.title(torrent.title)
        item.description(torrent.summary(100))
    return feed
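Note that generate() returns the FeedGenerator itself rather than serialized XML, so a caller (hypothetical, not shown in this project) still has to render it:

feed = generate(app, category, torrents)
rss_bytes = feed.rss_str(pretty=True)  # bytes, suitable for an application/rss+xml response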
Code Example #40
File: main.py, Project: crgwbr/wt-podcast
    def export_feed(self, output):
        fg = FeedGenerator()
        fg.load_extension('podcast')
        fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
        fg.podcast.itunes_image("%s/icon.png" % URL_BASE)

        fg.title('JW.ORG Magazines')
        fg.description('Combined Feed of Watchtower (public), Watchtower (study), and Awake! in English from jw.org.')
        fg.link(href="%s/%s" % (URL_BASE, output), rel='self')

        manifest = self._load()
        entries = []
        for lang, mnemonics in manifest.items():
            for mnemonic, issues in mnemonics.items():
                for issue, data in issues.items():
                    entries.append((issue, data))

        for issue, entry in sorted(entries, key=lambda i: i[0], reverse=True):
            fe = fg.add_entry()

            fe.id( entry['hash'] )
            fe.title( entry['title'] )
            fe.description( entry['title'] )
            fe.published( pytz.utc.localize( entry['created_on'] ) )
            url = "%s/%s" % (URL_BASE, os.path.basename(entry['file']))
            mime = 'audio/mpeg'
            fe.enclosure(url, str(entry['duration']), mime)
            fe.link(href=url, type=mime)
        fg.rss_str(pretty=True)
        fg.rss_file(os.path.join(CACHE_DIR, output))
Code Example #41
def rss(lang: str):
    if not (isinstance(lang, str) and lang.isalpha() and len(lang) <= 3):
        abort(404)
    posts = get_blog_posts(lang)
    if not posts:
        abort(404)
    fg = FeedGenerator()
    fg.title('مدونة يوسف شعبان' if lang == 'ar' else 'yshalsager Blog')
    fg.description('آخر التدوينات على مدونتي' if lang ==
                   'ar' else 'Latest Feeds from my blog')
    fg.link(href=current_app.config['DOMAIN'], rel='alternate')
    fg.logo(f"{current_app.config['DOMAIN']}/static/img/logo.svg")
    fg.language(lang)

    for post in posts:
        fe = fg.add_entry()
        fe.title(post.title)
        fe.link(
            href=f"{current_app.config['DOMAIN']}/{lang}/blog/{post.file.stem}"
        )
        fe.description(post.html)
        fe.pubDate(
            datetime.combine(datetime.strptime(post.date, "%d-%m-%Y"),
                             datetime.min.time(),
                             tzinfo=timezone.utc))

    response = make_response(fg.rss_str(pretty=True))
    response.headers.set('Content-Type', 'application/rss+xml')

    return response
Code Example #42
File: snapfeed.py, Project: matthazinski/snapfeed
def gen_feed(user, base_url, path, debug=False):
    # Create feed
    feed = FeedGenerator()
    feed.id(urlparse.urljoin(base_url, user + '.xml'))
    feed.title('Snapchat story for ' + user)
    feed.link( href=urlparse.urljoin(base_url, user + '.xml'), rel='self' )
    feed.language('en')
    feed.description('Snapchat media')


    # Iterate through files in path, sort by unix timestamp (newest first), then add to feed
    files = sorted(os.listdir(path), reverse=True)

    for filename in files:
        split = filename.split('~')

        if split[0] != user:
            continue
        
        if os.path.splitext(filename)[1] in ['.mp4', '.jpg']:
            entry = feed.add_entry()
            entry.id(urlparse.urljoin(base_url, filename))
            entry.link(href=urlparse.urljoin(base_url, filename))
            entry.title(filename)

    
    # Write feed to disk
    feed.rss_file(os.path.join(path, user + '.xml'))
    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

    if debug:
        print('{0}  Regenerated {1}'.format(date, urlparse.urljoin(base_url, 
                                                               user + '.xml')))
Code Example #43
File: main.py, Project: ryotarai/github_trends_rss
def generate_rss(language, since):
    url = "{0}?since={1}".format(language["url"], since)
    file_name = "github_trends_{0}_{1}.rss".format(language["key"], since)
    title = "GitHub Trends - {0} - {1}".format(language["name"], since.capitalize())

    print(url)
    page = requests.get(url)
    tree = html.fromstring(page.content)
    lis = tree.cssselect("ol.repo-list li")

    fg = FeedGenerator()
    fg.title(title)
    fg.link(href="http://github-trends.ryotarai.info/rss/{0}".format(file_name))
    fg.description(title)
    index = 1
    for li in lis:
        a = li.cssselect("h3 a")[0]
        description = ""
        ps = li.cssselect("p")
        if len(ps) > 0:
            description = ps[0].text_content().strip()

        fe = fg.add_entry()
        fe.link(href="https://github.com{0}".format(a.get("href")))
        fe.title("{0} (#{1} - {2} - {3})".format(
            a.text_content().strip().replace(" / ", "/"),
            index,
            language["name"],
            since.capitalize(),
        ))
        fe.description(description)
        index += 1
    rssfeed = fg.rss_str(pretty=True)
    s3.Object(bucket, 'rss/{0}'.format(file_name)).put(Body=rssfeed, ContentType="application/xml")
Code Example #44
File: technowatch.py, Project: TheBlusky/technowatch
def build():
    global fg
    fg = FeedGenerator()
    fg.title(parser.get('technowatch', 'name'))
    fg.language('en')
    fg.description(parser.get('technowatch', 'name'))
    fg.link(href=parser.get('technowatch', 'link'), rel='alternate')
    # Cleaning stories if too much
    if len(known_stories) > int(parser.get('technowatch', 'cache_max')):
        clean()
    # Sorting stories by crawled date
    for item in sorted(known_stories.values(), key=operator.itemgetter('crawledDate'), reverse=True):
        fe = fg.add_entry()
        fe.link(href=item['url'], rel='alternate')
        fe.title("[" + item['type'] + "] " + item['title'])
        fe.category({
            'label': item['type'],
            'term': item['type']
        })
        fe.author({'name': item['by']})
        fe.description(item['desc'])
        fe.pubdate(item['crawledDate'])
    # Caching RSS building
    pickle.dump(known_stories, open(cust_path + "/technowatch.data", "wb"))
    if parser.get('wsgi', 'activated') == "True":
        fg.rss_file(cust_path + '/static/rss.xml')
    if parser.get('ftp', 'activated') == "True":
        upload()
Code Example #45
File: views.py, Project: chachan/bearblog
def feed(request):
    http_host = request.META['HTTP_HOST']

    if http_host == 'bearblog.dev' or http_host == 'www.bearblog.dev' or http_host == 'localhost:8000':
        return redirect('/')
    elif 'bearblog.dev' in http_host or 'localhost:8000' in http_host:
        extracted = tldextract.extract(http_host)
        blog = get_object_or_404(Blog, subdomain=extracted.subdomain)
        root = get_root(blog.subdomain)
    else:
        blog = get_object_or_404(Blog, domain=http_host)
        root = http_host

    all_posts = blog.post_set.filter(publish=True,
                                     is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'{root}/')
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    fg.subtitle(unmark(blog.content)[:160])
    fg.link(href=f"{root}/feed/", rel='self')
    fg.link(href=root, rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"{root}/{post.slug}")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        fe.link(href=f"{root}/feed")
        fe.content(unmark(post.content))

    atomfeed = fg.atom_str(pretty=True)
    return HttpResponse(atomfeed, content_type='application/atom+xml')
Code Example #46
def init_feed_generator(feed):
    feed_generator = FeedGenerator()
    feed_generator.load_extension('podcast')
    feed_generator.title("PocketCast")
    feed_generator.link(href=feed.feed.link, rel='alternate')
    feed_generator.subtitle(feed.feed.description or 'PocketCast')
    return feed_generator
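A hypothetical usage sketch (not part of the snippet above): the helper expects an already parsed source feed, e.g. a feedparser result, and the returned FeedGenerator still needs entries and serialization.

import feedparser

src = feedparser.parse('https://example.com/source-feed.xml')  # placeholder URL
fg = init_feed_generator(src)
for item in src.entries:
    fe = fg.add_entry()
    fe.id(item.link)
    fe.title(item.title)
    fe.link(href=item.link)
print(fg.rss_str(pretty=True).decode('utf-8'))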
Code Example #47
File: test_dc.py, Project: yradunchev/python-feedgen
class TestExtensionDc(unittest.TestCase):
    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('dc')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_entryLoadExtension(self):
        fe = self.fg.add_item()
        try:
            fe.load_extension('dc')
        except ImportError:
            pass  # Extension already loaded

    def test_elements(self):
        for method in dir(self.fg.dc):
            if method.startswith('dc_'):
                m = getattr(self.fg.dc, method)
                m(method)
                assert m() == [method]

        self.fg.id('123')
        assert self.fg.atom_str()
        assert self.fg.rss_str()
Code Example #48
File: main.py, Project: iPieter/peacock
def generate_feeds(config_data, output_path, drafts=False):
    fg = FeedGenerator()
    fg.id("1234")
    fg.title(config_data["RSS_title"])
    fg.author({
        "name": config_data["RSS_author_name"],
        "email": config_data["RSS_author_email"],
    })
    fg.link(href=config_data["RSS_link"], rel="alternate")
    fg.logo(config_data["RSS_logo"])
    fg.description(config_data["RSS_subtitle"])
    fg.link(href=config_data["RSS_link"] + "/test.atom", rel="self")
    fg.language(config_data["RSS_language"])

    for post in config_data["blog_posts"]:
        fe = fg.add_entry()
        fe.id(config_data["RSS_link"] + post["url"] + "/")
        fe.title(post["post_title"])
        fe.summary(post["abstract"])
        fe.published(
            datetime.strptime(post["date"], "%Y-%m-%d").isoformat() + "+00:00")
        fe.link(href=config_data["RSS_link"] + post["url"] + "/")

    fg.atom_file(os.path.join(output_path, "atom.xml"))
    fg.rss_file(os.path.join(output_path, "rss.xml"))
Code Example #49
def generate_feed(page=1):
    feed = FeedGenerator()
    feed.id("https://pub.dartlang.org/feed.atom")
    feed.title("Pub Packages for Dart")
    feed.link(href="https://pub.dartlang.org/", rel="alternate")
    feed.link(href="https://pub.dartlang.org/feed.atom", rel="self")
    feed.description("Last Updated Packages")
    feed.author({"name": "Dart Team"})
    i = 1
    pager = QueryPager(int(page),
                       "/feed.atom?page=%d",
                       Package.all().order('-updated'),
                       per_page=10)
    for item in pager.get_items():
        i += 1
        entry = feed.add_entry()
        for author in item.latest_version.pubspec.authors:
            entry.author({"name": author[0]})
        entry.title("v" + item.latest_version.pubspec.get("version") +
                    " of " + item.name)
        entry.link(link={"href": "https://pub.dartlang.org/packages/" + item.name,
                         "rel": "alternate", "title": item.name})
        entry.id(uuid.uuid5(uuid.NAMESPACE_URL,
                            ("https://pub.dartlang.org/packages/" + item.name + "#" +
                             item.latest_version.pubspec.get("version")).encode('utf-8')).urn)
        entry.description(
            item.latest_version.pubspec.get("description", "Not Available"))
        readme = item.latest_version.readme_obj
        if readme is not None:
            entry.content(readme.render(), type='html')
        else:
            entry.content("<p>No README Found</p>", type='html')
    return feed
コード例 #50
0
ファイル: tests.py プロジェクト: etalab/udata-gouvfr
    def feed(self, feed_title, title, content, url, published=None, summary=None,
             enclosure=None, media_thumbnail=None):
        feed = FeedGenerator()
        feed.title(feed_title)
        feed.description(faker.sentence())
        feed.link({'href': WP_FEED_URL})

        entry = feed.add_entry()
        entry.title(title)
        entry.link({'href': url})
        entry.author(name=faker.name())
        entry.content(content, type="cdata")
        if summary:
            entry.description(summary)
        if enclosure:
            entry.enclosure(url=enclosure['url'],
                            type=enclosure['type'],
                            length=str(faker.pyint()))
        if media_thumbnail:
            feed.load_extension('media')
            entry.media.thumbnail({'url': media_thumbnail})
        tz = pytz.timezone(faker.timezone())
        published = published or faker.date_time(tzinfo=tz)
        entry.published(published)
        entry.updated(faker.date_time_between(start_date=published, tzinfo=tz))

        return feed.rss_str().decode('utf8')
コード例 #51
0
def RSSClient():
	rss_name = request.args.get('rss_name', 'None')
	if rss_name=='None':
		abort(404)
	fg = FeedGenerator()
	#TODO create icon
	# fg.icon('http://www.det.ua.pt')
	fg.title(config['rss'][rss_name]['title'])
	fg.description(config['rss'][rss_name]['description'])
	if 'language' in config['rss'][rss_name]:
		fg.language(config['rss'][rss_name]['language'])
	fg.link(href=config['rss'][rss_name]['href'], rel='related')
	client = EmailClient(rss_name)

	for msgn in reversed(client.listBox()[:config['rss'].get('maxitems', 10)]):
		app.logger.info("RSS Entry: "+msgn.decode('utf-8'))
		em = client.getEMail(msgn)
		entry = fg.add_entry()
		entry.title(em['subject'])
		entry.guid(config["main"]["baseurl"]+'mail?rss_name='+rss_name+'&uid='+msgn.decode('utf-8'))
		entry.link({'href':config["main"]["baseurl"]+'mail?rss_name='+rss_name+'&uid='+msgn.decode('utf-8'), 'rel':'alternate'})
		entry.pubDate(em['date'])
		entry.content(em['body'])
	response = make_response(fg.rss_str(pretty=True))
	response.headers["Access-Control-Allow-Origin"] = "*"
	response.headers['Content-Type'] = 'application/rss+xml'
	return response
コード例 #52
0
def generate_feed():
    ''' render feed '''
    from feedgen.feed import FeedGenerator

    feed_name = 'feed_name'
    fg = FeedGenerator()
    fg.id('xxxurl/' + feed_name)
    fg.title(feed_name)
    fg.link(href='xxxurl/' + feed_name, rel='alternate')
    # fg.logo('http://ex.com/logo.jpg')
    fg.subtitle('by FeedGenerator')
    fg.link(href='xxxurl/' + feed_name + 'atom', rel='self')
    fg.language('zh-cn')

    for page in sorted(pages):
        fe = fg.add_entry()
        fe.id(page.metadata['url'])
        fe.title(page.metadata['title'])
        fe.link(href=page.metadata['url'])
        fe.description('\n\n' + page.to_html() + '\n')

    return fg.rss_str(pretty=True)
    # Alternatively (unreachable as written; kept to show the variant): serve a pre-built feed file
    if feed_path:
        return Response(open(feed_path, encoding='utf-8').read(),
                        mimetype='application/xml')
    else:
        abort(404)
コード例 #53
0
    def generate(self):
        main_feed_generator = FeedGenerator()
        main_feed_generator.title('MIUI Updates Tracker by XiaomiFirmwareUpdater')
        main_feed_generator.link(href=website, rel='alternate')
        main_feed_generator.description('Your best website to track MIUI ROM releases!')
        main_feed_generator.language('en')
        main_feed_generator.logo(f'{website}/images/xfu.png')
        main_feed_generator.lastBuildDate(None)
        for update in self.updates:
            short_codename = update.codename.split('_')[0]
            if short_codename not in self.feeds.keys():
                feed_generator = FeedGenerator()
                feed_generator.title(f'{update.name} MIUI Updates Tracker by XiaomiFirmwareUpdater')
                feed_generator.link(href=f'{website}/miui/{short_codename}', rel='alternate')
                feed_generator.description('Your best website to track MIUI ROM releases!')
                feed_generator.language('en')
                feed_generator.logo(f'{website}/images/xfu.png')
                feed_generator.lastBuildDate(None)
            else:
                feed_generator = self.feeds.get(short_codename)
            feed_generator = self.add_feed_entry(feed_generator, update)
            self.feeds.update({short_codename: feed_generator})
            main_feed_generator = self.add_feed_entry(main_feed_generator, update)
        main_feed_generator.rss_file(f"{CONF_DIR}/rss/latest.xml")
        for codename, feed in self.feeds.items():
            feed.rss_file(f"{CONF_DIR}/rss/{codename}.xml")
コード例 #54
0
ファイル: rssmanager.py プロジェクト: mfortini/alboPOP_saga
    def out_rss(self, filename):
        fg = FeedGenerator()
        fg.register_extension('albopop', AlbopopExtension, AlbopopEntryExtension)
        fg.id(self.url)
        fg.title(self.title)
        fg.description(self.title)
        fg.author({'name': 'alboPOP', 'email': ''})
        fg.link(href=self.url)
        fg.pubDate(formatdate())
        fg.webMaster(self.webMaster)
        fg.docs('https://github.com/mfortini/alboPOP_saga')
        fg.language('it')

        fg.albopop.categoryName(self.categoryName)
        fg.albopop.categoryType(self.categoryType)

        for item in self.items:
            fe = fg.add_entry()
            fe.id(item['link'])
            fe.category(term=item['tipo'])
            fe.pubdate(item['pubDate'])
            fe.link(href=item['link'])
            fe.title(item['title'])
            fe.description(item['description'])
            fe.albopop.categoryUID(str(item['numero']) + '/' + str(item['anno']))

        fg.rss_file(filename)
コード例 #55
0
ファイル: rss.py プロジェクト: Ullarah/TrashLinks
def route(user):
    init_session()
    fg = FeedGenerator()
    fg.title(session['site_name'])
    fg.subtitle('Trash bag of links')
    fg.generator(session['site_name'])
    fg.link(href=get_config_value('address'))
    fg.logo(f'{get_config_value("address")}/static/img/logo.png')
    fg.language('en')

    rss_posts = get_all_posts_for_rss(
    ) if user is None else get_all_user_posts_for_rss(user)

    for post in reversed(rss_posts):
        post_title = Markup((post.title[:60] +
                             '...') if len(post.title) > 60 else post.title)
        fe = fg.add_entry()
        fe.title(f'[{post.tags}] {post_title}')
        fe.link(href=html.unescape(Markup(post.url)))
        fe.author(name=post.username)
        fe.pubDate(
            datetime.datetime.fromtimestamp(post.datetime).strftime('%c +10'))
        fe.updated(
            datetime.datetime.fromtimestamp(post.updated).strftime('%c +10'))

    response = make_response(fg.rss_str(pretty=True))
    response.headers.set('Content-Type', 'application/rss+xml')

    return response
コード例 #56
0
def rss(_):
    """Returns the XML content of my RSS feed for the music part of the website.

    NOTE: We are doing no caching here at all right now, because this function is very fast and the website has
    no traffic. If this situation changes, then I should cache it so that I don't build this object from scratch every
    time."""
    generator = FeedGenerator()

    # Add basic metadata.
    generator.title("Paul's Music Feed")
    generator.author(name=MY_NAME, email=MY_EMAIL)
    generator.contributor(name=MY_NAME, email=MY_EMAIL)
    # RSS requires that we point to our own feed here. Not sure why.
    generator.link(href=(URL_ROOT + "rss"), rel="self")
    favicon_path = URL_ROOT + "static/favicon.png"
    generator.icon(favicon_path)
    generator.logo(favicon_path)
    generator.subtitle(
        "A feed for anyone who wants to know what albums I'm liking.")
    generator.language("en")

    albums = get_recent_music(quantity=30)
    for album in albums:
        entry = generator.add_entry()
        entry.title(album.name)
        path_to_album = URL_ROOT + "music/music/{}".format(album.id)
        entry.guid(path_to_album, permalink=True)
        entry.description(album.description())
        entry.updated(album.reviewed_at)
        entry.published(album.reviewed_at)
        entry.author(name=MY_NAME, email=MY_EMAIL)
        entry.link(href=path_to_album, rel="alternate")
        entry.category(term="score__{}".format(album.rating))

    return HttpResponse(generator.rss_str())
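
The docstring in the view above reasons that caching is skipped only because the feed builds quickly and the site has little traffic. As a minimal sketch of how that caching could be added later, one option (not part of the original project) is Django's built-in per-view cache; this assumes a cache backend is configured in settings, and the 15-minute timeout is purely illustrative:

from django.views.decorators.cache import cache_page

@cache_page(60 * 15)  # serve a cached copy of the rendered feed for up to 15 minutes
def rss(_):
    # build the FeedGenerator exactly as in the view above, then:
    # return HttpResponse(generator.rss_str())
    ...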
コード例 #57
0
ファイル: feedTumblr.py プロジェクト: fernand0/scripts
def main():

    client = moduleSocial.connectTumblr()

    posts = client.posts('fernand0')
    
    fg = FeedGenerator()
    fg.id(posts['blog']['url'])
    fg.title(posts['blog']['title'])
    fg.author( {'name':posts['blog']['name'],'email':'*****@*****.**'} )
    fg.link( href=posts['blog']['url'], rel='alternate' )
    fg.subtitle('Alternate feed due to Tumblr GDPR restrictions')
    fg.language('en')

    print(len(posts['posts']))
    for i in range(len(posts['posts'])):
        fe = fg.add_entry()
        print(posts['posts'][i]['post_url'])
        if 'title' in posts['posts'][i]:
            title = posts['posts'][i]['title']
            print('T', posts['posts'][i]['title'])
        else:
            title = posts['posts'][i]['summary'].split('\n')[0]
            print('S', posts['posts'][i]['summary'].split('\n')[0])
        fe.title(title)
        fe.link(href=posts['posts'][i]['post_url'])
        fe.id(posts['posts'][i]['post_url'])

    print(fg.atom_file('/var/www/html/elmundoesimperfecto/tumblr.xml'))

    sys.exit()
コード例 #58
0
def build_rss_feeds(blog_posts):
    feed = FeedGenerator()
    feed.load_extension("media", rss=True, atom=True)
    base = "https://whotracks.me"

    for post in blog_posts:
        if post["publish"]:
            entry = feed.add_entry()
            entry.id(f'{base}/blog/{post["filename"]}.html')
            entry.title(post["title"])
            entry.link(link={"href": f"{base}/blog/{post['filename']}.html"})
            entry.author({"name": post["author"]})
            entry.pubDate(
                datetime.strptime(post["date"],
                                  "%Y-%m-%d").replace(tzinfo=timezone("CET")))
            entry.description(post["subtitle"])
            entry.media.thumbnail(
                url=f'https://whotracks.me/static/img/{post["header_img"]}')

    feed.title("WhoTracksMe blog")
    feed.description("By the Ghostery tech team")
    feed.link(link={"href": f"{base}/blog.html"})

    feed.id("wtm")
    feed.language("en")
    feed.logo(f"{base}/static/img/who-tracksme-logo.png")

    feed.rss_file("_site/blog/feed.xml")
コード例 #59
0
def latestRss(userID):
    userID = userID.lower()

    shows = {}
    episodes = []
    today = date.today().strftime('%Y-%m-%d')
    for showID in series.getUserShowList(userID):
        shows[showID] = series.getShowInfo(userID,
                                           showID,
                                           withEpisodes=True,
                                           onlyUnseen=True)
        episodes.extend(
            (showID, episode) for episode in shows[showID]['episodes']
            if episode['airdate'] and airdateKey(episode['airdate']) < today)

    episodes.sort(key=episodeAirdateKey, reverse=True)

    feed = FeedGenerator()
    feed.id(userID)
    feed.title('%s\'s shows' % userID)
    feed.description('Unseen episodes')
    feed.link(href=request.url_root)
    feed.language('en')

    for showID, episode in episodes:
        entry = feed.add_entry()
        entry.id('%s/%s' % (showID, episode['episode_id']))
        entry.title('%s S%02dE%02d: %s' %
                    (shows[showID]['name'], episode['season'],
                     episode['episode'], episode['title']))

    return feed.rss_str(pretty=True)
コード例 #60
0
class TestExtensionSyndication(unittest.TestCase):

    SYN_NS = {'sy': 'http://purl.org/rss/1.0/modules/syndication/'}

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('syndication')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_update_period(self):
        for period_type in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
            self.fg.syndication.update_period(period_type)
            root = etree.fromstring(self.fg.rss_str())
            a = root.xpath('/rss/channel/sy:UpdatePeriod',
                           namespaces=self.SYN_NS)
            assert a[0].text == period_type

    def test_update_frequency(self):
        for frequency in (1, 100, 2000, 100000):
            self.fg.syndication.update_frequency(frequency)
            root = etree.fromstring(self.fg.rss_str())
            a = root.xpath('/rss/channel/sy:UpdateFrequency',
                           namespaces=self.SYN_NS)
            assert a[0].text == str(frequency)

    def test_update_base(self):
        base = '2000-01-01T12:00+00:00'
        self.fg.syndication.update_base(base)
        root = etree.fromstring(self.fg.rss_str())
        a = root.xpath('/rss/channel/sy:UpdateBase', namespaces=self.SYN_NS)
        assert a[0].text == base