Example #1
def write_podcast(show, podcast_dir, base_public_url, showlocal_tz):
    """Create the podcast file."""
    fg = FeedGenerator()
    fg.load_extension('podcast')

    url = "{}{}.xml".format(base_public_url, show.id)
    fg.id(url.split('.')[0])
    fg.title(show.name)
    fg.image(show.image_url)
    fg.description(show.description)
    fg.link(href=url, rel='self')

    # collect all mp3s for the given show
    all_mp3s = glob.glob(os.path.join(podcast_dir, "{}_*.mp3".format(show.id)))

    for filepath in all_mp3s:
        filename = os.path.basename(filepath)
        mp3_date = _get_date_from_mp3_path(filepath, showlocal_tz)
        mp3_size = os.stat(filepath).st_size
        mp3_url = base_public_url + filename
        mp3_id = filename.split('.')[0]
        title = "Programa del {0:%d}/{0:%m}/{0:%Y}".format(mp3_date)

        # build the rss entry
        fe = fg.add_entry()
        fe.id(mp3_id)
        fe.pubdate(mp3_date)
        fe.title(title)
        fe.enclosure(mp3_url, str(mp3_size), 'audio/mpeg')

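    # rss_str() only renders the XML; rss_file() writes the feed next to the MP3s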
    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(podcast_dir, '{}.xml'.format(show.id)))
Example #2
def latestRss(userID):
    userID = userID.lower()

    shows = {}
    episodes = []
    today = date.today().strftime('%Y-%m-%d')
    for showID in series.getUserShowList(userID):
        shows[showID] = series.getShowInfo(userID, showID, withEpisodes=True, onlyUnseen=True)
        episodes.extend((showID, episode) for episode in shows[showID]['episodes'] if episode['airdate'] and airdateKey(episode['airdate']) < today)

    episodes.sort(key=episodeAirdateKey, reverse=True)

    feed = FeedGenerator()
    feed.id(userID)
    feed.title('%s\'s shows' % userID)
    feed.description('Unseen episodes')
    feed.link(href=request.url_root)
    feed.language('en')

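    # one entry per unseen, already-aired episode, newest first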
    for showID, episode in episodes:
        entry = feed.add_entry()
        entry.id('%s/%s' % (showID, episode['episode_id']))
        entry.title('%s S%02dE%02d: %s' % (shows[showID]['name'], episode['season'], episode['episode'], episode['title']))

    return feed.rss_str(pretty=True)
Example #3
class TestExtensionTorrent(unittest.TestCase):

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('torrent')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_podcastEntryItems(self):
        fe = self.fg.add_item()
        fe.title('y')
        fe.torrent.filename('file.xy')
        fe.torrent.infohash('123')
        fe.torrent.contentlength('23')
        fe.torrent.seeds('1')
        fe.torrent.peers('2')
        fe.torrent.verified('1')
        assert fe.torrent.filename() == 'file.xy'
        assert fe.torrent.infohash() == '123'
        assert fe.torrent.contentlength() == '23'
        assert fe.torrent.seeds() == '1'
        assert fe.torrent.peers() == '2'
        assert fe.torrent.verified() == '1'

        # Check that we have the item in the resulting XML
        ns = {'torrent': 'http://xmlns.ezrss.it/0.1/dtd/'}
        root = etree.fromstring(self.fg.rss_str())
        filename = root.xpath('/rss/channel/item/torrent:filename/text()',
                              namespaces=ns)
        assert filename == ['file.xy']
Example #4
def _filter_fb_rss_feeed(url):
    parsed_feed = feedparser.parse(url)
    filtered_entries = filter(
        lambda x: ' shared a link: "' in x.title, parsed_feed.entries)

    fg = FeedGenerator()
    fg.id('https://fb-notifications-to-pocket.herokuapp.com/')
    fg.title('Facebook Notifications to Pocket')
    fg.author({'name': 'Pankaj Singh', 'email': '*****@*****.**'})
    fg.description(
        '''Filter FB notifications which contain a link and generate a new rss feed which will be used by IFTTT''')
    fg.link(href='https://fb-notifications-to-pocket.herokuapp.com/')

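    # the shared URL travels in the 'u' query parameter of the last link in the notification summary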
    for entry in filtered_entries:
        root = etree.HTML(entry.summary_detail.value)
        title = entry.title.split(" shared a link: ")[1].strip()[1:-2]
        author_name = entry.title.split(" shared a link: ")[0].strip()
        url = urlparse.parse_qs(
            urlparse.urlparse(root.findall(".//a")[-1].attrib["href"]).query)["u"][0]

        title = get_title_for_url(url) or title

        fe = fg.add_entry()
        fe.id(entry.id)
        fe.link(href=url)
        fe.published(entry.published)
        fe.author({'name': author_name})
        fe.title(title)

    return fg.atom_str(pretty=True)
Example #5
def generate_feed(output_file, exclude_highlights=True):
    # Parse RSS feed
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:

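        # when exclude_highlights is set, skip any episode longer than an hour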
        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            pass
        else:
            fe = fg.add_entry()

            fe.id(e.id)
            fe.title(e.title)
            fe.description(e.description)
            fe.enclosure(url=e.enclosures[0]["href"], length=e.enclosures[0]["length"], type=e.enclosures[0]["type"])

            fe.podcast.itunes_summary(e.description)
            fe.podcast.itunes_subtitle(e.description)
            fe.podcast.itunes_duration(e["itunes_duration"])

            dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
            date = tz.localize(dt)

            # Shift the publish time by the hour segment named in the title
            if "Show: " in e.title:
                fe.published(date)
            elif "Hour 1" in e.title:
                fe.published(date + timedelta(hours=1))
            elif "Hour 2" in e.title:
                fe.published(date + timedelta(hours=2))
            elif "Hour 3" in e.title:
                fe.published(date + timedelta(hours=3))
            else:
                fe.published(date + timedelta(hours=-1))

    fg.rss_str(pretty=True)
    fg.rss_file(output_file)
Example #6
def main():
    session = vk.Session()
    api = vk.API(session)

    group_id = '96469126'

    group_info = api.groups.getById(group_ids=group_id, fields=['description', 'site', 'name', 'photo', 'gid'])

    assert len(group_info) == 1
    group_info = group_info[0]

    url = 'http://vk.com/club{}'.format(group_info['gid'])
    # a = api.wall.get(owner_id=-1 * group_info['gid'])
    #
    # with open('out', 'wb') as fio:
    #     pickle.dump(a, fio)

    with open('out', 'rb') as fio:
        data = pickle.loads(fio.read())

    assert len(data) > 1

    fg = FeedGenerator()
    fg.id(url)
    fg.title(_(group_info['name']))
    fg.description(_(group_info['description']))
    fg.logo(group_info['photo'])
    site_url = group_info.get('site', url) if group_info.get('site', url) else url
    fg.link(href=_(site_url))
    fg.link(href=_(site_url), rel='self')
    fg.link(href=_(site_url), rel='alternate')
    fg.author({'name': 'Alexander Sapronov', 'email': '*****@*****.**'})
    fg.webMaster('[email protected] (Alexander Sapronov)')

    pat = re.compile(r"#(\w+)")

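    # each wall post becomes an entry; hashtags turn into categories and are stripped from the title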
    for x in data[1:]:
        post_link = "{}?w=wall-{}_{}".format(url, group_info['gid'], x['id'])
        e = fg.add_entry()
        # text = x.get('text', '').replace('<br>', '\n')
        text = x.get('text', '')

        e.description(_(text))
        e.author({'name': _(get_author_name(api, x.get('from_id')))})
        e.id(post_link)
        e.link(href=_(post_link))
        e.link(href=_(post_link), rel='alternate')

        tags = pat.findall(text)

        title = x.get('text', '')
        for tag in tags:
            e.category(term=_(tag))
            title = title.replace('#{}'.format(tag), '')

        title = re.sub('<[^<]+?>', ' ', title)
        title = textwrap.wrap(title, width=80)[0]
        e.title(_(title.strip()))

    fg.rss_file('rss.xml')
Example #7
class YoutubeFeed:  
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': '%(id)s.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }]
    }

    def __init__(self, name):
        self.name = name
        self.ydl = youtube_dl.YoutubeDL(self.ydl_opts)

        self.fg = FeedGenerator()
        self.fg.title(name)
        self.fg.author({"name": "Youtube Audio Feed", "email": ""})
        self.fg.link(href="http://www.foo.bar.baz.com", rel="alternate")
        self.fg.description("Personalized Youtube audio feed")
        self.fg.generator("")
        self.fg.docs("")

    def add_video(self, url):
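        # youtube_dl downloads the video and, per ydl_opts, extracts the audio to <id>.mp3 for the enclosure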
        info = self.ydl.extract_info(url, download=True)
        entry = self.fg.add_entry()
        entry.id(info['id'])
        entry.title(info['title'])
        entry.description(info['description'])
        entry.enclosure(info['id'] + ".mp3", str(info['duration']), 'audio/mpeg')

    def save(self):
        self.fg.rss_file(self.name + '.xml')
Example #8
 def generate_feed(page=1):
     feed = FeedGenerator()
     feed.id("https://pub.dartlang.org/feed.atom")
     feed.title("Pub Packages for Dart")
     feed.link(href="https://pub.dartlang.org/", rel="alternate")
     feed.link(href="https://pub.dartlang.org/feed.atom", rel="self")
     feed.description("Last Updated Packages")
     feed.author({"name": "Dart Team"})
     i = 1
     pager = QueryPager(int(page), "/feed.atom?page=%d",
                        Package.all().order('-updated'),
                        per_page=10)
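     # ten of the most recently updated packages per page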
     for item in pager.get_items():
         i += 1
         entry = feed.add_entry()
         for author in item.latest_version.pubspec.authors:
             entry.author({"name": author[0]})
         entry.title("v" + item.latest_version.pubspec.get("version") +\
             " of " + item.name)
         entry.link(link={"href": item.url, "rel": "alternate",
             "title": item.name})
         entry.id(
             "https://pub.dartlang.org/packages/" + item.name + "#" +\
             item.latest_version.pubspec.get("version"))
         entry.description(
             item.latest_version.pubspec
             .get("description", "Not Available"))
         readme = item.latest_version.readme
         if not readme is None:
             entry.content(item.latest_version.readme.render(), type='html')
         else:
             entry.content("<p>No README Found</p>", type='html')
     return feed
Example #9
def rss():    
    config = public_app.config['feed']
    fg = FeedGenerator()
    fg.id('%s/blog' % Config.BASE_URL)
    fg.title(config['title'])
    fg.author( {'name': config['author'],'email': config['email']} )
    fg.description(config['desc'])
    fg.link( href=Config.BASE_URL, rel='alternate' )
    query = {
        'id': { '$regex': 'blog' },
        'current': True,
        'meta.hide': { '$ne': True }
    }
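    # the 20 most recently created blog pages that are current and not hidden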
    posts = db.pages.find(query).sort('meta.created', -1)[:20]
    for post in posts:
        fe = fg.add_entry()
        fe.title(post['meta']['title'])
        if 'author' in post['meta']:
            fe.author( {'name': post['meta']['author'],'email': config['email']} )
        else:
            fe.author( {'name': config['author'],'email': config['email']} )
        fe.description(do_truncate(post['content'], 300))
        fe.link(href="%s/%s" % (Config.BASE_URL, post['id']), rel='alternate')
        fe.pubdate(utc.localize(post['meta']['created']))
        fe.content(post['content'])    
    response.headers['Content-Type'] = 'application/rss+xml'
    return fg.rss_str(pretty=True)
Example #10
def generate(app, category, torrents):
    """
    generate an rss feed from category with torrents as results
    if category is None this feed is for all categories
    """
    feed = FeedGenerator()
    if category:
        url = util.fullSiteURL(app, 'feed', '{}.rss'.format(category))
    else:
        url = util.fullSiteURL(app, 'feed', 'all.rss')
    feed.link(href=url, rel="self")
    feed.id(url)
    if category:
        title = "new {} torrents on index ex invisibilis".format(category)
    else:
        title = "new torrents on index ex invisibilis"
    feed.title(title)
    feed.description(title)
    feed.author({"name": "anonymous"})
    feed.language("en")
    for torrent in torrents:
        item = feed.add_entry()
        url = util.fullSiteURL(app, torrent.downloadURL())
        item.id(torrent.infohash)
        item.link(href=url)
        item.title(torrent.title)
        item.description(torrent.summary(100))
    return feed
Example #11
def podcast_feed():
    logo_url = url_for("static", filename="wpclogo_big.png", _external=True)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_image(logo_url)
    fg.author({'name': 'Nathan Kellert', 'email': '*****@*****.**'})
    fg.link(href='http://watchpeoplecode.com/podcast_feed.xml', rel='self')
    fg.title('WPC Coders Podcast')
    fg.description('WPC Coders Podcast is a weekly peek into the lives of developers and the WatchPeopleCode community. Our goal is to keep our listeners entertained by giving them new and interesting insights into our industry as well as awesome things happening within our own community. Here, you can expect hear about some of the latest news, tools, and opportunities for developers in nearly every aread of our industry. Most importantly, we hope to have some fun and a few laughs in ways only other nerds know how.')  # NOQA

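    # each tuple: (S3 filename, episode title, publish date, show notes)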
    episodes = [('ep1.mp3', 'Episode 1', datetime(2015, 02, 21, 23), 'Learn all about the WPC hosts, and where we came from in Episode 1!'),
                ('ep2.mp3', 'Episode 2', datetime(2015, 02, 28, 23), 'This week we cover your news, topics and questions in episode 2!'),
                ('ep3.mp3', 'Episode 3', datetime(2015, 03, 07, 23), "On todays podcast we talk to WatchPeopleCode's founder Alex Putilin. Hear about how the reddit search engine thousands watched him write. Also, hear the inside scoop of how WatchPeopleCode got started!"),  # NOQA
                ('ep4.mp3', 'Episode 4', datetime(2015, 03, 14, 23), "This week we talk to FreeCodeCamps Quincy Larson(http://www.freecodecamp.com) about their project that combines teaching new developers how to code and completing projects for non-profits! Lets find out how this group of streamers code with a cause!")]  # NOQA

    for epfile, eptitle, epdate, epdescription in episodes[::-1]:
        epurl = "https://s3.amazonaws.com/wpcpodcast/{}".format(epfile)
        fe = fg.add_entry()
        fe.id(epurl)
        fe.title(eptitle)
        fe.description(epdescription)
        fe.podcast.itunes_image(logo_url)
        fe.pubdate(epdate.replace(tzinfo=pytz.UTC))
        fe.enclosure(epurl, 0, 'audio/mpeg')

    return Response(response=fg.rss_str(pretty=True),
                    status=200,
                    mimetype='application/rss+xml')
Example #12
def build():
    global fg
    fg = FeedGenerator()
    fg.title(parser.get('technowatch', 'name'))
    fg.language('en')
    fg.description(parser.get('technowatch', 'name'))
    fg.link(href=parser.get('technowatch', 'link'), rel='alternate')
    # Cleaning stories if too much
    if len(known_stories) > int(parser.get('technowatch', 'cache_max')):
        clean()
    # Sorting stories by crawled date
    for item in sorted(known_stories.values(), key=operator.itemgetter('crawledDate'), reverse=True):
        fe = fg.add_entry()
        fe.link(href=item['url'], rel='alternate')
        fe.title("[" + item['type'] + "] " + item['title'])
        fe.category({
            'label': item['type'],
            'term': item['type']
        })
        fe.author({'name': item['by']})
        fe.description(item['desc'])
        fe.pubdate(item['crawledDate'])
    # Caching RSS building
    pickle.dump(known_stories, open(cust_path + "/technowatch.data", "wb"))
    if parser.get('wsgi', 'activated') == "True":
        fg.rss_file(cust_path + '/static/rss.xml')
    if parser.get('ftp', 'activated') == "True":
        upload()
Example #13
def get_feed(query, title, description, link, image):
    """Get an RSS feed from the results of a query to the YouTube API."""
    service = _get_youtube_client()
    videos = service.search().list(part='snippet', **query, order='date',
                                   type='video', safeSearch='none').execute()
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(title)
    fg.description(description)
    fg.link(href=link, rel='alternate')
    fg.image(image)
    youtube_plugin = get_plugin_from_settings()

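    # resolve each video to a direct media URL via the plugin; videos it cannot handle are skipped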
    for video in videos['items']:
        try:
            video_url = youtube_plugin.extract_link(
                "https://www.youtube.com/watch?v=" + video['id']['videoId'])
        except PluginException:
            continue
        fe = fg.add_entry()
        fe.id(video['id']['videoId'])
        fe.title(video['snippet']['title'])
        fe.description(video['snippet']['description'])
        fe.pubdate(dateutil.parser.parse(video['snippet']['publishedAt']))
        fe.podcast.itunes_image(video['snippet']['thumbnails']['high']['url'])
        video_info = requests.head(video_url)
        fe.enclosure(video_url, video_info.headers['Content-Length'],
                     video_info.headers['Content-Type'])
    return fg.rss_str(pretty=True)
Example #14
    def setUp(self):
        fg = FeedGenerator()
        self.feedId = 'http://example.com'
        self.title = 'Some Testfeed'

        fg.id(self.feedId)
        fg.title(self.title)
        fg.link(href='http://lkiesow.de', rel='alternate')[0]
        fg.description('...')

        fe = fg.add_entry()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The First Episode')
        fe.content(u'…')

        # Use also the different name add_item
        fe = fg.add_item()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The Second Episode')
        fe.content(u'…')

        fe = fg.add_entry()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The Third Episode')
        fe.content(u'…')

        self.fg = fg
Example #15
def generate_rss(language, since):
    url = "{0}?since={1}".format(language["url"], since)
    file_name = "github_trends_{0}_{1}.rss".format(language["key"], since)
    title = "GitHub Trends - {0} - {1}".format(language["name"], since.capitalize())

    print(url)
    page = requests.get(url)
    tree = html.fromstring(page.content)
    lis = tree.cssselect("ol.repo-list li")

    fg = FeedGenerator()
    fg.title(title)
    fg.link(href="http://github-trends.ryotarai.info/rss/{0}".format(file_name))
    fg.description(title)
    index = 1
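    # one entry per trending repository; index records its rank on the page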
    for li in lis:
        a = li.cssselect("h3 a")[0]
        description = ""
        ps = li.cssselect("p")
        if len(ps) > 0:
            description = ps[0].text_content().strip()

        fe = fg.add_entry()
        fe.link(href="https://github.com{0}".format(a.get("href")))
        fe.title("{0} (#{1} - {2} - {3})".format(
            a.text_content().strip().replace(" / ", "/"),
            index,
            language["name"],
            since.capitalize(),
        ))
        fe.description(description)
        index += 1
    rssfeed = fg.rss_str(pretty=True)
    s3.Object(bucket, 'rss/{0}'.format(file_name)).put(Body=rssfeed, ContentType="application/xml")
Example #16
def feed(column_id):
    api = Api(column_id)

    with request.urlopen(api.info) as stream:
        result = stream.read().decode('utf-8')

    if not result:
        return '', 404

    info = json.loads(result)

    with request.urlopen(api.posts) as stream:
        result = stream.read().decode('utf-8')
        entries = json.loads(result)

    fg = FeedGenerator()
    fg.id(str(entries[0]['slug']))
    fg.title(info['name'])
    fg.language('zh_CN')
    fg.icon(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 's'))
    fg.logo(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 'l'))
    fg.description(info['intro'])
    fg.author(dict(name=info['creator']['name']))
    fg.link(href=api.base_url + info['url'], rel='alternate')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry['url'])
        fe.title(entry['title'])
        fe.published(entry['publishedTime'])
        fe.updated(entry['publishedTime'])
        fe.author(dict(name=entry['author']['name']))
        fe.link(href=api.base_url + entry['url'], rel='alternate')
        fe.content(entry['content'])

    return fg.atom_str(pretty=True)
Example #17
def get_feed(atom=False):
    fg = FeedGenerator()
    domain = get_domain()
    items = get_posts({"limit": "10"}, full=True)["results"]
    fg.id("http://%s/"%domain)
    fg.title("Blog do MatrUFSC2")
    fg.description("Feed do blog do MatrUFSC2, onde noticias e novos recursos sao anunciados primeiro!")
    fg.language('pt-BR')
    fg.link({"href":"/blog/feed","rel":"self"})
    fg.updated(items[0]["posted_at"].replace(tzinfo=pytz.UTC))
    for item in items:
        entry = fg.add_entry()
        entry.title(item["title"])

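        # strip all markup from the summary so the description is plain text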
        tree = html.fromstring(item["summary"])
        cleaner = Cleaner(allow_tags=[])
        tree = cleaner.clean_html(tree)

        text = tree.text_content()
        entry.description(text, True)
        entry.link({"href":item["link"],"rel":"self"})
        entry.content(item["body"])
        entry.published(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.updated(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.category({"label": item["category"]["title"], "term": item["category"]["slug"]})
        entry.id(item["id"])
    if atom:
        return fg.atom_str(pretty=True)
    else:
        return fg.rss_str(pretty=True)
Example #18
def run(folder, url):
    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()

    head, tail = os.path.split(folder)

    title = tail.decode("utf-8")
    fg.id(str(uuid.uuid4()))
    fg.title(title)

    fg.link(href="{0}/rss.xml".format(url), rel="self")
    fg.description(u"Audiobook `{0}` generated with rssbook".format(title))

    fg.load_extension("podcast")
    for item in sorted(os.listdir(folder)):
        if os.path.splitext(item)[1] == ".mp3":
            get_node(os.path.join(folder, item))

        fullpath = os.path.join(folder, item)
        fe = fg.add_entry()
        fe.id(str(uuid.uuid4()))
        fe.title(title)
        fe.description(item)

        fe.link(
            href="{0}/{1}".format(url, item), rel="enclosure", type="audio/mpeg", length=str(os.stat(fullpath).st_size)
        )

    fg.rss_file(os.path.join(folder, "rss.xml"))
Example #19
	def GET(self):
		cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
		fg = FeedGenerator()
		#TODO create icon
		# fg.icon('http://www.det.ua.pt')
		fg.id(config.get('rss','id'))
		fg.title(config.get('rss','title'))
		fg.subtitle(config.get('rss','subtitle'))
		fg.description(config.get('rss','description'))
		fg.author({'name': config.get('rss','author_name'), 'email':config.get('rss','author_email')})
		fg.language(config.get('rss','language'))
		fg.link(href=config.get('rss','href'), rel='related')

		client = EmailClient()

		for msgn in reversed(client.listBox(config.get('imap','mailbox'))[:config.getint('rss','maxitems')]):
			cherrypy.log("RSS Entry: "+msgn)
			em = client.getEMail(msgn)
			entry = fg.add_entry()
			entry.title(em['subject'])
			entry.author({'name': em['From']['name'], 'email': em['From']['email']})
			entry.guid(config.get("main","baseurl")+'news/'+msgn)
			entry.link({'href':config.get("main","baseurl")+'news/'+msgn, 'rel':'alternate'})
			entry.pubdate(em['date'])
			entry.content(em['body'])
		return	fg.rss_str(pretty=True)
Example #20
    def feed(self, feed_title, title, content, url, published=None, summary=None,
             enclosure=None, media_thumbnail=None):
        feed = FeedGenerator()
        feed.title(feed_title)
        feed.description(faker.sentence())
        feed.link({'href': WP_FEED_URL})

        entry = feed.add_entry()
        entry.title(title)
        entry.link({'href': url})
        entry.author(name=faker.name())
        entry.content(content, type="cdata")
        if summary:
            entry.description(summary)
        if enclosure:
            entry.enclosure(url=enclosure['url'],
                            type=enclosure['type'],
                            length=str(faker.pyint()))
        if media_thumbnail:
            feed.load_extension('media')
            entry.media.thumbnail({'url': media_thumbnail})
        tz = pytz.timezone(faker.timezone())
        published = published or faker.date_time(tzinfo=tz)
        entry.published(published)
        entry.updated(faker.date_time_between(start_date=published, tzinfo=tz))

        return feed.rss_str().decode('utf8')
Example #21
class TestExtensionDc(unittest.TestCase):

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('dc')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_entryLoadExtension(self):
        fe = self.fg.add_item()
        try:
            fe.load_extension('dc')
        except ImportError:
            pass  # Extension already loaded

    def test_elements(self):
        for method in dir(self.fg.dc):
            if method.startswith('dc_'):
                m = getattr(self.fg.dc, method)
                m(method)
                assert m() == [method]

        self.fg.id('123')
        assert self.fg.atom_str()
        assert self.fg.rss_str()
Example #22
    def build_feed(self):
        "Build the feed given our existing URL"
        # Get all the episodes
        page_content = str(requests.get(self.url).content)
        parser = BassdriveParser()
        parser.feed(page_content)
        links = parser.get_links()

        # And turn them into something usable
        fg = FeedGenerator()
        fg.id(self.url)
        fg.title(self.title)
        fg.description(self.title)
        fg.author({'name': self.dj})
        fg.language('en')
        fg.link({'href': self.url, 'rel': 'alternate'})
        fg.logo(self.logo)

        for link in links:
            fe = fg.add_entry()
            fe.author({'name': self.dj})
            fe.title(link[0])
            fe.description(link[0])
            fe.enclosure(self.url + link[1], 0, 'audio/mpeg')

            # Bassdrive always uses date strings of
            # [yyyy.mm.dd] with 0 padding on days and months,
            # so that makes our lives easy
            date_start = link[0].find('[')
            date_str = link[0][date_start:date_start+12]
            published = datetime.strptime(date_str, '[%Y.%m.%d]')
            fe.pubdate(UTC.localize(published))
            fe.guid((link[0]))

        return fg
Example #23
    def export_feed(self, output):
        fg = FeedGenerator()
        fg.load_extension('podcast')
        fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
        fg.podcast.itunes_image("%s/icon.png" % URL_BASE)

        fg.title('JW.ORG Magazines')
        fg.description('Combined Feed of Watchtower (public), Watchtower (study), and Awake! in English from jw.org.')
        fg.link(href="%s/%s" % (URL_BASE, output), rel='self')

        manifest = self._load()
        entries = []
        for lang, mnemonics in manifest.items():
            for mnemonic, issues in mnemonics.items():
                for issue, data in issues.items():
                    entries.append((issue, data))

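        # newest issues first (sorted by issue key, descending)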
        for issue, entry in sorted(entries, key=lambda i: i[0], reverse=True):
            fe = fg.add_entry()

            fe.id( entry['hash'] )
            fe.title( entry['title'] )
            fe.description( entry['title'] )
            fe.published( pytz.utc.localize( entry['created_on'] ) )
            url = "%s/%s" % (URL_BASE, os.path.basename(entry['file']))
            mime = 'audio/mpeg'
            fe.enclosure(url, str(entry['duration']), mime)
            fe.link(href=url, type=mime)
        fg.rss_str(pretty=True)
        fg.rss_file(os.path.join(CACHE_DIR, output))
Example #24
def gen_feed(user, base_url, path, debug=False):
    # Create feed
    feed = FeedGenerator()
    feed.id(urlparse.urljoin(base_url, user + '.xml'))
    feed.title('Snapchat story for ' + user)
    feed.link( href=urlparse.urljoin(base_url, user + '.xml'), rel='self' )
    feed.language('en')
    feed.description('Snapchat media')


    # Iterate through files in path, sort by unix timestamp (newest first), then add to feed
    files = sorted(os.listdir(path), reverse=True)

    for filename in files:
        split = filename.split('~')

        if split[0] != user:
            continue
        
        if os.path.splitext(filename)[1] in ['.mp4', '.jpg']:
            entry = feed.add_entry()
            entry.id(urlparse.urljoin(base_url, filename))
            entry.link(href=urlparse.urljoin(base_url, filename))
            entry.title(filename)

    
    # Write feed to disk
    feed.rss_file(os.path.join(path, user + '.xml'))
    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

    if debug:
        print('{0}  Regenerated {1}'.format(date, urlparse.urljoin(base_url, 
                                                               user + '.xml')))
Example #25
  def makeRss(self):
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine Robot Radio: ' + self.mode)
    fg.author( {'name':'David Blackmad','email':'*****@*****.**'} )
    fg.logo('http://dump.blackmad.com/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine Robot Radio: ' + self.mode)

    description = ' <br/>'.join(['%s. %s' % (index + 1, self.mk_song_id(s)) for index, s in enumerate(self.songs)])

    fe = fg.add_entry()
    fe.title(self.track_name)
    fe.description(description)
    fe.id(self.filename)
    # add length
    print(self.relative_dir)
    print(self.filename)
    fe.enclosure(url = 'http://hypecast.blackmad.com/%s' % (self.filename), type="audio/mpeg")

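    # render the full RSS but write out only the single <item> element before updating the feed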
    rss_str = fg.rss_str()
    newItem = ET.fromstring(rss_str)[0].find('item')
    out = open(self.get_filename('xml'), 'w')
    out.write(ET.tostring(newItem))
    out.close()
    self.updateRss()
Example #26
def feedgen():
	with lite.connect(db) as conn:
		conn.row_factory = dict_factory
		#conn.row_factory = lite.Row
		query = ("select * from podcast_items order by date(pubdate) DESC;")
		cursor = conn.cursor()
		cursor.execute(query)
		data = cursor.fetchall()
		# now process to xml:
		
		fg = FeedGenerator()
		fg.title('Teaching Podcast - Open Door')
		fg.description('''We are a family of sojourners learning to walk in the way of Jesus for the good of the world (www.opendooreastbay.com)''')
		fg.load_extension('podcast')
		fg.language('en-US')
		fg.link(href='http://podcast.opendooreastbay.com/', rel='self', type="application/rss+xml")
		fg.podcast.itunes_author('Open Door East Bay')
		fg.podcast.itunes_category('Religion & Spirituality')
		fg.podcast.itunes_image('http://static1.squarespace.com/static/54c9a274e4b0d8961be0d970/t/555bb45ee4b051f7c699813d/1436299699944/1500w/od-emblem-blue.jpg')
		fg.podcast.itunes_subtitle('Open Door Teachings')
		fg.podcast.itunes_summary('''We are a family of sojourners learning to walk in the way of Jesus for the good of the world (www.opendooreastbay.com)''')
		fg.podcast.itunes_explicit('no')
		fg.podcast.itunes_owner(name='Open Door East Bay', email='*****@*****.**')
		
		fg.podcast.itunes_new_feed_url('http://podcast.opendooreastbay.com') #to move itunes feed to a new url
		
		fg.link(href='http://www.opendooreastbay.com', rel='alternate')
		for item in data:
			fe = fg.add_entry()
			fe.id(item['enclosure_url'])
			fe.podcast.itunes_author(item['itunes_author'])
			fe.podcast.itunes_subtitle(item['itunes_subtitle'])
			fe.podcast.itunes_explicit(item['itunes_explicit'])
			fe.podcast.itunes_image(item['itunes_image'])
			fe.podcast.itunes_duration(item['itunes_duration'])
			fe.published(item['pubDate'])
			fe.link(href=item['link'], rel='enclosure')
			fe.title(item['title'])
			fe.description(item['itunes_subtitle'])
			fe.enclosure(item['enclosure_url'], item['enclosure_length'], item['enclosure_type'])
		fg.rss_str(pretty=True)
		fg.rss_file('podcast.xml')
		print "wrote xml!"
		return "wrote xml!"
Example #27
def generateRss(fileList, directoryPath, baseUrl):

    fg = FeedGenerator()

    fg.load_extension('podcast')

    #Setup Feed
    fg.title(Config.podcastTitle)
    baseUrl = Config.baseUrl
    feed = baseUrl + '/rss'
    fg.link(href=feed)
    fg.description(Config.podcastDescription)
    fg.language('en')
    mainImage = baseUrl + '/files?filename=' + Config.mainImage
    fg.image(mainImage)
    #using Technology as other tags like Christianity wont validate
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_owner(name=Config.ownerName, email=Config.ownerEmail)

    #Setup episodes
    for i in fileList:
        fe = fg.add_entry()
        mp3 = directoryPath + i['title']

        # for local files
        # stat = os.stat(mp3)
        # size = os.path.getsize(mp3)
        # mp3File = baseUrl + '/files?filename=' + i['title']
        # fe.enclosure(mp3File, str(size) , 'audio/mp3')

        fe.enclosure(i['url'], str(i['size']), 'audio/mp3')
        fe.title(i['title'])
        descriptionText = 'Authors: ' + i['authors']
        fe.description(descriptionText)
        link = baseUrl + '/files?filename=' + i['title']
        fe.link(href=link)
        fe.podcast.itunes_explicit('no')
        image = baseUrl + '/files?filename=' + Config.mainImage
        fe.podcast.itunes_image(image)

    #Save Rss
    fg.rss_str(pretty=True)
    saveLocation = directoryPath + 'podcast.xml'
    fg.rss_file(saveLocation)
Example #28
def notices_feed(feed_type):
    if feed_type not in ["atom", "rss"]:
        flask.abort(404)

    url_root = flask.request.url_root
    base_url = flask.request.base_url

    feed = FeedGenerator()
    feed.generator("Feedgen")

    feed.id(url_root)
    feed.copyright(
        f"{datetime.now().year} Canonical Ltd. "
        "Ubuntu and Canonical are registered trademarks of Canonical Ltd.")
    feed.title("Ubuntu security notices")
    feed.description("Recent content on Ubuntu security notices")
    feed.link(href=base_url, rel="self")

    def feed_entry(notice, url_root):
        _id = notice.id
        title = f"{_id}: {notice.title}"
        description = notice.details
        published = notice.published
        notice_path = flask.url_for(".notice", notice_id=notice.id).lstrip("/")
        link = f"{url_root}{notice_path}"

        entry = FeedEntry()
        entry.id(link)
        entry.title(title)
        entry.description(description)
        entry.link(href=link)
        entry.published(f"{published} UTC")
        entry.author({"name": "Ubuntu Security Team"})

        return entry

    notices = (db_session.query(Notice).order_by(desc(
        Notice.published)).limit(10).all())

    for notice in notices:
        feed.add_entry(feed_entry(notice, url_root), order="append")

    payload = feed.atom_str() if feed_type == "atom" else feed.rss_str()
    return flask.Response(payload, mimetype="text/xml")
Example #29
    def _build_feed(self):
        router = Router(self._config)
        fp = filterPlaceholders

        fg = FeedGenerator()
        fg.id(self._config.site_prefix)
        fg.title(self._config.site_name)
        fg.author({
            'name': fp(self._config.author),
            'email': fp(self._config.email)
        })
        fg.link(href=self._config.site_prefix, rel='alternate')
        fg.logo(fp(self._config.site_logo))
        fg.subtitle(fp(self._config.description))
        fg.description(fp(self._config.description) or ' ')
        fg.language(fp(self._config.language))
        fg.lastBuildDate(moment.now().locale(self._config.locale).date)
        fg.pubDate(moment.now().locale(self._config.locale).date)

        for post in self._posts[:10]:
            meta = post.meta
            fe = fg.add_entry()
            fe.title(meta['title'])
            fe.link(href=router.gen_permalink_by_meta(meta))
            fe.guid(router.gen_permalink_by_meta(meta), True)
            fe.pubDate(meta['date'].date)
            fe.author({
                'name': meta['author'],
                'uri': fp(self._config.author_homepage),
                'email': fp(self._config.email)
            })
            fe.content(post.parsed)

        if not os.path.exists(
                unify_joinpath(self._config.build_dir, 'feed/atom')):
            os.makedirs(unify_joinpath(self._config.build_dir, 'feed/atom'))

        fg.rss_file(unify_joinpath(self._config.build_dir, 'feed/index.xml'))
        fg.rss_file(unify_joinpath(self._config.build_dir, 'feed/index.html'))
        fg.atom_file(
            unify_joinpath(self._config.build_dir, 'feed/atom/index.xml'))
        fg.atom_file(
            unify_joinpath(self._config.build_dir, 'feed/atom/index.html'))
Example #30
    def get_rss_by_show_id(self, show_id):
        show = self.get_show_by_show_id(show_id)
        if show is None:
            warnings.warn("get_show_by_show_id({}) returned None".format(show_id))
            return None
        eps = self.get_episodes_by_show_id(show_id)
        if eps is None:
            warnings.warn("get_episodes_by_show_id({}) returned None".format(show_id))

        fg = FeedGenerator()
        fg.load_extension("podcast")
        fg.id(show_id)
        fg.title(show["name"])
        if "href" in show:
            fg.link(href=show["href"], rel="via")
        if "images" in show and show["images"]:
            fg.logo(show["images"][0]["url"])
        if "languages" in show and show["languages"]:
            fg.language(show["languages"][0])
        fg.description(show.get("description", None))

        for ep in eps:
            if "external_urls" not in ep or not ep["external_urls"]:
                continue
            url = None
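            # prefer a non-Spotify external URL when one is available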
            for key, value in ep["external_urls"].items():
                if url is None or not key == "spotify":
                    url = value
            fe = fg.add_entry(order="append")
            fe.title(ep["name"])
            fe.id(ep["id"])
            fe.description(ep.get("description", None))
            fe.link(href=url)
            try:
                published = datetime_parse(ep["release_date"])
                if published.tzinfo is None:
                    published = published.astimezone(timezone.utc)
                fe.published(published)
            except Exception:
                pass
            if "duration_ms" in ep:
                fe.podcast.itunes_duration(int(ep["duration_ms"] / 1000))
        return fg.rss_str(pretty=True)
Example #31
def generate_feed(key: str, title: str, description: str, link: str):
    feed = FeedGenerator()
    feed.title(title)
    feed.description(description)
    feed.link(href=link)
    feed.language("en")

    appids = db.redis_conn.zrevrange(key, 0, 10, withscores=True)
    apps = [(db.get_json_key(f"apps:{appid[0]}"), appid[1])
            for appid in appids]

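    # zrevrange returns the highest-scored (newest) apps first; reversed() adds them oldest first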
    for app, timestamp in reversed(apps):
        # sanity check: if index includes an app, but apps:ID is null, skip it
        if not app:
            continue

        # sanity check: if application doesn't have the name field, skip it
        if not app.get("name"):
            continue

        entry = feed.add_entry()
        entry.title(app["name"])
        entry.link(href=f"https://flathub.org/apps/details/{app['id']}")

        timestamp = int(timestamp)
        entry_date = datetime.utcfromtimestamp(timestamp).strftime(
            "%a, %d %b %Y %H:%M:%S")
        entry.pubDate(f"{entry_date} UTC")

        content = [
            '<img src="https://dl.flathub.org/repo/appstream/x86_64/icons/128x128/{}.png">'
            .format(app["id"]),
            f"<p>{app['summary']}</p>",
        ]

        if description := app.get("description"):
            content.append(f"<p>{description}</p>")

        content.append("<h3>Additional information:</h3>")
        content.append("<ul>")

        if developer_name := app.get("developer_name"):
            content.append(f"<li>Developer: {developer_name}</li>")
Example #32
def rss():
    fg = FeedGenerator()
    fg.title('CloudWalk DevSecOps test')
    fg.description('A RSS Feed for HTTP and TCP service')
    fg.docs('')
    fg.generator('')
    fg.link(href='http://example.com')

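    # each line of log.txt is expected to be 'timestamp;title;server;port'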
    with open('log.txt') as f:
        for line in f.readlines():
            info = line.replace('\n', '').split(';')
            fe = fg.add_entry()
            fe.title(f"{info[1]}")
            fe.pubDate(f"{info[0]} GTM-3")
            fe.description(f"server: {info[2]} port:{info[3]}")

    response = make_response(fg.rss_str())
    response.headers.set('Content-type', 'application/rss+xml')
    return response
Example #33
class Feed:
    # create the feed
    def __init__(self, url):
        self.feed = FeedGenerator()
        self.feed.title("Comic Subs")
        self.feed.link(href=url)
        self.feed.description("The Comics Subscription Feed")

    # add a chapter to the feed
    def add_chapter(self, chapter):
        fe = self.feed.add_entry()
        fe.title(chapter.title)
        fe.guid(chapter.url)
        fe.link(href=chapter.url)
        fe.pubdate(chapter.published.replace(tzinfo=utc))

    # return the feed's rss
    def rss(self):
        return self.feed.rss_str()
Example #34
def saveFeed(listings, title, path):

    url = githubRepoURL + title + ".xml"

    # Create a feed generator
    fg = FeedGenerator()

    # Create the feed's title
    fg.id(url)
    fg.title(title)
    fg.author({'name': 'Ben Snell'})
    fg.description("NYC 2BR Apartment Listings in " + title)
    fg.link(href=url, rel='alternate')
    fg.language('en')
    time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "-05:00"
    fg.pubDate(time)
    fg.updated(time)

    for apt in listings:

        e = fg.add_entry()

        e.id(apt[0])
        e.title("$" + apt[1] + "  //  " + apt[4])
        e.link(href=apt[0])

        text = ""
        if apt[5] != "":
            imgs = apt[5].split(" ")
            for i in range(len(imgs)):
                text += "<img src=\"" + imgs[i] + "\" /> "
                if i == 0:
                    text += "<p>" + apt[8] + "</p>"
        else:
            text += "<p>" + apt[8] + "</p>"
        e.content(type="html", content=text)

        # This doesn't seem to work:
        e.pubDate(datetime2RSSString(clDate(apt[2])))
        e.updated(datetime2RSSString(clDate(apt[2])))

    fg.atom_str(pretty=True)
    fg.atom_file(path)
Example #35
def rss():
    '''
	Reference - https://old.reddit.com/r/flask/comments/evjcc5/question_on_how_to_generate_a_rss_feed/
	'''

    # Default Parameters
    params = {'limit': 5, 'blacklist': list(), 'threshold': 0.8}

    if 'limit' in request.args:
        params['limit'] = int(request.args.get('limit'))
    if 'nowords' in request.args:
        params['blacklist'] = literal_eval(request.args.get('nowords'))
    if 'similar' in request.args:
        params['threshold'] = (float(request.args.get('similar'))
                               if float(request.args.get('similar')) <= 1 else
                               0.8)

    fg = FeedGenerator()
    fg.title('Hacker News - Best')
    fg.description('Created for personal use by Parth Parikh')
    fg.link(href='https://pncnmnp.github.io')

    UTC = tz.gettz('UTC')

    articles = get_news(params)

    for article in articles:
        fe = fg.add_entry()
        fe.title(article['title'])

        try:
            fe.link(href=article['url'])
        except:
            fe.link(href=HN_LINK.format(str(article['id'])))

        fe.guid(str(article['id']), permalink=False)
        fe.author(name=article['by'])
        fe.published(datetime.datetime.fromtimestamp(article['time'], tz=UTC))

    response = make_response(fg.rss_str())
    response.headers.set('Content-Type', 'application/rss+xml')
    return response
Example #36
def all_top_posts():
    url = 'http://localhost:2015/votes/all/top/{num}'
    url = url.format(num = 25)
    headers = {'content-type': 'application/json'}
    r = requests.get(url, headers=headers)
    print(str(r.status_code)+ "<<<-----======")
    data = r.json()
    post_id = data['data']

    fg = FeedGenerator()
    fg.id(url)
    fg.title('Microservice - view all top posts')
    fg.link( href=url, rel='alternate' )
    fg.description('The 25 top posts to any community')
    fg.language('en')

    for i in post_id:
        print("Does it break here?")
        url = 'http://localhost:2015/posts/all/post/{id}'
        url = url.format(id = i)
        headers = {'content-type': 'application/json'}
        r = requests.get(url, headers=headers)
        print("Or Here?")
        data = r.json()
        post_info = data['data']

        fe = fg.add_entry()
        fe.id(post_info['post_id'])
        fe.author({'name': post_info['author'], 'email':""})
        fe.title(post_info['title'])
        fe.description(post_info['community'])
        date = datetime.fromtimestamp(post_info['date'])
        date = date.replace(tzinfo=pytz.utc)
        print(date)
        fe.published(date)
        fe.content(content=post_info['text'])
        if 'url' in post_info:
            fe.source(url=post_info['url'], title=post_info['title'])

    rssfeed  = fg.rss_str(pretty=True) # Get the RSS feed as string
    resp = flask.Response(rssfeed, mimetype='text/xml', status = 200, content_type = 'application/rss+xml; charset=UTF-8')
    return resp
Example #37
def user_rss(service, id):
    cursor = get_cursor()
    query = "SELECT * FROM posts WHERE \"user\" = %s AND service = %s "
    params = (id, service)

    query += "ORDER BY added desc "
    query += "LIMIT 10"

    cursor.execute(query, params)
    results = cursor.fetchall()

    cursor3 = get_cursor()
    query3 = "SELECT * FROM lookup WHERE id = %s AND service = %s"
    params3 = (id, service)
    cursor3.execute(query3, params3)
    results3 = cursor3.fetchall()
    name = results3[0]['name'] if len(results3) > 0 else ''

    fg = FeedGenerator()
    fg.title(name)
    fg.description('Feed for posts from ' + name + '.')
    fg.id(f'http://{request.headers.get("host")}/{service}/user/{id}')
    fg.link(href=f'http://{request.headers.get("host")}/{service}/user/{id}')
    fg.generator(generator='Kemono')
    fg.ttl(ttl=40)

    for post in results:
        fe = fg.add_entry()
        fe.title(post['title'])
        fe.id(
            f'http://{request.headers.get("host")}/{service}/user/{id}/post/{post["id"]}'
        )
        fe.link(
            href=
            f'http://{request.headers.get("host")}/{service}/user/{id}/post/{post["id"]}'
        )
        fe.content(content=post["content"])
        fe.pubDate(pytz.utc.localize(post["added"]))

    response = make_response(fg.atom_str(pretty=True), 200)
    response.headers['Content-Type'] = 'application/rss+xml'
    return response
Example #38
def make_csdn_rss():
    fg = FeedGenerator()
    fg.id('http://blog.csdn.net')
    fg.title(u'CSDN 博客频道')
    fg.author({
        'name': 'pfchai',
    })
    fg.link(href='http://blog.csdn.net', rel='self')
    fg.description(u"csdn 首页博客")

    csdn = CSDN()
    for item in csdn.get_item():
        fe = fg.add_entry()
        fe.id(item['link'])
        fe.title(item['title'])
        fe.author({'name': item['author']})
        fe.description(item['description'])
        fe.content(item['description'])

    return fg
Example #39
def feed(feed_id):
    user = get_user_by_feed(feed_id)
    if user:
        generator = FeedGenerator()
        generator.id(str(feed_id))
        generator.title("Vulnfeed Feed")
        generator.description('Vulnfeed data in RSS form')
        generator.link(href=request.url)
        generator.language('en')

        for scored_item in user.last_scored_list:
            entry = generator.add_entry()
            entry.id(scored_item['report']['id'])
            entry.title(scored_item['report']['title'])
            entry.description(scored_item['report']['contents'])
            entry.link(href=scored_item['report']['link'])

        return generator.rss_str(pretty=True)
    else:
        return "Not found"
Example #40
def gen_feed():
    fg = FeedGenerator()
    fg.id(f"{ID}")
    fg.title(f"{USERNAME} notes")
    fg.author({"name": USERNAME, "email": "*****@*****.**"})
    fg.link(href=ID, rel="alternate")
    fg.description(f"{USERNAME} notes")
    fg.logo(ME.get("icon", {}).get("url"))
    fg.language("en")
    for item in DB.activities.find({
            "box": Box.OUTBOX.value,
            "type": "Create"
    },
                                   limit=50):
        fe = fg.add_entry()
        fe.id(item["activity"]["object"].get("url"))
        fe.link(href=item["activity"]["object"].get("url"))
        fe.title(item["activity"]["object"]["content"])
        fe.description(item["activity"]["object"]["content"])
    return fg
Example #41
def index_rss():
    if current_user.is_authenticated:
        links = trending_links(current_user.subscribed_feed_ids)
    else:
        links = trending_links(current_app.config["DEFAULT_FEEDS"])
    paginated_ids, _, _ = paginate(links, 30)
    links = Link.by_ids(paginated_ids)

    # TODO maybe do through fake feed (that's what reddit does and it actually makes sense)
    fg = FeedGenerator()
    fg.id("https://localhost:5000/")
    fg.title("Newsfeed")
    fg.link(href="http://localhost:5000/", rel="self")
    fg.description("Global news agrregator!")
    fg.language("en")

    for entry in rss_entries(links):
        fg.add_entry(entry)

    return fg.rss_str(pretty=True)
Example #42
 def rss(self, name):
     cherrypy.response.headers['Content-Type'] = 'application/rss+xml'
     fg = FeedGenerator()
     cursor.execute(
         "SELECT * FROM RSS WHERE source = %s ORDER BY filedate DESC;",
         (name, ))
     fg.id(config.get('Options', 'rss_url') + name)
     fg.title(config.get(name, 'rss_title'))
     fg.description(config.get(name, 'rss_desc'))
     fg.link(href=config.get('Options', 'rss_url') + name, rel='alternate')
     for row in cursor:
         fe = fg.add_entry()
         fe.id('https://drive.google.com/uc?id=' + row[1] +
               '&export=download')
         fe.title(
             config.get(name, 'feed_entry_prepend') + row[2] +
             config.get(name, 'feed_entry_postpend'))
         fe.description(row[2] + ' - https://drive.google.com/uc?id=' +
                        row[1] + '&export=download')
     return fg.rss_str(pretty=True)
Example #43
def get_blog_cn(url, bucket, key):
    blogs = requests.get(url)
    fg = FeedGenerator()
    fg.title('AWS Blog RSS')
    fg.link(href='https://aws.amazon.com/blogs')
    fg.description('AWS Blog RSS')

    for blog in blogs.json()['items']:
        if blog['additionalFields']['slug'] == 'all': continue

        fe = fg.add_entry()
        fe.link(href=blog['additionalFields']['link'])
        fe.title(blog['additionalFields']['title'])
        fe.id(blog['id'])
        fe.description(blog['additionalFields']['slug'])
        fe.pubDate(blog['additionalFields']['modifiedDate'])

    response = upload_to_s3(fg, bucket, key)

    return response
Example #45
def saveFeed(listings, title, path):

    url = githubRepoURL + title + ".xml"

    # Create a feed generator
    fg = FeedGenerator()

    # Create the feed's title
    fg.id(url)
    fg.title(title)
    fg.author({'name': 'Ben Snell'})
    fg.description("Art Show Open Call Opportunities")
    fg.link(href=url, rel='alternate')
    fg.language('en')
    time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "-05:00"
    fg.pubDate(time)
    fg.updated(time)

    for item in listings:

        e = fg.add_entry()

        e.id(item["ID"])
        # Get a clearer title
        thisTitle = getShortDate(item["Application Deadline"]) + item["Title"]
        e.title(thisTitle)
        # for key, value in item.items():
        # print(key, value);
        # print(item["url"])
        # if "url" in item:
        e.link(href=item["url"])

        text = getHtmlFormattedListing(item)
        e.content(type="html", content=text)

        # This doesn't seem to work:
        # e.pubDate( datetime2RSSString(clDate(apt[2])) )
        # e.updated( datetime2RSSString(clDate(apt[2])) )

    fg.atom_str(pretty=True)
    fg.atom_file(path)
Example #46
    def __init__(self):
        feeds = RssFetch.get_feeds()

        fg = FeedGenerator()
        fg.title("Sascha's Reader's Digest")
        fg.link(href='https://www.sascha-curth.de', rel='alternate')
        fg.description('These news catched my attention.')
        for item in sorted(feeds,
                           key=lambda k: k['FEED_PUBLISHED'],
                           reverse=False):
            fe = fg.add_entry()
            fe.id(str(item['id']))
            fe.title(item['FEED_TITLE'])
            fe.link(href=item['FEED_LINK'], rel="alternate")
            author = '{"email": "%s", "name":"%s" }' % (item['FEED_NAME'],
                                                        item['rss_link'])
            author = json.loads(author)
            fe.author(author)
            fe.pubDate(pytz.utc.localize(item['FEED_PUBLISHED']))
            fe.description(str(item['description']))

        sorted_fg = sorted(feeds,
                           key=lambda k: k['FEED_PUBLISHED'],
                           reverse=True)
        fg.rss_file('static/readers_digest_rss.xml')

        s3 = boto3.resource('s3')
        s3.meta.client.upload_file(CONFIG.get("AWS", "sourcefile"),
                                   CONFIG.get("AWS", "bucket"),
                                   CONFIG.get("AWS", "targetfile"))
        client = boto3.client('cloudfront')
        response = client.create_invalidation(
            DistributionId=CONFIG.get("AWS", "distribution"),
            InvalidationBatch={
                'Paths': {
                    'Quantity': 1,
                    'Items': ['/readers_digest_rss.xml'],
                },
                'CallerReference': str(time()).replace(".", "")
            })
        print(response)
Beispiel #47
0
    def _generate_feed(self):
        feed = FeedGenerator()

        feed.load_extension('podcast')
        feed.podcast.itunes_author(self.author)
        feed.podcast.itunes_category(self.category)
        feed.podcast.itunes_explicit(self.is_explicit)
        feed.podcast.itunes_image(f'{self.logo_uri}.{self.JPG_FILE_EXTENSION}')
        feed.podcast.itunes_owner(name=self.author, email=self.email)
        feed.podcast.itunes_subtitle(self.subtitle)
        feed.podcast.itunes_summary(self.description)

        feed.author(name=self.author, email=self.email)
        feed.description(self.description)
        feed.language(self.language)
        feed.link(href=self.website, rel='alternate')
        feed.logo(self.logo_uri)
        feed.subtitle(self.subtitle)
        feed.title(self.name)

        return feed
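_generate_feed only returns the feed skeleton; a minimal sketch of how it might be used elsewhere in the same class, mirroring the other examples here (the episode values are placeholders, not part of the original code):

# inside another method of the same class:
feed = self._generate_feed()

fe = feed.add_entry()
fe.id('https://example.org/ep1.mp3')                     # placeholder URL
fe.title('Episode 1')
fe.description('First episode')
fe.enclosure('https://example.org/ep1.mp3', '0', 'audio/mpeg')

rss_xml = feed.rss_str(pretty=True)                      # or feed.rss_file(path)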
Beispiel #48
0
def lt3():
    news = scrapper.get_lt3_news()

    fg = FeedGenerator()

    fg.title(news['title'])
    fg.description(news['description'])
    fg.link(href=news['link'])

    for article in news['articles']:
        fe = fg.add_entry()
        fe.title(article['title'])
        fe.link(href=article['link'])
        fe.description(article['description'])
        fe.guid(article['link'], permalink=True)
        fe.author(name=article['author'])
        fe.pubDate(article['pubDate'])

    res = flask.make_response(fg.rss_str())
    res.headers.set('Content-Type', 'application/xml')
    return res
Beispiel #49
0
def render_feed(feed_id: str, plugin: Plugin, options: GlobalOptions,
                base_url: str):
    feed = plugin.get_feed(feed_id)
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(feed.title)
    fg.description(feed.description)
    fg.link(href=feed.link, rel='alternate')
    fg.image(options.icon or feed.image)
    fg.id(feed.feed_id)
    for item in reversed(feed.items):
        fe = fg.add_entry()
        fe.id(item.item_id)
        fe.title(item.title)
        fe.description(item.description)
        fe.pubDate(item.date)
        fe.podcast.itunes_image(item.image)
        fe.enclosure(generate_url(item, plugin, options, base_url),
                     item.content_length, item.content_type)
    return fg.rss_str(pretty=True) if options.format == 'rss' else fg.atom_str(
        pretty=True)
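render_feed returns either an RSS or an Atom document depending on options.format, so a caller serving it over HTTP should pick the matching media type. A small sketch of that mapping, assuming a Flask-style Response as in the other examples here (serve_feed itself is illustrative, not part of the original):

from flask import Response

def serve_feed(feed_id, plugin, options, base_url):
    body = render_feed(feed_id, plugin, options, base_url)
    mimetype = ('application/rss+xml' if options.format == 'rss'
                else 'application/atom+xml')
    return Response(response=body, status=200, mimetype=mimetype)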
Beispiel #50
0
def rss():
    fg = FeedGenerator()
    fg.title('Feed title')
    fg.description('Feed Description')
    # Original link
    # fg.link(href='https://neighborly-client-v1.azurewebsites.net/')

    # Andrew's link
    fg.link(href='https://myneighborlyapiv1.azurewebsites.net/')

    response = requests.get(settings.API_URL + '/getAdvertisements')
    ads = response.json()

    for a in ads:
        fe = fg.add_entry()
        # response.json() returns plain dicts, so use key access, not attributes
        fe.title(a['title'])
        fe.description(a['description'])

    response = make_response(fg.rss_str())
    response.headers.set('Content-Type', 'application/rss+xml')
    return response
Beispiel #51
0
def rss(name: str, _id: str):
    fg = FeedGenerator()
    events_link = event_base_link(actor_name=name, actor_id=_id)
    fg.id(events_link)
    # "<name>のイベント・ライブ情報一覧" roughly: "event / live-show listings for <name>"
    fg.title(name + "のイベント・ライブ情報一覧")
    fg.description(name + "のイベント・ライブ情報一覧")
    fg.link(href=events_link, rel='alternate')
    fg.language("ja")
    for event in cached_events(actor_name=name,
                               actor_id=_id,
                               events_cache=EVENTS_CACHE):
        fe = fg.add_entry()
        fe.id(event._id)
        fe.link(href=event_link(event._id))
        fe.title(f"{event.year}/{event.month}/{event.day} {event.title}")
        # Passing a pytz zone directly as tzinfo gives the zone's historical
        # LMT offset; localize() applies the correct JST offset instead.
        fe.pubDate(
            pytz.timezone("Asia/Tokyo").localize(
                datetime(year=event.year,
                         month=event.month,
                         day=event.day)))
    return fg.rss_str(pretty=True)
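The localize() change above matters because a pytz zone used directly as tzinfo carries the zone's first historical (local mean time) offset rather than the modern +09:00. A minimal sketch of the difference:

from datetime import datetime
import pytz

tokyo = pytz.timezone("Asia/Tokyo")
naive = datetime(2021, 5, 1)

wrong = naive.replace(tzinfo=tokyo)   # LMT-era offset, not +09:00
right = tokyo.localize(naive)         # correct JST offset (+09:00)

print(wrong.utcoffset(), right.utcoffset())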
Beispiel #52
0
def feed(posts: Tuple[Post], kind: str) -> FeedGenerator:
    log.debug('generating %s feed', kind)
    fg = FeedGenerator()
    fg.title('beepb00p')
    fg.author(name='karlicoss', email='*****@*****.**')
    # TODO better description?
    fg.description('feed')

    bb = lambda x: f'https://beepb00p.xyz{x}'
    fg.id(bb(f'/{kind}.xml'))
    fg.link(rel='self', href=bb(f'/{kind}.xml'))
    fg.link(href=bb(''))
    if len(posts) > 0:
        dates = (p.date for p in posts)
        fg.updated(
            max(tz.localize(d) if d is not None else throw() for d in dates))

    # eh, apparently it adds items to the feed from bottom to top...
    for post in reversed(posts):
        fe = fg.add_entry()
        # not sure why id() doesn't allow to set permalink=True
        fe.guid(bb(post.url), permalink=True)
        fe.link(href=bb(post.url))
        fe.title(post.title)
        # TODO FIXME meh.
        d = post.date
        assert d is not None
        td = tz.localize(d)
        fe.published(td)
        fe.updated(td)
        # TODO meh, later use proper update date...
        #
        # TODO use type=text/html for comparisons?
        # TODO meh
        if post.upid == 'infra_diagram':
            content = "Sorry, this post contains a huge diagram and isn't RSS friendly. It's best viewed on the website"
        else:
            content = post.body
        fe.content(content, type='html')
    return fg
Beispiel #53
0
def slack_history_to_feedgen(history):
    fg = FeedGenerator()

    fg.title(CONFIG["Feed"]["title"])
    fg.id(CONFIG["Feed"]["id"])
    fg.link(CONFIG["Feed"]["link"])
    fg.description(CONFIG["Feed"]["description"])

    for message in history["messages"]:
        message_id = message.get(
            "client_msg_id", f'{message.get("user")}--{message.get("ts")}')

        fe = fg.add_entry()
        fe.id(message_id)

        title = None
        summary = None
        link = []

        for file in message.get("files", []):
            title = title or file.get("title", None)

            link_href = file.get("url_private", None)
            link_rel = "alternate"
            link.append(dict(href=link_href, rel=link_rel, title=title))

        for attachment in message.get("attachments", []):
            title = title or attachment.get("title", None)
            summary = summary or attachment.get("text", None)

            link_href = attachment.get("from_url", None)
            link_rel = "alternate"
            link.append(dict(href=link_href, rel=link_rel, title=title))

        fe.title(title or "Untitled")
        fe.summary(summary)
        fe.content(message["text"])
        fe.link(link)

    return fg
Beispiel #54
0
def generate_feed(input_file, output_file):
    fg = FeedGenerator()
    fg.load_extension('podcast', rss=True)

    ## RSS tags
    # Required
    fg.title(TITLE)
    fg.link(href=LINK)
    fg.description(DESCRIPTION)
    # Optional
    fg.language('en')
    fg.image(url=IMAGE_URL, title=TITLE, link=LINK)
    fg.ttl(720)
    fg.webMaster(CONTACT['email'])  # webMaster is an e-mail address in RSS, not a name
    now = datetime.datetime.now()
    tz = pytz.timezone('Europe/Amsterdam')
    fg.pubDate(tz.localize(now))
    # iTunes
    fg.podcast.itunes_author('Dan LeBatard')
    fg.podcast.itunes_category(itunes_category='Sports & Recreation',
                               itunes_subcategory='Professional')
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit='clean')
    fg.podcast.itunes_owner(name=CONTACT['name'], email=CONTACT['email'])

    # Add items
    items = read_items(input_file)
    for item in items:
        fe = fg.add_entry()

        ## RSS tags
        fe.id(item['guid'])
        fe.title(item['title'])
        fe.description(item['description'])
        fe.enclosure(item['link'], '0', 'audio/mpeg')  # feedgen expects the length as a string
        fe.pubdate(item['pubDate'])

    # Finish off the file
    fg.rss_str(pretty=True)
    fg.rss_file(output_file)
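generate_feed depends on a read_items helper that is not shown; judging from the loop above, each item needs guid, title, description, link and a pubDate that feedgen will accept. A hypothetical sketch of such a helper, assuming the input file is a JSON array with those keys (this illustrates the expected shape only, not the project's actual implementation):

import json
from dateutil import parser as dateparser

def read_items(input_file):
    # Hypothetical reader: expects a JSON array of objects with the keys used above.
    with open(input_file) as fh:
        raw = json.load(fh)
    items = []
    for entry in raw:
        items.append({
            'guid': entry['guid'],
            'title': entry['title'],
            'description': entry['description'],
            'link': entry['link'],
            # feedgen wants timezone-aware dates; the strings are assumed to carry an offset
            'pubDate': dateparser.parse(entry['pubDate']),
        })
    return items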
Beispiel #55
0
def make_feed(entries, yargs):
    """Take a list of (datetime, feedgen.entry)s and the program arguments 
    and create a feed file."""
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()

    # metadata
    fg.id(yargs.feed_link)
    fg.title(yargs.feed_title)
    fg.description(
        yargs.feed_description if yargs.feed_description else yargs.feed_title)
    fg.link(href=yargs.feed_link)

    # entries
    for ts, e in entries:
        fg.add_entry(e)

    # output
    if yargs.feed_type == "RSS":
        fg.rss_file(yargs.feed_path)
    elif yargs.feed_type == "Atom":
        fg.atom_file(yargs.feed_path)
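make_feed expects the caller to hand it already-built feedgen entries. A minimal sketch of constructing the (timestamp, entry) pairs it consumes, using standalone FeedEntry objects (the sample values are placeholders):

from datetime import datetime, timezone
from feedgen.entry import FeedEntry

def build_entry(title, url, published):
    # Entries can be created standalone and attached later via fg.add_entry(e).
    e = FeedEntry()
    e.id(url)
    e.title(title)
    e.link(href=url)
    e.published(published)
    e.updated(published)
    return e

ts = datetime(2021, 1, 1, tzinfo=timezone.utc)   # placeholder timestamp
entries = [(ts, build_entry('First post', 'https://example.org/1', ts))]
# make_feed(entries, yargs) then writes the feed to yargs.feed_path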