Example #1
class TestExtensionSyndication(unittest.TestCase):

    SYN_NS = {'sy': 'http://purl.org/rss/1.0/modules/syndication/'}

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('syndication')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_update_period(self):
        for period_type in ('hourly', 'daily', 'weekly', 'monthly', 'yearly'):
            self.fg.syndication.update_period(period_type)
            root = etree.fromstring(self.fg.rss_str())
            a = root.xpath('/rss/channel/sy:UpdatePeriod',
                           namespaces=self.SYN_NS)
            assert a[0].text == period_type

    def test_update_frequency(self):
        for frequency in (1, 100, 2000, 100000):
            self.fg.syndication.update_frequency(frequency)
            root = etree.fromstring(self.fg.rss_str())
            a = root.xpath('/rss/channel/sy:UpdateFrequency',
                           namespaces=self.SYN_NS)
            assert a[0].text == str(frequency)

    def test_update_base(self):
        base = '2000-01-01T12:00+00:00'
        self.fg.syndication.update_base(base)
        root = etree.fromstring(self.fg.rss_str())
        a = root.xpath('/rss/channel/sy:UpdateBase', namespaces=self.SYN_NS)
        assert a[0].text == base
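
The same assertions can be reproduced outside unittest; a minimal sketch, assuming only that feedgen is installed:

from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.load_extension('syndication')
fg.title('title')
fg.link(href='http://example.com', rel='self')
fg.description('description')
fg.syndication.update_period('hourly')
fg.syndication.update_frequency(2)
# rss_str() returns bytes; the channel now carries the sy:-namespaced
# elements the tests above query with XPath
print(fg.rss_str(pretty=True).decode('utf-8'))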
Example #2
    def export_feed(self, output):
        fg = FeedGenerator()
        fg.load_extension('podcast')
        fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')
        fg.podcast.itunes_image("%s/icon.png" % URL_BASE)

        fg.title('JW.ORG Magazines')
        fg.description('Combined Feed of Watchtower (public), Watchtower (study), and Awake! in English from jw.org.')
        fg.link(href="%s/%s" % (URL_BASE, output), rel='self')

        manifest = self._load()
        entries = []
        for lang, mnemonics in manifest.items():
            for mnemonic, issues in mnemonics.items():
                for issue, data in issues.items():
                    entries.append((issue, data))

        for issue, entry in sorted(entries, key=lambda i: i[0], reverse=True):
            fe = fg.add_entry()

            fe.id( entry['hash'] )
            fe.title( entry['title'] )
            fe.description( entry['title'] )
            fe.published( pytz.utc.localize( entry['created_on'] ) )
            url = "%s/%s" % (URL_BASE, os.path.basename(entry['file']))
            mime = 'audio/mpeg'
            fe.enclosure(url, str(entry['duration']), mime)
            fe.link(href=url, type=mime)
        fg.rss_str(pretty=True)  # return value unused; rss_file() below writes the feed
        fg.rss_file(os.path.join(CACHE_DIR, output))
Example #3
def _filter_fb_rss_feeed(url):
    parsed_feed = feedparser.parse(url)
    filtered_entries = filter(
        lambda x: ' shared a link: "' in x.title, parsed_feed.entries)

    fg = FeedGenerator()
    fg.id('https://fb-notifications-to-pocket.herokuapp.com/')
    fg.title('Facebook Notifications to Pocket')
    fg.author({'name': 'Pankaj Singh', 'email': '*****@*****.**'})
    fg.description(
        '''Filter FB notifications which contain a link and generate a new rss feed which will be used by IFTTT''')
    fg.link(href='https://fb-notifications-to-pocket.herokuapp.com/')

    for entry in filtered_entries:
        root = etree.HTML(entry.summary_detail.value)
        title = entry.title.split(" shared a link: ")[1].strip()[1:-2]
        author_name = entry.title.split(" shared a link: ")[0].strip()
        url = urlparse.parse_qs(
            urlparse.urlparse(root.findall(".//a")[-1].attrib["href"]).query)["u"][0]

        title = get_title_for_url(url) or title

        fe = fg.add_entry()
        fe.id(entry.id)
        fe.link(href=url)
        fe.published(entry.published)
        fe.author({'name': author_name})
        fe.title(title)

    return fg.atom_str(pretty=True)
Example #4
def build_rss_feeds(blog_posts):
    feed = FeedGenerator()
    feed.load_extension("media", rss=True, atom=True)
    base = "https://whotracks.me"

    for post in blog_posts:
        if post["publish"]:
            entry = feed.add_entry()
            entry.id(f'{base}/blog/{post["filename"]}.html')
            entry.title(post["title"])
            entry.link(link={"href": f"{base}/blog/{post['filename']}.html"})
            entry.author({"name": post["author"]})
            entry.pubDate(
                datetime.strptime(post["date"],
                                  "%Y-%m-%d").replace(tzinfo=timezone("CET")))
            entry.description(post["subtitle"])
            entry.media.thumbnail(
                url=f'https://whotracks.me/static/img/{post["header_img"]}')

    feed.title("WhoTracksMe blog")
    feed.description("By the tech teams at Cliqz and Ghostery")
    feed.link(link={"href": f"{base}/blog.html"})

    feed.id("wtm")
    feed.language("en")
    feed.logo(f"{base}/static/img/who-tracksme-logo.png")

    feed.rss_file("_site/blog/feed.xml")
Example #5
	def GET(self):
		cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
		fg = FeedGenerator()
		#TODO create icon
		# fg.icon('http://www.det.ua.pt')
		fg.id(config.get('rss','id'))
		fg.title(config.get('rss','title'))
		fg.subtitle(config.get('rss','subtitle'))
		fg.description(config.get('rss','description'))
		fg.author({'name': config.get('rss','author_name'), 'email':config.get('rss','author_email')})
		fg.language(config.get('rss','language'))
		fg.link(href=config.get('rss','href'), rel='related')

		client = EmailClient()

		for msgn in reversed(client.listBox(config.get('imap','mailbox'))[:config.getint('rss','maxitems')]):
			cherrypy.log("RSS Entry: "+msgn)
			em = client.getEMail(msgn)
			entry = fg.add_entry()
			entry.title(em['subject'])
			entry.author({'name': em['From']['name'], 'email': em['From']['email']})
			entry.guid(config.get("main","baseurl")+'news/'+msgn)
			entry.link({'href':config.get("main","baseurl")+'news/'+msgn, 'rel':'alternate'})
			entry.pubdate(em['date'])
			entry.content(em['body'])
		return	fg.rss_str(pretty=True)
Example #6
def _gen_rss(event):
    """Generate RSS feed file and place in bucket when file uploaded to S3"""
    region = os.getenv('BUCKET_REGION')
    s3 = boto3.resource('s3')
    sbucket = s3.Bucket(event.bucket)
    dbucket = s3.Bucket(os.getenv('RSS_BUCKET'))
    fg = FeedGenerator()
    fg.title('Lesscast Uploads')
    fg.description('Created by lesscast')
    fg.link(href='https://{}.s3.{}.amazonaws.com/rss.xml'.format(
        dbucket.name, region))
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    keyfunc = operator.attrgetter('last_modified')
    iterator = filter(_get_valid_files, sbucket.objects.all())
    for objsum in sorted(iterator, key=keyfunc):
        app.log.info('Adding %s to feed', objsum.key)
        pub_url = 'https://{}.s3.{}.amazonaws.com/{}'.format(
            sbucket.name, region, objsum.key)
        obj = objsum.Object()
        acl = obj.Acl()
        acl.put(ACL='public-read')
        fe = fg.add_entry()
        fe.id(pub_url)
        fe.link(href=pub_url)
        fe.title(os.path.basename(obj.key.rstrip('.mp3')))
        fe.description('added by lesscast')
        fe.enclosure(pub_url, 0, 'audio/mpeg')
    rss_content = fg.rss_str(pretty=True)
    dbucket.put_object(ACL='public-read', Key='rss.xml', Body=rss_content)
    app.log.info('Complete')
Example #7
def getcomments():
    req = requests.get('http://localhost/article',
                       json={"count": 10},
                       auth=('*****@*****.**', 'adminpassword'))
    value = req.json()
    fg = FeedGenerator()
    fg.title('A full feed')
    fg.link(href='http://localhost/feed/summary/comments')
    fg.subtitle('This is a cool feed!')
    fg.language('en')

    for x in value:
        split = x.get('articles_url', '').split('/')
        reqcomment = requests.get('http://localhost/comment/get/' + split[2],
                                  json={"count": 30},
                                  auth=('*****@*****.**', 'adminpassword'))
        fe = fg.add_entry()
        fe.content(x.get('articles_url', ''))
        if reqcomment.status_code == 200:
            for v in reqcomment.json():
                fe.category(term=v)
        else:
            fe.category(term="None")

    fg.rss_file('comments.xml')
    return "YOUR COMMENTS WAS CREATED comments.xml", 200
Example #8
def podcast_feed():
    logo_url = url_for("static", filename="wpclogo_big.png", _external=True)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_image(logo_url)
    fg.author({'name': 'Nathan Kellert', 'email': '*****@*****.**'})
    fg.link(href='http://watchpeoplecode.com/podcast_feed.xml', rel='self')
    fg.title('WPC Coders Podcast')
    fg.description('WPC Coders Podcast is a weekly peek into the lives of developers and the WatchPeopleCode community. Our goal is to keep our listeners entertained by giving them new and interesting insights into our industry as well as awesome things happening within our own community. Here, you can expect to hear about some of the latest news, tools, and opportunities for developers in nearly every area of our industry. Most importantly, we hope to have some fun and a few laughs in ways only other nerds know how.')  # NOQA

    episodes = [('ep1.mp3', 'Episode 1', datetime(2015, 2, 21, 23), 'Learn all about the WPC hosts, and where we came from in Episode 1!'),
                ('ep2.mp3', 'Episode 2', datetime(2015, 2, 28, 23), 'This week we cover your news, topics and questions in episode 2!'),
                ('ep3.mp3', 'Episode 3', datetime(2015, 3, 7, 23), "On todays podcast we talk to WatchPeopleCode's founder Alex Putilin. Hear about how the reddit search engine thousands watched him write. Also, hear the inside scoop of how WatchPeopleCode got started!"),  # NOQA
                ('ep4.mp3', 'Episode 4', datetime(2015, 3, 14, 23), "This week we talk to FreeCodeCamps Quincy Larson(http://www.freecodecamp.com) about their project that combines teaching new developers how to code and completing projects for non-profits! Lets find out how this group of streamers code with a cause!")]  # NOQA

    for epfile, eptitle, epdate, epdescription in episodes[::-1]:
        epurl = "https://s3.amazonaws.com/wpcpodcast/{}".format(epfile)
        fe = fg.add_entry()
        fe.id(epurl)
        fe.title(eptitle)
        fe.description(epdescription)
        fe.podcast.itunes_image(logo_url)
        fe.pubdate(epdate.replace(tzinfo=pytz.UTC))
        fe.enclosure(epurl, 0, 'audio/mpeg')

    return Response(response=fg.rss_str(pretty=True),
                    status=200,
                    mimetype='application/rss+xml')
Example #9
 def generate_feed(page=1):
     feed = FeedGenerator()
     feed.id("https://pub.dartlang.org/feed.atom")
     feed.title("Pub Packages for Dart")
     feed.link(href="https://pub.dartlang.org/", rel="alternate")
     feed.link(href="https://pub.dartlang.org/feed.atom", rel="self")
     feed.description("Last Updated Packages")
     feed.author({"name": "Dart Team"})
     i = 1
     pager = QueryPager(int(page), "/feed.atom?page=%d",
                        Package.all().order('-updated'),
                        per_page=10)
     for item in pager.get_items():
         i += 1
         entry = feed.add_entry()
         for author in item.latest_version.pubspec.authors:
             entry.author({"name": author[0]})
         entry.title("v" + item.latest_version.pubspec.get("version") +\
             " of " + item.name)
         entry.link(link={"href": item.url, "rel": "alternate",
             "title": item.name})
         entry.id(
             "https://pub.dartlang.org/packages/" + item.name + "#" +\
             item.latest_version.pubspec.get("version"))
         entry.description(
             item.latest_version.pubspec
             .get("description", "Not Available"))
         readme = item.latest_version.readme
         if readme is not None:
             entry.content(item.latest_version.readme.render(), type='html')
         else:
             entry.content("<p>No README Found</p>", type='html')
     return feed
Example #10
def write_podcast(show, podcast_dir, base_public_url, showlocal_tz):
    """Create the podcast file."""
    fg = FeedGenerator()
    fg.load_extension('podcast')

    url = "{}{}.xml".format(base_public_url, show.id)
    fg.id(url.split('.')[0])
    fg.title(show.name)
    fg.image(show.image_url)
    fg.description(show.description)
    fg.link(href=url, rel='self')

    # collect all mp3s for the given show
    all_mp3s = glob.glob(os.path.join(podcast_dir, "{}_*.mp3".format(show.id)))

    for filepath in all_mp3s:
        filename = os.path.basename(filepath)
        mp3_date = _get_date_from_mp3_path(filepath, showlocal_tz)
        mp3_size = os.stat(filepath).st_size
        mp3_url = base_public_url + filename
        mp3_id = filename.split('.')[0]
        title = "Programa del {0:%d}/{0:%m}/{0:%Y}".format(mp3_date)

        # build the rss entry
        fe = fg.add_entry()
        fe.id(mp3_id)
        fe.pubdate(mp3_date)
        fe.title(title)
        fe.enclosure(mp3_url, str(mp3_size), 'audio/mpeg')

    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(podcast_dir, '{}.xml'.format(show.id)))
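
The helper _get_date_from_mp3_path is not shown in this example; a plausible sketch, assuming filenames carry the date as <show_id>_YYYY-MM-DD.mp3 (the real layout in the source project may differ):

import datetime
import os

def _get_date_from_mp3_path(filepath, showlocal_tz):
    # hypothetical "<show_id>_<YYYY-MM-DD>.mp3" filename layout
    stem = os.path.splitext(os.path.basename(filepath))[0]
    date_part = stem.split('_', 1)[1]
    naive = datetime.datetime.strptime(date_part, '%Y-%m-%d')
    return showlocal_tz.localize(naive)  # assumes a pytz timezone object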
Example #11
def latestRss(userID):
    userID = userID.lower()

    shows = {}
    episodes = []
    today = date.today().strftime('%Y-%m-%d')
    for showID in series.getUserShowList(userID):
        shows[showID] = series.getShowInfo(userID, showID, withEpisodes=True, onlyUnseen=True)
        episodes.extend((showID, episode) for episode in shows[showID]['episodes'] if episode['airdate'] and airdateKey(episode['airdate']) < today)

    episodes.sort(key=episodeAirdateKey, reverse=True)

    feed = FeedGenerator()
    feed.id(userID)
    feed.title('%s\'s shows' % userID)
    feed.description('Unseen episodes')
    feed.link(href=request.url_root)
    feed.language('en')

    for showID, episode in episodes:
        entry = feed.add_entry()
        entry.id('%s/%s' % (showID, episode['episode_id']))
        entry.title('%s S%02dE%02d: %s' % (shows[showID]['name'], episode['season'], episode['episode'], episode['title']))

    return feed.rss_str(pretty=True)
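
airdateKey and episodeAirdateKey are helpers defined elsewhere in this project; plausible sketches, assuming air dates are 'YYYY-MM-DD' strings that sort lexicographically:

def airdateKey(airdate):
    # normalise a possibly-missing airdate into a sortable string
    return airdate or ''

def episodeAirdateKey(item):
    showID, episode = item
    return airdateKey(episode['airdate'])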
Example #12
class Feeder():
    def __init__( self, url, title='', feedURL='' ):
        scraper = None
        if url.startswith( "https://twitter.com/" ):
            scraper = TwitterScraper( url )
            if title == '':
                title = "Twitter: @" + url.split('/')[3]
        elif url.startswith( "http://www.lindwurm-linden.de/termine" ):
            scraper = LindwurmScraper( url )
            if title == '':
                title = "Lindwurm: Termine"
        else:
            raise UnsupportedService( "No scraper found for this URL." )

        self.feed = FeedGenerator()        
        self.feed.id( url )
        self.feed.title( title )
        self.feed.author( { "name": url } )

        if feedURL != '':
            self.feed.link( href=feedURL, rel='self' )

        for entry in scraper.entries:
            fe = self.feed.add_entry()
            fe.id( entry['url'] )
            fe.title( entry['title'] )
            fe.link( href=entry['url'], rel='alternate' )
            fe.content( entry['text'] )

    def GetAtom( self ):
        return self.feed.atom_str( pretty=True ).decode()
Example #13
class TestExtensionTorrent(unittest.TestCase):

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('torrent')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_podcastEntryItems(self):
        fe = self.fg.add_item()
        fe.title('y')
        fe.torrent.filename('file.xy')
        fe.torrent.infohash('123')
        fe.torrent.contentlength('23')
        fe.torrent.seeds('1')
        fe.torrent.peers('2')
        fe.torrent.verified('1')
        assert fe.torrent.filename() == 'file.xy'
        assert fe.torrent.infohash() == '123'
        assert fe.torrent.contentlength() == '23'
        assert fe.torrent.seeds() == '1'
        assert fe.torrent.peers() == '2'
        assert fe.torrent.verified() == '1'

        # Check that we have the item in the resulting XML
        ns = {'torrent': 'http://xmlns.ezrss.it/0.1/dtd/'}
        root = etree.fromstring(self.fg.rss_str())
        filename = root.xpath('/rss/channel/item/torrent:filename/text()',
                              namespaces=ns)
        assert filename == ['file.xy']
Example #14
class TestExtensionDc(unittest.TestCase):

    def setUp(self):
        self.fg = FeedGenerator()
        self.fg.load_extension('dc')
        self.fg.title('title')
        self.fg.link(href='http://example.com', rel='self')
        self.fg.description('description')

    def test_entryLoadExtension(self):
        fe = self.fg.add_item()
        try:
            fe.load_extension('dc')
        except ImportError:
            pass  # Extension already loaded

    def test_elements(self):
        for method in dir(self.fg.dc):
            if method.startswith('dc_'):
                m = getattr(self.fg.dc, method)
                m(method)
                assert m() == [method]

        self.fg.id('123')
        assert self.fg.atom_str()
        assert self.fg.rss_str()
Example #15
def get_feed(query, title, description, link, image):
    """Get an RSS feed from the results of a query to the YouTube API."""
    service = _get_youtube_client()
    videos = service.search().list(part='snippet', **query, order='date',
                                   type='video', safeSearch='none').execute()
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(title)
    fg.description(description)
    fg.link(href=link, rel='alternate')
    fg.image(image)
    youtube_plugin = get_plugin_from_settings()

    for video in videos['items']:
        try:
            video_url = youtube_plugin.extract_link(
                "https://www.youtube.com/watch?v=" + video['id']['videoId'])
        except PluginException:
            continue
        fe = fg.add_entry()
        fe.id(video['id']['videoId'])
        fe.title(video['snippet']['title'])
        fe.description(video['snippet']['description'])
        fe.pubdate(dateutil.parser.parse(video['snippet']['publishedAt']))
        fe.podcast.itunes_image(video['snippet']['thumbnails']['high']['url'])
        video_info = requests.head(video_url)
        fe.enclosure(video_url, video_info.headers['Content-Length'],
                     video_info.headers['Content-Type'])
    return fg.rss_str(pretty=True)
Example #16
File: ming.py Project: adow/ming
 def render_atom(self):
     fg = FeedGenerator()
     fg.id(self.site_url)
     fg.title(self.site_title)
     fg.link(href = self.site_url,rel = 'alternate')
     fg.link(href = self.site_url + 'atom.xml',rel = 'self')
     fg.language('zh-cn')
     link_list = ArticleManager.sharedManager().link_list()
     for link in link_list:
         article = ArticleManager.sharedManager().article_for_link(link)
         if not article:
             continue
         fe = fg.add_entry()
         fe.id(article.article_link)
         fe.link(link = {'href':self.site_url + article.article_link})
         fe.title(article.article_title)
         fe.description(article.article_subtitle or '')
         fe.author(name = article.author or '',
                 email = article.author_email or '')
         d = datetime.strptime(article.article_publish_date,'%Y-%m-%d') 
         pubdate = datetime(year = d.year, month = d.month, day = d.day,tzinfo = UTC(8))
         fe.pubdate(pubdate) 
         article.render_content_html()
         fe.content(content = article._content_html,
                 type = 'html')
     atom_feed = fg.atom_str(pretty = True)
     return atom_feed
Example #17
def score_by_community(Community):
    post = r"http://*****:*****@csu.fullerton.edu'} )
                fe.link( href=each["URLResource"], rel='alternate' )
                fe.content(each["Community"])
                temp+=1
                break
    rssfeed  = fg.rss_str(pretty=True)
    return rssfeed  
Example #18
class YoutubeFeed:  
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': '%(id)s.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }]
    }

    def __init__(self, name):
        self.name = name
        self.ydl = youtube_dl.YoutubeDL(self.ydl_opts)

        self.fg = FeedGenerator()
        self.fg.title(name)
        self.fg.author({"name": "Youtube Audio Feed", "email": ""})
        self.fg.link(href="http://www.foo.bar.baz.com", rel="alternate")
        self.fg.description("Personalized Youtube audio feed")
        self.fg.generator("")
        self.fg.docs("")

    def add_video(self, url):
        info = self.ydl.extract_info(url, download=True)
        entry = self.fg.add_entry()
        entry.id(info['id'])
        entry.title(info['title'])
        entry.description(info['description'])
        entry.enclosure(info['id'] + ".mp3", str(info['duration']), 'audio/mpeg')

    def save(self):
        self.fg.rss_file(self.name + '.xml')
Example #19
class RssHistory(View):
    """RSS History Page Controller"""

    @redirect_if_not_installed
    def get(self, request):

        self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
        self.__fg = FeedGenerator()
        self.__context = Context()
        self.__option_entity = OptionEntity()

        self.__context.autoload_options()
        self.__context.push({
            "page_title": self.__context.get("app_name", os.getenv("APP_NAME", "Silverback")),
            "is_authenticated": request.user and request.user.is_authenticated
        })

        self.__fg.id('http://silverbackhq.org')
        self.__fg.title('Some Testfeed')
        self.__fg.author({'name': 'John Doe', 'email': '*****@*****.**'})
        self.__fg.link(href='http://example.com', rel='alternate')
        self.__fg.logo('http://ex.com/logo.jpg')
        self.__fg.subtitle('This is a cool feed!')
        self.__fg.link(href='http://silverbackhq.org/test.atom', rel='self')
        self.__fg.language('en')

        return HttpResponse(self.__fg.atom_str(), content_type='text/xml')
Example #20
def get_feed(atom=False):
    fg = FeedGenerator()
    domain = get_domain()
    items = get_posts({"limit": "10"}, full=True)["results"]
    fg.id("http://%s/"%domain)
    fg.title("Blog do MatrUFSC2")
    fg.description("Feed do blog do MatrUFSC2, onde noticias e novos recursos sao anunciados primeiro!")
    fg.language('pt-BR')
    fg.link({"href":"/blog/feed","rel":"self"})
    fg.updated(items[0]["posted_at"].replace(tzinfo=pytz.UTC))
    for item in items:
        entry = fg.add_entry()
        entry.title(item["title"])

        tree = html.fromstring(item["summary"])
        cleaner = Cleaner(allow_tags=[])
        tree = cleaner.clean_html(tree)

        text = tree.text_content()
        entry.description(text, True)
        entry.link({"href":item["link"],"rel":"self"})
        entry.content(item["body"])
        entry.published(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.updated(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.category({"label": item["category"]["title"], "term": item["category"]["slug"]})
        entry.id(item["id"])
    if atom:
        return fg.atom_str(pretty=True)
    else:
        return fg.rss_str(pretty=True)
Example #21
def feed():
    """
    Generate atom feed
    """
    entries = parse_posts(0, C.feed_count)
    fg = FeedGenerator()
    fg.id(str(len(entries)))
    fg.title(C.title)
    fg.subtitle(C.subtitle)
    fg.language(C.language)
    fg.author(dict(name=C.author, email=C.email))
    fg.link(href=C.root_url, rel='alternate')
    fg.link(href=make_abs_url(C.root_url, 'feed'), rel='self')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry.get('url'))
        fe.title(entry.get('title'))
        fe.published(entry.get('date'))
        fe.updated(entry.get('updated') or entry.get('date'))
        fe.link(href=make_abs_url(C.root_url, entry.get('url')), rel='alternate')
        fe.author(dict(name=entry.get('author'), email=entry.get('email')))
        fe.content(entry.get('body'))

    atom_feed = fg.atom_str(pretty=True)
    return atom_feed
Example #22
    def build_feed(self):
        "Build the feed given our existing URL"
        # Get all the episodes
        page_content = str(requests.get(self.url).content)
        parser = BassdriveParser()
        parser.feed(page_content)
        links = parser.get_links()

        # And turn them into something usable
        fg = FeedGenerator()
        fg.id(self.url)
        fg.title(self.title)
        fg.description(self.title)
        fg.author({'name': self.dj})
        fg.language('en')
        fg.link({'href': self.url, 'rel': 'alternate'})
        fg.logo(self.logo)

        for link in links:
            fe = fg.add_entry()
            fe.author({'name': self.dj})
            fe.title(link[0])
            fe.description(link[0])
            fe.enclosure(self.url + link[1], 0, 'audio/mpeg')

            # Bassdrive always uses date strings of
            # [yyyy.mm.dd] with 0 padding on days and months,
            # so that makes our lives easy
            date_start = link[0].find('[')
            date_str = link[0][date_start:date_start+12]
            published = datetime.strptime(date_str, '[%Y.%m.%d]')
            fe.pubdate(UTC.localize(published))
            fe.guid((link[0]))

        return fg
Example #23
def create_podcast_from_channel(url, podcast_root_url, items_to_process):
    feed = feedparser.parse(url)
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(feed['channel']['title'])
    fg.link(href=feed['channel']['href'], rel='alternate')
    fg.description('Audio tracks from YouTube Channel "{}"'.format(feed['channel']['title']))
    # fg.image(url=feed.image.href, width=feed.image.width, height=feed.image.height)

    output_dir = '/y2p/output'

    for item in feed['items'][:items_to_process]:
        sys.stdout.write(item['link'] + '\n')

        our_id = hashlib.md5(item['link'].encode()).hexdigest()

        audio_fname = our_id + '.m4a'
        audio_fullpath = os.path.join(output_dir, audio_fname)

        if not os.path.exists(audio_fullpath):
            create_audio_file(audio_fullpath, our_id, item['link'])

        p_entry = fg.add_entry()
        p_entry.id(item['id'])
        p_entry.title(item['title'])
        p_entry.description(item['summary'])
        p_entry.enclosure(podcast_root_url + '/' + audio_fname, 0, 'audio/m4a')
        p_entry.published(item['published'])

    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(output_dir, 'feed.xml'))
Example #24
def all_recent_posts():
    url = 'http://localhost:2015/posts/all/recent/{num}'
    url = url.format(num = 25)
    headers = {'content-type': 'application/json'}
    r = requests.get(url, headers=headers)
    data = r.json()
    final_data = data['data']

    fg = FeedGenerator()
    fg.id(url)
    fg.title('Microservice - view all')
    fg.link( href=url, rel='alternate' )
    fg.description('The 25 most recent posts to any community')
    fg.language('en')

    #This is genating the RSS feeds
    for i in final_data:
        fe = fg.add_entry()
        fe.id(i['post_id'])
        fe.author({'name': i['author'], 'email':""})
        fe.title(i['title'])
        fe.description(i['community'])
        date = datetime.fromtimestamp(i['date'])
        date = date.replace(tzinfo=pytz.utc)
        print(date)
        fe.published(date)
        fe.content(content=i['text'])
        if 'url' in i:
            fe.source(url=i['url'], title=i['title'])

    rssfeed  = fg.rss_str(pretty=True) # Get the RSS feed as string
    resp = flask.Response(rssfeed, mimetype='text/xml', status = 200, content_type = 'application/rss+xml; charset=UTF-8')
    return resp
Example #25
    def create_feed(self):
        """
        replace results by info
        #results keys
        _type entries id title uploader uploader_id uploader_url extractor webpage_url webpage_url_basename extractor_key
        """
        fg = FeedGenerator()
        fg.load_extension('podcast')
        fg.podcast.itunes_category('Podcasting')

        fg.title(self.infos['title'])
        fg.description('none')
        fg.link(href=self.args.podcast_url, rel='self')

        for item in self.infos['entries']:
            """
            #infos['entries'] keys
            id uploader uploader_id uploader_url channel_id channel_url upload_date license creator title alt_title thumbnail description categories tags subtitles automatic_captions duration
            """
            fe = fg.add_entry()
            fe.id(item['id'])
            fe.title(item['title'])
            fe.description(item['description'])
            item_full_path = self.args.podcast_url + '/' + self.infos[
                'title'] + '/' + item['title'] + '.mp3'
            fe.enclosure(item_full_path, str(item['duration']), 'audio/mpeg')

        fg.rss_str(pretty=True)
        # create folder of feed if it doesn't exists
        os.makedirs(self.args.dir + '/' + self.infos['title'], exist_ok=True)
        fg.rss_file(self.args.dir + '/' + self.infos['title'] + '/podcast.xml')

        return True
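
The docstring above notes that self.infos is a youtube-dl style info dict; a hedged sketch of how it could be produced (fetch_infos is a hypothetical helper, not part of the original code):

import youtube_dl

def fetch_infos(playlist_url):
    # extract_info(download=False) returns a dict with the 'title' and
    # 'entries' keys that create_feed() reads above
    with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
        return ydl.extract_info(playlist_url, download=False)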
Example #26
def generateFeeds(buffered, meta):
    utc = pytz.utc
    fg = FeedGenerator()
    fg.id(meta['id'])
    fg.title(meta['title'])
    fg.author(meta['author'])
    fg.subtitle(meta['subtitle'])
    fg.link(href=meta['link'], rel='self')
    fg.language(meta['language'])

    for tweet in buffered:
        fe = fg.add_entry()
        fe.id(tweet['url'].decode('utf-8'))
        fe.published(
            utc.localize(tweet['created_at']).astimezone(
                pytz.timezone(locale)))

        #fe.guid(tweet['url'].decode('utf-8'))
        fe.link(href=tweet['url'].decode('utf-8'), rel='alternate')
        fe.title(tweet['readable_title'])
        fe.description(tweet['readable_article'])

        try:
            fe.author({
                'name':
                '',
                'email':
                tweet['user_name'].decode('utf-8') + ": " +
                tweet['text'].decode('utf-8')
            })
        except Exception as e:
            logger.error(e)
            fe.author({'name': 'a', 'email': '*****@*****.**'})
Example #27
def build():
    global fg
    fg = FeedGenerator()
    fg.title(parser.get('technowatch', 'name'))
    fg.language('en')
    fg.description(parser.get('technowatch', 'name'))
    fg.link(href=parser.get('technowatch', 'link'), rel='alternate')
    # Cleaning stories if too much
    if len(known_stories) > int(parser.get('technowatch', 'cache_max')):
        clean()
    # Sorting stories by crawled date
    for item in sorted(known_stories.values(), key=operator.itemgetter('crawledDate'), reverse=True):
        fe = fg.add_entry()
        fe.link(href=item['url'], rel='alternate')
        fe.title("[" + item['type'] + "] " + item['title'])
        fe.category({
            'label': item['type'],
            'term': item['type']
        })
        fe.author({'name': item['by']})
        fe.description(item['desc'])
        fe.pubdate(item['crawledDate'])
    # Caching RSS building
    pickle.dump(known_stories, open(cust_path + "/technowatch.data", "wb"))
    if parser.get('wsgi', 'activated') == "True":
        fg.rss_file(cust_path + '/static/rss.xml')
    if parser.get('ftp', 'activated') == "True":
        upload()
Example #28
    def __call__(self, value, system):

        feed = FeedGenerator()
        feed.id(system["request"].url)
        feed.title(value["title"])
        feed.description("Log Cabin")
        feed.link(rel="self", href=system["request"].url)
        feed.language("en")

        if system["renderer_name"] == "atom":
            feed.link([{
                "rel": "alternate",
                "type": content_type,
                "href": url
            } for content_type, url in system["request"].extensions])

        for entry in value["entries"]:
            feed.add_entry(entry)

        system["request"].response.headers[
            "Content-type"] = extension_content_types[
                system["renderer_name"]] + "; charset=UTF-8"

        if system["renderer_name"] == "rss":
            return feed.rss_str(pretty=True)
        else:
            return feed.atom_str(pretty=True)
Example #29
  def makeRss(self):
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine Robot Radio: ' + self.mode)
    fg.author( {'name':'David Blackmad','email':'*****@*****.**'} )
    fg.logo('http://dump.blackmad.com/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine Robot Radio: ' + self.mode)

    description = ' <br/>'.join(['%s. %s' % (index + 1, self.mk_song_id(s)) for index, s in enumerate(self.songs)])

    fe = fg.add_entry()
    fe.title(self.track_name)
    fe.description(description)
    fe.id(self.filename)
    # add length
    print(self.relative_dir)
    print(self.filename)
    fe.enclosure(url = 'http://hypecast.blackmad.com/%s' % (self.filename), type="audio/mpeg")

    rss_str = fg.rss_str()
    newItem = ET.fromstring(rss_str)[0].find('item')
    out = open(self.get_filename('xml'), 'w')
    out.write(ET.tostring(newItem, encoding='unicode'))  # tostring() returns bytes unless encoding='unicode'
    out.close()
    self.updateRss()
Example #30
def generateFeed(urls, query, search_engine):
    """
    Generates RSS feed from the given urls
    :param urls: List of URL. Each entry contains url, title, short description
    :param feed_title: Title of the feed
    :param feed_link: Link of the feed
    :param feed_descr: Short description of feed
    """
    # google as search engine
    if search_engine == 0:
        feed = google_feed
    # duckduckgo as search engine
    elif search_engine == 1:
        feed = duckduckgo_feed
    elif search_engine == 2:
        feed = bing_feed
    elif search_engine == 3:
        feed = askcom_feed
    else:
        raise ValueError('unknown search engine: %s' % search_engine)

    fg = FeedGenerator()
    fg.title(feed[0])
    fg.link(href=feed[1], rel='alternate')
    fg.description(feed[2] % query)

    for url in urls:
        fe = fg.add_entry()
        fe.title(url[0])
        fe.link({'href': url[1], 'rel': 'alternate'})
        fe.description(url[2])

    print(fg.rss_str(pretty=True))
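
The *_feed globals referenced above are defined elsewhere; a hedged usage sketch, assuming each is a (title, link, description-template) tuple whose template contains %s:

google_feed = ('Google search results', 'https://www.google.com',
               'Results for "%s"')  # hypothetical placeholder

urls = [('Example result', 'https://example.com', 'A short description')]
generateFeed(urls, query='feedgen', search_engine=0)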
Example #31
def gen_feed(user, base_url, path, debug=False):
    # Create feed
    feed = FeedGenerator()
    feed.id(urlparse.urljoin(base_url, user + '.xml'))
    feed.title('Snapchat story for ' + user)
    feed.link(href=urlparse.urljoin(base_url, user + '.xml'), rel='self')
    feed.language('en')
    feed.description('Snapchat media')

    # Iterate through files in path, sort by unix timestamp (newest first), then add to feed
    files = sorted(os.listdir(path), reverse=True)

    for filename in files:
        split = filename.split('~')

        if split[0] != user:
            continue

        if os.path.splitext(filename)[1] in ['.mp4', '.jpg']:
            entry = feed.add_entry()
            entry.id(urlparse.urljoin(base_url, filename))
            entry.link(href=urlparse.urljoin(base_url, filename))
            entry.title(filename)

    # Write feed to disk
    feed.rss_file(os.path.join(path, user + '.xml'))
    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

    if debug:
        print('{0}  Regenerated {1}'.format(
            date, urlparse.urljoin(base_url, user + '.xml')))
Example #32
def serialize_category_atom(category, url, user, event_filter):
    """Export the events in a category to Atom

    :param category: The category to export
    :param url: The URL of the feed
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLalchemy criterion to restrict which
                         events will be returned.  Usually something
                         involving the start/end date of the event.
    """
    query = (Event.query.filter(Event.category_chain_overlaps(
        category.id), ~Event.is_deleted, event_filter).options(
            load_only('id', 'category_id', 'start_dt', 'title', 'description',
                      'protection_mode', 'access_key'),
            subqueryload('acl_entries')).order_by(Event.start_dt))
    events = [e for e in query if e.can_access(user)]

    feed = FeedGenerator()
    feed.id(url)
    feed.title('Indico Feed [{}]'.format(category.title))
    feed.link(href=url, rel='self')

    for event in events:
        entry = feed.add_entry(order='append')
        entry.id(event.external_url)
        entry.title(event.title)
        entry.summary(unicode(event.description))
        entry.link(href=event.external_url)
        entry.updated(event.start_dt)
    return BytesIO(feed.atom_str(pretty=True))
Example #33
    def setUp(self):
        fg = FeedGenerator()
        self.feedId = 'http://example.com'
        self.title = 'Some Testfeed'

        fg.id(self.feedId)
        fg.title(self.title)
        fg.link(href='http://lkiesow.de', rel='alternate')[0]
        fg.description('...')

        fe = fg.add_entry()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The First Episode')
        fe.content(u'…')

        # Use also the different name add_item
        fe = fg.add_item()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The Second Episode')
        fe.content(u'…')

        fe = fg.add_entry()
        fe.id('http://lernfunk.de/media/654321/1')
        fe.title('The Third Episode')
        fe.content(u'…')

        self.fg = fg
Example #35
def posts_by_score_community():
    vote = "http://*****:*****@csu.fullerton.edu'} )
                fe.link( href=post["URLResource"], rel='alternate' )
                # fe.content({'content':post["Content"], 'src':'http://localhost:5000/posts/all', 'type':post["CDATA"]} )
                fe.description(post["Community"])
                i+=1
                break
    rssfeed  = fg.rss_str(pretty=True)
    return rssfeed  
Example #36
def init_feed() -> None:
    """
    This function initialises the RSS feed with the
    correct attributes.
    """
    log.debug("Initialising the feed...")

    global fg

    try:
        fg = FeedGenerator()
        # Setup [root] feed attributes
        fg.id("https://www.dagzure.com/dynamics_feed.xml")
        fg.title("Dynamics 365 Feeds")
        fg.generator("SingleRSS/v1.0.1")
        fg.link(href="https://www.dagzure.com/dynamics_feed.xml", rel="self")
        fg.subtitle("Combined feed for RSS feeds")
        fg.language('en')
    except Exception:
        log.error("Error initialising the feed!")
        sys.exit(1)

    log.debug("Feed initialised!")

    return None
Example #37
def generate_feed(output_file, exclude_highlights=True):
    # Parse RSS feed
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:

        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            pass
        else:
            fe = fg.add_entry()

            fe.id(e.id)
            fe.title(e.title)
            fe.description(e.description)
            fe.enclosure(url=e.enclosures[0]["href"], length=e.enclosures[0]["length"], type=e.enclosures[0]["type"])

            fe.podcast.itunes_summary(e.description)
            fe.podcast.itunes_subtitle(e.description)
            fe.podcast.itunes_duration(e["itunes_duration"])

            dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
            date = tz.localize(dt)

            # Local hour
            if "Show: " in e.title:
                fe.published(date)
            elif "Hour 1" in e.title:
                fe.published(date + timedelta(hours=1))
            elif "Hour 2" in e.title:
                fe.published(date + timedelta(hours=2))
            elif "Hour 3" in e.title:
                fe.published(date + timedelta(hours=3))
            else:
                fe.published(date + timedelta(hours=-1))

    fg.rss_str(pretty=True)
    fg.rss_file(output_file)
Example #38
def generate_rss_from_articles(feed_settings, articles):
    """
    Creates a FeedGenerator feed from a set of feed_entries.
    
    :param feed_settings: a feed_settings object containing 
    :param articles: 
    :return: 
    """

    # create the feed
    output_feed = FeedGenerator()

    # add metadata to the feed
    # TODO this feels like it can be done without output_rss on every line but...Python newbie
    output_feed.title(feed_settings.title)
    output_feed.author(feed_settings.author)
    output_feed.link(href=feed_settings.source_page_url, rel='alternate')
    output_feed.link(href=feed_settings.output_url, rel='self')
    output_feed.logo(feed_settings.logo_img_url)
    output_feed.subtitle(feed_settings.subtitle)
    output_feed.language(feed_settings.language)
    # output_rss.id(UM_SOMETHING)

    # add each feed item
    for article in articles:
        feed_entry_added = output_feed.add_entry()
        feed_entry_added.id(article.link)  # ATOM
        # guid for RSS?
        feed_entry_added.link(href=article.link, rel='alternate')  # ATOM
        feed_entry_added.title(article.title)
        feed_entry_added.description(article.description)
        # feed_entry_added.link(article.link)

    return output_feed
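
A hedged usage sketch for the function above; FeedSettings and Article are hypothetical stand-ins for whatever objects the surrounding project passes in:

from collections import namedtuple

FeedSettings = namedtuple('FeedSettings', [
    'title', 'author', 'source_page_url', 'output_url',
    'logo_img_url', 'subtitle', 'language'])
Article = namedtuple('Article', ['link', 'title', 'description'])

settings = FeedSettings(
    title='Example feed', author={'name': 'Jane Doe'},
    source_page_url='https://example.com/news',
    output_url='https://example.com/feed.xml',
    logo_img_url='https://example.com/logo.png',
    subtitle='Scraped headlines', language='en')
articles = [Article('https://example.com/post-1', 'Post 1', 'First post')]

feed = generate_rss_from_articles(settings, articles)
print(feed.rss_str(pretty=True).decode('utf-8'))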
Example #39
def getcontent():
    req = requests.get('http://localhost/article/content',
                       json={"count": 10},
                       auth=('*****@*****.**', 'adminpassword'))
    value = req.json()
    fg = FeedGenerator()
    fg.title('A full feed')
    fg.link(href='http://localhost/feed/summary/content')
    fg.subtitle('This is a cool feed!')
    fg.language('en')

    for x in value:
        fe = fg.add_entry()
        fe.content(x.get('articles_content', ''))
        if x.get('articles_tags', '') != 'null':
            for v in x.get('articles_tags', ''):
                fe.category(term='Tag: ' + v)
        else:
            fe.category(term='Tag: None')
        if x.get('articles_comments') is None:
            count = '0'
        else:
            count = str(len(x.get('articles_comments', '')))
        fe.category(term='Comment Count: ' + count)

    fg.rss_file('content.xml')
    return "YOUR CONTENT WAS CREATED content.xml", 200
Example #40
def run(folder, url):
    from feedgen.feed import FeedGenerator

    fg = FeedGenerator()

    head, tail = os.path.split(folder)

    title = tail.decode("utf-8")
    fg.id(str(uuid.uuid4()))
    fg.title(title)

    fg.link(href="{0}/rss.xml".format(url), rel="self")
    fg.description(u"Audiobook `{0}` generated with rssbook".format(title))

    fg.load_extension("podcast")
    for item in sorted(os.listdir(folder)):
        if os.path.splitext(item)[1] == ".mp3":
            get_node(os.path.join(folder, item))

        fullpath = os.path.join(folder, item)
        fe = fg.add_entry()
        fe.id(str(uuid.uuid4()))
        fe.title(title)
        fe.description(item)

        fe.link(
            href="{0}/{1}".format(url, item), rel="enclosure", type="audio/mpeg", length=str(os.stat(fullpath).st_size)
        )

    fg.rss_file(os.path.join(folder, "rss.xml"))
Example #41
    def build_artifact(self, artifact):
        ctx = get_ctx()
        feed_source = self.source
        page = feed_source.parent

        fg = FeedGenerator()
        fg.id(get_id(ctx.env.project.id))
        fg.title(page.record_label + u" — Pallets Project")
        fg.link(href=url_to("/blog", external=True))
        fg.link(href=url_to(feed_source, external=True), rel="self")

        for item in page.children.order_by('-pub_date', '-pub_order',
                                           'title').limit(10):
            fe = fg.add_entry()
            fe.title(item["title"])
            fe.content(text_type(item["body"]), type="html")
            fe.link(href=url_to(item, external=True))
            fe.id(
                get_id(u"{}/{}".format(ctx.env.project.id,
                                       item["_path"].encode("utf-8"))))
            fe.author(name=item["author"])
            updated = datetime(*item["pub_date"].timetuple()[:3])
            updated = updated.isoformat() + ("Z" if not updated.tzinfo else "")
            fe.updated(updated)

        with artifact.open('wb') as f:
            f.write(fg.atom_str(pretty=True))
Example #42
def rss():    
    config = public_app.config['feed']
    fg = FeedGenerator()
    fg.id('%s/blog' % Config.BASE_URL)
    fg.title(config['title'])
    fg.author( {'name': config['author'],'email': config['email']} )
    fg.description(config['desc'])
    fg.link( href=Config.BASE_URL, rel='alternate' )
    query = {
        'id': { '$regex': 'blog' },
        'current': True,
        'meta.hide': { '$ne': True }
    }
    posts = db.pages.find(query).sort('meta.created', -1)[:20]
    for post in posts:
        fe = fg.add_entry()
        fe.title(post['meta']['title'])
        if 'author' in post['meta']:
            fe.author( {'name': post['meta']['author'],'email': config['email']} )
        else:
            fe.author( {'name': config['author'],'email': config['email']} )
        fe.description(do_truncate(post['content'], 300))
        fe.link(href="%s/%s" % (Config.BASE_URL, post['id']), rel='alternate')
        fe.pubdate(utc.localize(post['meta']['created']))
        fe.content(post['content'])    
    response.headers['Content-Type'] = 'application/rss+xml'
    return fg.rss_str(pretty=True)
Example #44
def feed(column_id):
    api = Api(column_id)

    with request.urlopen(api.info) as stream:
        result = stream.read().decode('utf-8')

    if not result:
        return '', 404

    info = json.loads(result)

    with request.urlopen(api.posts) as stream:
        result = stream.read().decode('utf-8')
        entries = json.loads(result)

    fg = FeedGenerator()
    fg.id(str(entries[0]['slug']))
    fg.title(info['name'])
    fg.language('zh_CN')
    fg.icon(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 's'))
    fg.logo(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 'l'))
    fg.description(info['intro'])
    fg.author(dict(name=info['creator']['name']))
    fg.link(href=api.base_url + info['url'], rel='alternate')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry['url'])
        fe.title(entry['title'])
        fe.published(entry['publishedTime'])
        fe.updated(entry['publishedTime'])
        fe.author(dict(name=entry['author']['name']))
        fe.link(href=api.base_url + entry['url'], rel='alternate')
        fe.content(entry['content'])

    return fg.atom_str(pretty=True)
Example #45
def gen_feed(endpoint):
    # Make sure we have somewhere to save the files
    if not os.path.isdir('./gen'):
        print('There is no gen directory. Create ./gen')

    # Uses parse_dir.py to get the books and files
    books = parse_dir.getbooks_r('./audiobooks')

    for (book, files) in books:
        # Creates a new feed for each book
        fg = FeedGenerator()
        fg.load_extension('podcast')

        fg.podcast.itunes_category('Audiobook')

        for (file_name, file_path) in files:
            # the 1: removes the period because the base dir is ./audiobooks
            url = endpoint + file_path[1:]

            fe = fg.add_entry()
            fe.id(url)
            fe.title(file_name)
            fe.description(file_name)
            fe.enclosure(requote_uri(url), str(os.path.getsize(file_path)), 'audio/mpeg')

        fg.title(book)
        fg.link(href=endpoint, rel='self')
        fg.description(book)
        fg.rss_str(pretty=True)

        # Saves the file
        rss_file_path = os.path.join('./gen/', book + '.xml')
        ensure_dir(rss_file_path)
        logging.info("generate feed: %s" % rss_file_path)
        fg.rss_file(rss_file_path)
Example #46
def generate_rss_feed(repo, filename, me):
    generator = FeedGenerator()
    generator.id(repo.html_url)
    generator.title(f"RSS feed of {repo.owner.login}'s {repo.name}")
    generator.author(
        {"name": os.getenv("GITHUB_NAME"), "email": os.getenv("GITHUB_EMAIL")}
    )
    generator.link(href=repo.html_url)
    generator.link(
        href=f"https://raw.githubusercontent.com/{repo.full_name}/master/{filename}",
        rel="self",
    )
    for issue in repo.get_issues():
        if not issue.body or not is_me(issue, me) or issue.pull_request:
            continue
        item = generator.add_entry(order="append")
        item.id(issue.html_url)
        item.link(href=issue.html_url)
        item.title(issue.title)
        item.published(issue.created_at.strftime("%Y-%m-%dT%H:%M:%SZ"))
        for label in issue.labels:
            item.category({"term": label.name})
        body = "".join(c for c in issue.body if _valid_xml_char_ordinal(c))
        item.content(CDATA(marko.convert(body)), type="html")
    generator.atom_file(filename)
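
The _valid_xml_char_ordinal helper filtering the issue body is not shown; a plausible sketch, assuming the standard XML 1.0 valid-character ranges:

def _valid_xml_char_ordinal(c):
    codepoint = ord(c)
    # mirrors the XML 1.0 "Char" production
    return (
        0x20 <= codepoint <= 0xD7FF
        or codepoint in (0x9, 0xA, 0xD)
        or 0xE000 <= codepoint <= 0xFFFD
        or 0x10000 <= codepoint <= 0x10FFFF
    )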
Example #47
def feed():
    root_url = request.url_root.rstrip("/")
    home_full_url = root_url + url_for(".index")
    feed_full_url = root_url + url_for(".feed")
    site = app.config["SITE_INFO"]
    site_tz = s2tz(site["timezone"]) or timezone(timedelta())
    # set feed info
    feed_gen = FeedGenerator()
    feed_gen.id(home_full_url)
    feed_gen.title(site.get("title", ""))
    feed_gen.subtitle(site.get("subtitle", ""))
    if "author" in site:
        feed_gen.author(name=site["author"])
    feed_gen.link(href=home_full_url, rel="alternate")
    feed_gen.link(href=feed_full_url, rel="self")
    # add feed entries
    posts = load_posts(meta_only=True)[:10]
    for i in range(len(posts)):
        p = load_post(posts[i]["filename"])
        if not p:
            continue
        feed_entry = feed_gen.add_entry()
        feed_entry.id(root_url + p["url"])
        feed_entry.link(href=root_url + p["url"])
        feed_entry.title(p["title"])
        feed_entry.content(p["content"])
        feed_entry.published(p["created"].replace(tzinfo=site_tz))
        feed_entry.updated(
            p.get("updated", p["created"]).replace(tzinfo=site_tz))
        if "author" in p:
            feed_entry.author(name=p["author"])
    # make http response
    resp = make_response(feed_gen.atom_str(pretty=True))
    resp.content_type = "application/atom+xml; charset=utf-8"
    return resp
Example #48
def generate_feed(data):
    fg = FeedGenerator()
    fg.id(blog_root)
    fg.title(blog_title)
    fg.author({"name": my_name})
    fg.link(href=blog_root)
    fg.description("Programming, socialism, and Buddhism.")
    fg.language("en")

    for entry in data:
        url = site_root + entry["href"]
        # This causes every post to have a pubDate of midnight MST,
        # but since I don't keep track of the actual time, it's
        # close enough.
        published = datetime.strptime(
            str(entry["created"]) + "--07:00", "%Y-%m-%d-%z")
        fe = fg.add_entry()
        fe.id(url)
        fe.title(entry["title"])
        fe.link(href=url)
        fe.author({"name": my_name})
        fe.description(entry["body"])
        fe.pubDate(published)

    fg.rss_file("rss.xml")
Example #49
class Rss:
    def __init__(self, title='', link='', logo='', description='none'):
        self.title = title
        self.fg = FeedGenerator()
        self.fg.load_extension('podcast')
        self.fg.title(title)
        self.fg.description(description)
        self.fg.link(href=link, rel='alternate')
        self.fg.generator('rss-it')

        if logo:
            self.fg.logo(logo)

    def add_video(self, link, title, pubdate, description=''):
        fe = self.fg.add_entry()
        fe.id(link)
        fe.title(title)
        fe.description(description)
        fe.pubDate(pubdate)

        mimetype = 'audio/mpeg'
        if link.endswith('.m4a'):
            mimetype = 'audio/mp4'
        # add more formats if needed (usually works with audio/mpeg by default)

        fe.enclosure(link, '0', mimetype)

    def export(self):
        return self.fg.rss_str(pretty=True)
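
A quick usage sketch for the class above (URLs and dates are illustrative):

from datetime import datetime, timezone

rss = Rss(title='My channel', link='https://example.com',
          description='Mirrored episodes')
rss.add_video('https://example.com/ep1.m4a', 'Episode 1',
              datetime(2020, 1, 1, tzinfo=timezone.utc))
print(rss.export().decode('utf-8'))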
Example #51
def generate(app, category, torrents):
    """
    generate an rss feed from category with torrents as results
    if category is None this feed is for all categories
    """
    feed = FeedGenerator()
    if category:
        url = util.fullSiteURL(app, 'feed', '{}.rss'.format(category))
    else:
        url = util.fullSiteURL(app, 'feed', 'all.rss')
    feed.link(href=url, rel="self")
    feed.id(url)
    if category:
        title = "new {} torrents on index ex invisibilis".format(category)
    else:
        title = "new torrents on index ex invisibilis"
    feed.title(title)
    feed.description(title)
    feed.author({"name": "anonymous"})
    feed.language("en")
    for torrent in torrents:
        item = feed.add_entry()
        url = util.fullSiteURL(app, torrent.downloadURL())
        item.id(torrent.infohash)
        item.link(href=url)
        item.title(torrent.title)
        item.description(torrent.summary(100))
    return feed
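Note that generate() hands back the FeedGenerator itself rather than serialized XML, so the caller picks the output format, e.g. (a sketch; app and torrents come from the surrounding application):

rss_bytes = generate(app, None, torrents).rss_str(pretty=True)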
Example no. 53
def gen_feed(user, base_url, path, debug=False):
    # Create feed
    feed = FeedGenerator()
    feed.id(urlparse.urljoin(base_url, user + '.xml'))
    feed.title('Snapchat story for ' + user)
    feed.link(href=urlparse.urljoin(base_url, user + '.xml'), rel='self')
    feed.language('en')
    feed.description('Snapchat media')

    # Iterate through files in path, sorted newest first (filenames are
    # assumed to be '<user>~<unix timestamp>...', so a reverse lexical
    # sort puts the latest media first), then add each to the feed
    files = sorted(os.listdir(path), reverse=True)

    for filename in files:
        split = filename.split('~')

        if split[0] != user:
            continue
        
        if os.path.splitext(filename)[1] in ['.mp4', '.jpg']:
            entry = feed.add_entry()
            entry.id(urlparse.urljoin(base_url, filename))
            entry.link(href=urlparse.urljoin(base_url, filename))
            entry.title(filename)

    
    # Write feed to disk
    feed.rss_file(os.path.join(path, user + '.xml'))
    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

    if debug:
        print('{0}  Regenerated {1}'.format(date, urlparse.urljoin(base_url, 
                                                               user + '.xml')))
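These entries only link to the media; clients that expect attachments (podcast apps in particular) need an enclosure too. A sketch that could sit next to entry.title() inside the loop (MIME types inferred from the same extension check the function already does):

            mime = 'video/mp4' if filename.endswith('.mp4') else 'image/jpeg'
            entry.enclosure(urlparse.urljoin(base_url, filename), '0', mime)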
Example no. 54
    def get(self):
        # scrape the six pages of FTSE 100 constituents and concatenate
        base_url = ('http://www.londonstockexchange.com/exchange/prices-and-markets/'
                    'stocks/indices/constituents-indices.html'
                    '?index=UKX&industrySector=&page={}')
        shares = []
        for page in range(1, 7):
            shares += Scrapper.ScrapeWebSite(base_url.format(page))
        fg = FeedGenerator()
        fg.title("Footsie Shares")
        fg.link(href='http://localhost:5000', rel='alternate')
        fg.description('FTSE prices from the last 15 minutes.')
        for s in shares:
            fe = fg.add_entry()
            s.get_rss_item(fe)
        self.write(fg.rss_str())
Example no. 55
    def feed(self, feed_title, title, content, url, published=None, summary=None,
             enclosure=None, media_thumbnail=None):
        feed = FeedGenerator()
        feed.title(feed_title)
        feed.description(faker.sentence())
        feed.link({'href': WP_FEED_URL})

        entry = feed.add_entry()
        entry.title(title)
        entry.link({'href': url})
        entry.author(name=faker.name())
        entry.content(content, type="cdata")
        if summary:
            entry.description(summary)
        if enclosure:
            entry.enclosure(url=enclosure['url'],
                            type=enclosure['type'],
                            length=str(faker.pyint()))
        if media_thumbnail:
            feed.load_extension('media')
            entry.media.thumbnail({'url': media_thumbnail})
        tz = pytz.timezone(faker.timezone())
        published = published or faker.date_time(tzinfo=tz)
        entry.published(published)
        entry.updated(faker.date_time_between(start_date=published, tzinfo=tz))

        return feed.rss_str().decode('utf8')
Example no. 56
def recent_releases(organization_name=None):
    """Generates a feed for the releases of an organization."""
    organization = Organization.query.filter(
        Organization.name == organization_name).first()
    if organization is None:
        abort(404)
    fg = FeedGenerator()
    fg.id(
        url_for(
            "organization_bp.recent_releases",
            organization_name=organization.name,
            _external=True,
        ))
    fg.title(f"Recent releases for {organization.name}")
    fg.link(href=request.url, rel="self")
    for project in organization.projects:
        for release in project.releases:
            fe = fg.add_entry()
            fe.id(f"{release.project.name} {release.version}")
            fe.title(f"{release.project.name} {release.version}")
            fe.description(release.changes)
            fe.link(href=release.release_url)
            fe.updated(release.published_at.replace(tzinfo=timezone.utc))
            fe.published(release.published_at.replace(tzinfo=timezone.utc))
    atomfeed = fg.atom_str(pretty=True)
    return atomfeed
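This view returns the raw Atom bytes without an explicit Content-Type. A sketch of a Flask-style finish instead of the bare return (assuming make_response is imported from flask, as the url_for and abort calls suggest):

    resp = make_response(atomfeed)
    resp.content_type = "application/atom+xml; charset=utf-8"
    return resp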
Example no. 57
def generate_rss(language, since):
    url = "{0}?since={1}".format(language["url"], since)
    file_name = "github_trends_{0}_{1}.rss".format(language["key"], since)
    title = "GitHub Trends - {0} - {1}".format(language["name"], since.capitalize())

    print(url)
    page = requests.get(url)
    tree = html.fromstring(page.content)
    lis = tree.cssselect("ol.repo-list li")

    fg = FeedGenerator()
    fg.title(title)
    fg.link(href="http://github-trends.ryotarai.info/rss/{0}".format(file_name))
    fg.description(title)
    index = 1
    for li in lis:
        a = li.cssselect("h3 a")[0]
        description = ""
        ps = li.cssselect("p")
        if len(ps) > 0:
            description = ps[0].text_content().strip()

        fe = fg.add_entry()
        fe.link(href="https://github.com{0}".format(a.get("href")))
        fe.title("{0} (#{1} - {2} - {3})".format(
            a.text_content().strip().replace(" / ", "/"),
            index,
            language["name"],
            since.capitalize(),
        ))
        fe.description(description)
        index += 1
    rssfeed = fg.rss_str(pretty=True)
    s3.Object(bucket, 'rss/{0}'.format(file_name)).put(Body=rssfeed, ContentType="application/xml")
Example no. 58
def generate_feed_from_episodes(episodes):
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://dannyshaw.github.io/podcast-feeds')
    fg.title('Seinfeld Complete Audio')
    fg.link(href='http://dannyshaw.github.io/podcast-feeds', rel='alternate')
    fg.subtitle('I\'ve seen them enough, audio is all I need.')
    fg.link(
        href=
        'https://raw.githubusercontent.com/dannyshaw/podcast-feeds/master/podcast-feeds/seinfeld.xml',
        rel='self')
    fg.language('en')

    for index, ep in enumerate(episodes):
        file_size = getsize(join(FILES, ep))
        fe = fg.add_entry()
        fe.id(f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}')
        fe.title(ep)
        fe.description(ep)

        # synthetic pubDates, one day apart starting 1999-01-01, so podcast
        # clients keep the episodes in order
        pub_date = datetime(1999, 1, 1, tzinfo=timezone.utc) + timedelta(index)

        fe.pubDate(pub_date)
        fe.link(href=f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}')
        fe.enclosure(f'https://s3.amazonaws.com/danny.podcasts.seinfeld/{ep}',
                     f'{file_size}', 'audio/mpeg')

    fg.rss_file('seinfeld.xml')
Example no. 59
def main():

    client = moduleSocial.connectTumblr()

    posts = client.posts('fernand0')
    
    fg = FeedGenerator()
    fg.id(posts['blog']['url'])
    fg.title(posts['blog']['title'])
    fg.author({'name': posts['blog']['name'], 'email': '*****@*****.**'})
    fg.link(href=posts['blog']['url'], rel='alternate')
    fg.subtitle('Alternate feed due to Tumblr GDPR restrictions')
    fg.language('en')

    print(len(posts['posts']))
    for post in posts['posts']:
        fe = fg.add_entry()
        print(post['post_url'])
        if 'title' in post:
            title = post['title']
            print('T', title)
        else:
            # fall back to the first summary line when a post has no title
            title = post['summary'].split('\n')[0]
            print('S', title)
        fe.title(title)
        fe.link(href=post['post_url'])
        fe.id(post['post_url'])

    fg.atom_file('/var/www/html/elmundoesimperfecto/tumblr.xml')

    sys.exit()
Example no. 60
def generateFeeds(buffered, meta):
    utc = pytz.utc
    fg = FeedGenerator()
    fg.id(meta['id'])
    fg.title(meta['title'])
    fg.author(meta['author'])
    fg.subtitle(meta['subtitle'])
    fg.link(href=meta['link'], rel='self')
    fg.language(meta['language'])

    for tweet in buffered:
        fe = fg.add_entry()
        fe.id(tweet['url'].decode('utf-8'))
        # created_at is assumed to be a naive UTC datetime: localize it,
        # then convert to the zone named by the module-level locale setting
        fe.published(utc.localize(tweet['created_at']).astimezone(pytz.timezone(locale)))
        
        #fe.guid(tweet['url'].decode('utf-8'))
        fe.link(href=tweet['url'].decode('utf-8'), rel='alternate')
        fe.title(tweet['readable_title'])
        fe.description(tweet['readable_article'])
                
        try:
            fe.author({'name': '', 'email': tweet['user_name'].decode('utf-8') + ": " + tweet['text'].decode('utf-8')})
        except Exception as e:
            logger.error(e)
            fe.author({'name': 'a', 'email': '*****@*****.**'})