Code example #1
def feed(column_id):
    api = Api(column_id)

    with request.urlopen(api.info) as stream:
        result = stream.read().decode('utf-8')

    if not result:
        return '', 404

    info = json.loads(result)

    with request.urlopen(api.posts) as stream:
        result = stream.read().decode('utf-8')
        entries = json.loads(result)

    fg = FeedGenerator()
    fg.id(str(entries[0]['slug']))
    fg.title(info['name'])
    fg.language('zh_CN')
    fg.icon(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 's'))
    fg.logo(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 'l'))
    fg.description(info['intro'])
    fg.author(dict(name=info['creator']['name']))
    fg.link(href=api.base_url + info['url'], rel='alternate')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry['url'])
        fe.title(entry['title'])
        fe.published(entry['publishedTime'])
        fe.updated(entry['publishedTime'])
        fe.author(dict(name=entry['author']['name']))
        fe.link(href=api.base_url + entry['url'], rel='alternate')
        fe.content(entry['content'])

    return fg.atom_str(pretty=True)
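
The view above appears to be a Flask handler (the ('', 404) return is a Flask idiom). A minimal sketch of wiring it up, assuming Flask and that feed() lives in the same module; the route path is made up:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/column/<column_id>/feed')
def column_feed(column_id):
    result = feed(column_id)
    if isinstance(result, tuple):  # the ('', 404) not-found case
        return result
    # atom_str() returns bytes; serve them with the Atom media type
    return Response(result, mimetype='application/atom+xml')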
Code example #2
File: generate.py  Project: aaearon/lebatard-show-rss
def generate_feed(output_file, exclude_highlights=True):
    # Parse RSS feed
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:

        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            pass
        else:
            fe = fg.add_entry()

            fe.id(e.id)
            fe.title(e.title)
            fe.description(e.description)
            fe.enclosure(url=e.enclosures[0]["href"], length=e.enclosures[0]["length"], type=e.enclosures[0]["type"])

            fe.podcast.itunes_summary(e.description)
            fe.podcast.itunes_subtitle(e.description)
            fe.podcast.itunes_duration(e["itunes_duration"])

            dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
            date = tz.localize(dt)

            # Local hour
            if "Show: " in e.title:
                fe.published(date)
            elif "Hour 1" in e.title:
                fe.published(date + timedelta(hours=1))
            elif "Hour 2" in e.title:
                fe.published(date + timedelta(hours=2))
            elif "Hour 3" in e.title:
                fe.published(date + timedelta(hours=3))
            else:
                fe.published(date + timedelta(hours=-1))

    fg.rss_file(output_file)
Code example #3
def generateFeeds(buffered, meta):
    utc = pytz.utc
    fg = FeedGenerator()
    fg.id(meta['id'])
    fg.title(meta['title'])
    fg.author(meta['author'])
    fg.subtitle(meta['subtitle'])
    fg.link( href=meta['link'], rel='self' )
    fg.language(meta['language'])

    for tweet in buffered:
        fe = fg.add_entry()
        fe.id(tweet['url'].decode('utf-8'))
        fe.published(utc.localize(tweet['created_at']).astimezone(pytz.timezone(locale)))
        
        #fe.guid(tweet['url'].decode('utf-8'))
        fe.link(href=tweet['url'].decode('utf-8'), rel='alternate')
        fe.title(tweet['readable_title'])
        fe.description(tweet['readable_article'])
                
        try:
            fe.author({'name': '', 'email':tweet['user_name'].decode('utf-8') + ": " + tweet['text'].decode('utf-8')})
        except Exception as e:
            logger.error(e)
            fe.author({'name': 'a', 'email':'*****@*****.**'})
Code example #4
File: main.py  Project: dgomes/imap2rss
	def GET(self):
		cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
		fg = FeedGenerator()
		#TODO create icon
		# fg.icon('http://www.det.ua.pt')
		fg.id(config.get('rss','id'))
		fg.title(config.get('rss','title'))
		fg.subtitle(config.get('rss','subtitle'))
		fg.description(config.get('rss','description'))
		fg.author({'name': config.get('rss','author_name'), 'email':config.get('rss','author_email')})
		fg.language(config.get('rss','language'))
		fg.link(href=config.get('rss','href'), rel='related')

		client = EmailClient()

		for msgn in reversed(client.listBox(config.get('imap','mailbox'))[:config.getint('rss','maxitems')]):
			cherrypy.log("RSS Entry: "+msgn)
			em = client.getEMail(msgn)
			entry = fg.add_entry()
			entry.title(em['subject'])
			entry.author({'name': em['From']['name'], 'email': em['From']['email']})
			entry.guid(config.get("main","baseurl")+'news/'+msgn)
			entry.link({'href':config.get("main","baseurl")+'news/'+msgn, 'rel':'alternate'})
			entry.pubdate(em['date'])
			entry.content(em['body'])
		return fg.rss_str(pretty=True)
Code example #5
File: handler.py  Project: BlogTANG/blog-a
def feed():
    """
    Generate atom feed
    """
    entries = parse_posts(0, C.feed_count)
    fg = FeedGenerator()
    fg.id(str(len(entries)))
    fg.title(C.title)
    fg.subtitle(C.subtitle)
    fg.language(C.language)
    fg.author(dict(name=C.author, email=C.email))
    fg.link(href=C.root_url, rel='alternate')
    fg.link(href=make_abs_url(C.root_url, 'feed'), rel='self')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry.get('url'))
        fe.title(entry.get('title'))
        fe.published(entry.get('date'))
        fe.updated(entry.get('updated') or entry.get('date'))
        fe.link(href=make_abs_url(C.root_url, entry.get('url')), rel='alternate')
        fe.author(dict(name=entry.get('author'), email=entry.get('email')))
        fe.content(entry.get('body'))

    atom_feed = fg.atom_str(pretty=True)
    return atom_feed
Code example #6
File: blog.py  Project: matrufsc2/matrufsc2
def get_feed(atom=False):
    fg = FeedGenerator()
    domain = get_domain()
    items = get_posts({"limit": "10"}, full=True)["results"]
    fg.id("http://%s/"%domain)
    fg.title("Blog do MatrUFSC2")
    fg.description("Feed do blog do MatrUFSC2, onde noticias e novos recursos sao anunciados primeiro!")
    fg.language('pt-BR')
    fg.link({"href":"/blog/feed","rel":"self"})
    fg.updated(items[0]["posted_at"].replace(tzinfo=pytz.UTC))
    for item in items:
        entry = fg.add_entry()
        entry.title(item["title"])

        tree = html.fromstring(item["summary"])
        cleaner = Cleaner(allow_tags=[])
        tree = cleaner.clean_html(tree)

        text = tree.text_content()
        entry.description(text, True)
        entry.link({"href":item["link"],"rel":"self"})
        entry.content(item["body"])
        entry.published(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.updated(item["posted_at"].replace(tzinfo=pytz.UTC))
        entry.category({"label": item["category"]["title"], "term": item["category"]["slug"]})
        entry.id(item["id"])
    if atom:
        return fg.atom_str(pretty=True)
    else:
        return fg.rss_str(pretty=True)
Code example #7
File: feed.py  Project: majestrate/pytracker
def generate(app, category, torrents):
    """
    generate an rss feed from category with torrents as results
    if category is None this feed is for all categories
    """
    feed = FeedGenerator()
    if category:
        url = util.fullSiteURL(app, 'feed', '{}.rss'.format(category))
    else:
        url = util.fullSiteURL(app, 'feed', 'all.rss')
    feed.link(href=url, rel="self")
    feed.id(url)
    if category:
        title = "new {} torrents on index ex invisibilis".format(category)
    else:
        title = "new torrents on index ex invisibilis"
    feed.title(title)
    feed.description(title)
    feed.author({"name": "anonymous"})
    feed.language("en")
    for torrent in torrents:
        item = feed.add_entry()
        url = util.fullSiteURL(app, torrent.downloadURL())
        item.id(torrent.infohash)
        item.link(href=url)
        item.title(torrent.title)
        item.description(torrent.summary(100))
    return feed
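
generate() returns the FeedGenerator itself rather than serialized XML, so the caller does the rendering. A hedged sketch (app, category and torrents stand in for the project's real objects; note that feedgen's RSS serializer also requires a rel='alternate' link, which generate() never sets):

feed = generate(app, category, torrents)
feed.link(href=util.fullSiteURL(app, ''), rel='alternate')  # assumed home-page URL
body = feed.rss_str(pretty=True)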
Code example #8
File: technowatch.py  Project: TheBlusky/technowatch
def build():
    global fg
    fg = FeedGenerator()
    fg.title(parser.get('technowatch', 'name'))
    fg.language('en')
    fg.description(parser.get('technowatch', 'name'))
    fg.link(href=parser.get('technowatch', 'link'), rel='alternate')
    # Cleaning stories if too much
    if len(known_stories) > int(parser.get('technowatch', 'cache_max')):
        clean()
    # Sorting stories by crawled date
    for item in sorted(known_stories.values(), key=operator.itemgetter('crawledDate'), reverse=True):
        fe = fg.add_entry()
        fe.link(href=item['url'], rel='alternate')
        fe.title("[" + item['type'] + "] " + item['title'])
        fe.category({
            'label': item['type'],
            'term': item['type']
        })
        fe.author({'name': item['by']})
        fe.description(item['desc'])
        fe.pubdate(item['crawledDate'])
    # Caching RSS building
    pickle.dump(known_stories, open(cust_path + "/technowatch.data", "wb"))
    if parser.get('wsgi', 'activated') == "True":
        fg.rss_file(cust_path + '/static/rss.xml')
    if parser.get('ftp', 'activated') == "True":
        upload()
Code example #9
File: snapfeed.py  Project: matthazinski/snapfeed
def gen_feed(user, base_url, path, debug=False):
    # Create feed
    feed = FeedGenerator()
    feed.id(urlparse.urljoin(base_url, user + '.xml'))
    feed.title('Snapchat story for ' + user)
    feed.link( href=urlparse.urljoin(base_url, user + '.xml'), rel='self' )
    feed.language('en')
    feed.description('Snapchat media')


    # Iterate through files in path, sorted newest first (reverse lexical
    # order, which works because the filenames embed a unix timestamp),
    # then add matching media files to the feed
    files = sorted(os.listdir(path), reverse=True)

    for filename in files:
        split = filename.split('~')

        if split[0] != user:
            continue
        
        if os.path.splitext(filename)[1] in ['.mp4', '.jpg']:
            entry = feed.add_entry()
            entry.id(urlparse.urljoin(base_url, filename))
            entry.link(href=urlparse.urljoin(base_url, filename))
            entry.title(filename)

    
    # Write feed to disk
    feed.rss_file(os.path.join(path, user + '.xml'))
    date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

    if debug:
        print('{0}  Regenerated {1}'.format(date, urlparse.urljoin(base_url, 
                                                               user + '.xml')))
Code example #10
File: hypecast.py  Project: blackmad/hypecast
  def makeRss(self):
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine Robot Radio: ' + self.mode)
    fg.author( {'name':'David Blackmad','email':'*****@*****.**'} )
    fg.logo('http://dump.blackmad.com/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine Robot Radio: ' + self.mode)

    description = ' <br/>'.join(['%s. %s' % (index + 1, self.mk_song_id(s)) for index, s in enumerate(self.songs)])

    fe = fg.add_entry()
    fe.title(self.track_name)
    fe.description(description)
    fe.id(self.filename)
    # add length
    print(self.relative_dir)
    print(self.filename)
    fe.enclosure(url = 'http://hypecast.blackmad.com/%s' % (self.filename), type="audio/mpeg")

    rss_str = fg.rss_str()
    newItem = ET.fromstring(rss_str)[0].find('item')
    out = open(self.get_filename('xml'), 'wb')  # ET.tostring returns bytes
    out.write(ET.tostring(newItem))
    out.close()
    self.updateRss()
Code example #11
File: ming.py  Project: adow/ming
 def render_atom(self):
     fg = FeedGenerator()
     fg.id(self.site_url)
     fg.title(self.site_title)
     fg.link(href = self.site_url,rel = 'alternate')
     fg.link(href = self.site_url + 'atom.xml',rel = 'self')
     fg.language('zh-cn')
     link_list = ArticleManager.sharedManager().link_list()
     for link in link_list:
         article = ArticleManager.sharedManager().article_for_link(link)
         if not article:
             continue
         fe = fg.add_entry()
         fe.id(article.article_link)
         fe.link(link = {'href':self.site_url + article.article_link})
         fe.title(article.article_title)
         fe.description(article.article_subtitle or '')
         fe.author(name = article.author or '',
                 email = article.author_email or '')
         d = datetime.strptime(article.article_publish_date,'%Y-%m-%d') 
         pubdate = datetime(year = d.year, month = d.month, day = d.day,tzinfo = UTC(8))
         fe.pubdate(pubdate) 
         article.render_content_html()
         fe.content(content = article._content_html,
                 type = 'html')
     atom_feed = fg.atom_str(pretty = True)
     return atom_feed
Code example #12
File: controller.py  Project: alexandreblin/tvshows
def latestRss(userID):
    userID = userID.lower()

    shows = {}
    episodes = []
    today = date.today().strftime('%Y-%m-%d')
    for showID in series.getUserShowList(userID):
        shows[showID] = series.getShowInfo(userID, showID, withEpisodes=True, onlyUnseen=True)
        episodes.extend((showID, episode) for episode in shows[showID]['episodes'] if episode['airdate'] and airdateKey(episode['airdate']) < today)

    episodes.sort(key=episodeAirdateKey, reverse=True)

    feed = FeedGenerator()
    feed.id(userID)
    feed.title('%s\'s shows' % userID)
    feed.description('Unseen episodes')
    feed.link(href=request.url_root)
    feed.language('en')

    for showID, episode in episodes:
        entry = feed.add_entry()
        entry.id('%s/%s' % (showID, episode['episode_id']))
        entry.title('%s S%02dE%02d: %s' % (shows[showID]['name'], episode['season'], episode['episode'], episode['title']))

    return feed.rss_str(pretty=True)
Code example #13
File: bassdrive.py  Project: bspeice/elektricity
    def build_feed(self):
        "Build the feed given our existing URL"
        # Get all the episodes
        page_content = str(requests.get(self.url).content)
        parser = BassdriveParser()
        parser.feed(page_content)
        links = parser.get_links()

        # And turn them into something usable
        fg = FeedGenerator()
        fg.id(self.url)
        fg.title(self.title)
        fg.description(self.title)
        fg.author({'name': self.dj})
        fg.language('en')
        fg.link({'href': self.url, 'rel': 'alternate'})
        fg.logo(self.logo)

        for link in links:
            fe = fg.add_entry()
            fe.author({'name': self.dj})
            fe.title(link[0])
            fe.description(link[0])
            fe.enclosure(self.url + link[1], 0, 'audio/mpeg')

            # Bassdrive always uses date strings of
            # [yyyy.mm.dd] with 0 padding on days and months,
            # so that makes our lives easy
            date_start = link[0].find('[')
            date_str = link[0][date_start:date_start+12]
            published = datetime.strptime(date_str, '[%Y.%m.%d]')
            fe.pubdate(UTC.localize(published))
            fe.guid(link[0])

        return fg
Code example #14
File: feedTumblr.py  Project: fernand0/scripts
def main():

    client = moduleSocial.connectTumblr()

    posts = client.posts('fernand0')
    
    fg = FeedGenerator()
    fg.id(posts['blog']['url'])
    fg.title(posts['blog']['title'])
    fg.author( {'name':posts['blog']['name'],'email':'*****@*****.**'} )
    fg.link( href=posts['blog']['url'], rel='alternate' )
    fg.subtitle('Alternate feed due to Tumblr GDPR restrictions')
    fg.language('en')

    print(len(posts['posts']))
    for i in range(len(posts['posts'])):
        fe = fg.add_entry()
        print(posts['posts'][i]['post_url'])
        if 'title' in posts['posts'][i]:
            title = posts['posts'][i]['title']
            print('T', posts['posts'][i]['title'])
        else:
            title = posts['posts'][i]['summary'].split('\n')[0]
            print('S', posts['posts'][i]['summary'].split('\n')[0])
        fe.title(title)
        fe.link(href=posts['posts'][i]['post_url'])
        fe.id(posts['posts'][i]['post_url'])

    print(fg.atom_file('/var/www/html/elmundoesimperfecto/tumblr.xml'))

    sys.exit()
Code example #15
File: slack.py  Project: Konubinix/Devel
def rss(conversation,
        url,
        author_name,
        author_email,
        title,
        subtitle,
        language,
        output_path):
    """Export all the links of the conversation in a simple RSS feed"""
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()
    fg.id(url)
    fg.title(title)
    fg.author(
        {
            'name': author_name,
            'email': author_email,
        }
    )
    fg.link(
        href=url,
        rel='alternate'
    )
    if subtitle:
        fg.subtitle(subtitle)
    fg.language(language)
    for message in conversation.history():
        match = re.search(
            "^.*<(?P<url>[^>|]+)\|?(?P<title>[^>]+)?>.*$",
            message.data["text"],
            flags=re.MULTILINE
        )
        if match is not None:
            fe = fg.add_entry()
            link = match.group("url")
            title = match.group("title") or link
            date = naive_to_local(datetime.datetime.fromtimestamp(float(message.data["ts"])))
            description = message.data["text"]
            if "attachments" in message.data:
                attachment = [a for a in message.data["attachments"] if
                              a["title_link"] == link][0]
                title += " | " + attachment["title"]
                description += """

""" + attachment["text"]
            fe.id(link)
            fe.title(title)
            fe.link(href=link)
            fe.published(date)
            user = config.slack.get_user(message.data["user"])
            author = {
                "name": message.data["username"],
                "email": user.email or "noemail",
            }
            fe.author(author)
            fe.description(description)
    fg.rss_file(output_path, pretty=True)
Code example #16
File: bano.py  Project: EliteTK/bano
def make_feedgenerator(conf):
    feedgen = FeedGenerator()
    feedgen.title('Lojban twitter feed in {lang}'.format(lang=conf['long']))
    feedgen.description('Twitter Atom feed in {lang} about the constructed language Lojban'.format(lang=conf['long']))
    feedgen.language(conf['short'])
    feedgen.link(href='{}.atom.xml'.format(conf['short']))
    feedgen.id('{}.atom.xml'.format(conf['short']))
    feedgen.generator(generator='bano', version='0.0.0', uri='https://github.com/kyrias/bano')
    return feedgen
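
A hypothetical call to make_feedgenerator(); the shape of the conf dict ({'short': ..., 'long': ...}) is inferred from the keys the function reads:

feedgen = make_feedgenerator({'short': 'en', 'long': 'English'})
feedgen.atom_file('en.atom.xml')  # write the Atom feed to disk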
Code example #17
File: mockfeed_gen.py  Project: lazybios/ReadKeyRSS
def create_mock_fg():
    fg = FeedGenerator()
    fg.id(FEED_ID)
    fg.title('Some Test Feed')
    fg.author({'name': 'Edfward', 'email': '*****@*****.**'})
    fg.subtitle('Test feed subtitle!')
    fg.link(href=FEED_ID, rel='self')
    fg.language('en')
    return fg
Code example #18
File: rss.py  Project: hackcyprus/jobber
def build_feed_generator(query=None):
    gen = FeedGenerator()
    gen.title(FEED_TITLE)
    gen.subtitle(FEED_SUBTITLE)
    gen.language(FEED_LANG)

    feed_link = url_for('views.feed', query=query, _external=True)
    gen.link(href=feed_link, rel='self', type='application/rss+xml')

    return gen
Code example #19
File: rss.py  Project: lmergner/contrivers
 def create_generator(self):
     """ Setup and return a feedgen generator object """
     fg = FeedGenerator()
     fg.title(self.title)
     fg.id(self.root_url)
     fg.link(href=self.root_url, rel='alternate')
     fg.language(u'en')
     fg.description(self.description)
     fg.rights(u"Copyright Contrivers' Review {}".format(datetime.datetime.now().year))
     return fg
Code example #20
def generate_empty_rss_feed(group_name):

    fg = FeedGenerator()
    fg.title("VTS Raspored - Grupa " + group_name)
    fg.author( {'name':'Veselin Romic','email':'*****@*****.**'} )
    fg.language('sr')
    fg.description("Automatski se salje notifikacija kad se promeni grupni raspored.")
    fg.link(href='https://eref.vts.su.ac.rs/')

    return fg.rss_str(pretty=True)
Code example #21
def main(argv):
    ap = argparse.ArgumentParser(
        description='''
Render RSS and Atom feeds from a CSV of food inspection data.
''')
    ap.add_argument(
        '-v', '--verbose', action='count', dest='verbosity', default=0,
        help='increase global logging verbosity; can be used multiple times')
    ap.add_argument(
        '-f', '--format', choices=['rss', 'atom'], default='atom',
        help='''
specify the format to use when rendering the feed (default: %(default)s)
''')
    ap.add_argument(
        '-n', '--num_incidents', metavar='<num>', type=int, default=10,
        help='render <num> recent incidents in the feed (default: %(default)s)')
    ap.add_argument(
        'flavor', nargs='?', default='all', choices=['all', 'failures'],
        help='select the flavor of feed to render (default: %(default)s)')

    args = ap.parse_args()

    logging.basicConfig(
            level=logging.ERROR - args.verbosity * 10,
            style='{',
            format='{}: {{message}}'.format(ap.prog))

    fg = FeedGenerator()
    fg.id('http://pgriess.github.io/dallas-foodscores/')
    fg.link(href=fg.id(), rel='self')
    fg.title('Dallas Food Inspection Scores')
    fg.subtitle('''
Food inspection scores from the official City of Dallas dataset; updated daily
''')
    fg.description(fg.subtitle())
    fg.language('en')
    fg.author(
        name='Peter Griess',
        email='*****@*****.**',
        uri='https://twitter.com/pgriess')

    for i in get_inspections_to_feed(sys.stdin, args.num_incidents,
            args.flavor):
        fe = fg.add_entry()
        fe.title('{name} at {address} scored {score}'.format(
            name=i.name, address=i.address, score=i.score))
        fe.id(fg.id() + '#!/' + str(abs(hash(i))))
        fe.link(href=fe.id(), rel='alternate')
        fe.content(fe.title())
        fe.published(TZ.localize(i.date))

    if args.format == 'atom':
        print(fg.atom_str(pretty=True))
    else:
        print(fg.rss_str(pretty=True))
Code example #22
File: views.py  Project: ben174/angrates
def rss(request):
    # track it!
    #   v=1              // Version.
    #   &tid=UA-XXXXX-Y  // Tracking ID / Property ID.
    #   &cid=555         // Anonymous Client ID.

    #   &t=pageview      // Pageview hit type.
    #   &dh=mydemo.com   // Document hostname.
    #   &dp=/home        // Page.
    #   &dt=homepage     // Title.
    angrates_uuid = uuid.UUID('f93c5388-f60b-5159-bbfc-d08d6f7b401f')
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')

    cid = uuid.uuid5(angrates_uuid, ip)

    data = {
        'v': 1,
        'tid': 'UA-19269567-1',
        'cid': cid,
        't': 'pageview',
        'dh': 'armstrongandgettybingo.com',
        'dp': '/rss/',
        'dt': 'Podcast',
    }

    requests.post('https://www.google-analytics.com/collect', data=data)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://www.armstrongandgettybingo.com/rss')
    fg.podcast.itunes_category('News & Politics', 'Conservative (Right)')
    fg.podcast.itunes_explicit('no')
    fg.title('The Armstrong and Getty Show (Bingo)')
    fg.author( {'name':'Ben Friedland','email':'*****@*****.**'} )
    fg.link( href='http://www.armstrongandgettybingo.com', rel='alternate' )
    fg.logo('https://s3-us-west-1.amazonaws.com/bencast/bingologo.png')
    fg.subtitle('Armstrong and Getty Bingo')
    fg.description('The Armstrong and Getty Show - Unofficial Feed including Archives back to 2001.')
    fg.link( href='http://www.armstrongandgettybingo.com/rss', rel='self' )
    fg.language('en')
    pacific = pytz.timezone('America/Los_Angeles')

    for hour in Hour.objects.all().order_by('-pub_date'):
        fe = fg.add_entry()
        fe.id(hour.link)
        fe.title(hour.title)
        fe.description(hour.description)
        fe.enclosure(hour.link, 0, 'audio/mpeg')
        fe.published(pacific.localize(hour.pub_date))
    return HttpResponse(fg.rss_str(pretty=True), content_type='application/rss+xml')
Code example #23
File: feed_package.py  Project: accre/accre.github.io
def create_fg():
  # Create the feed
  fg = FeedGenerator()
  fg.id("http://www.accre.vanderbilt.edu")
  fg.title("ACCRE's Status Feed")
  fg.author(dict(name="Josh Arnold", email="*****@*****.**"))
  fg.link(href="http://www.accre.vanderbilt.edu", rel="alternate")
  fg.logo("http://www.accre.vanderbilt.edu/"
          "wp-content/themes/ashford/favicon.ico")
  fg.subtitle("ACCRE's Status Feed")
  fg.language('en')
  return fg
Code example #24
File: tasks.py  Project: jdelman/potato
def setup_feed():
    fg = FeedGenerator()

    fg.load_extension("podcast")

    fg.language("en")
    fg.id("https://jdelman.me/potato")
    fg.author(name="Potato", email="*****@*****.**")
    fg.link(href="https://jdelman.me/potato", rel="alternate")
    fg.logo("https://jdelman.me/static/potato.jpg")
    fg.title("Potato - Josh's Saved Videos")
    fg.subtitle("Automatically generated RSS.")

    return fg
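
Callers would add entries to the generator returned by setup_feed() and then serialize it; a minimal sketch with made-up entry values:

fg = setup_feed()
fe = fg.add_entry()
fe.id('https://jdelman.me/potato/example')  # hypothetical entry id
fe.title('Example video')
print(fg.rss_str(pretty=True))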
Code example #25
File: handlers.py  Project: piraz/firenado
    def get(self):
        fg = FeedGenerator()
        fg.id("http://test.ts")
        fg.title("My Test Feed")
        fg.icon("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
        fg.author({'name': "The Author", 'email': "*****@*****.**"})

        fg.link(href="http://example.org/index.atom?page=2", rel="next")

        fg.link(href="http://test.ts", rel="alternate")
        fg.logo("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
        fg.description("Este é o monstro do lago 1")
        fg.subtitle("This is an example feed!")
        fg.language("en-us")
        # Handle this:
        # <sy:updatePeriod>hourly</sy:updatePeriod>
        # <sy:updateFrequency>1</sy:updateFrequency>

        fg.lastBuildDate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        fi = fg.add_item()
        fi.id("http://test.ts/id/1", )
        #fi.link(link="http://test.ts/id/1")
        fi.title("Monstro do Lago 1")
        fi.description("Este é o monstro do lago 1")
        fi.comments("http://test.ts/id/1/comments")
        fi.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        fi = fg.add_item()
        fi.id("http://test.ts/id/2")
        fi.title("Monstro do Lago 2")
        fi.description("Este é o monstro do lago 2")
        fi.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        #test = fg.atom_str(pretty=True)

        rss_str = fg.rss_str(pretty=True)
        self.set_header("Content-Type", 'application/xml; charset="utf-8"')
        #self.set_header("Content-Disposition",
        # "attachment; filename='test.xml'")
        self.write(rss_str)


        #if regexp.search(word) is not None:
        #    print('matched')
        if self.is_browser_mobile():
            print("buu")
        else:
            print(self.request.headers["User-Agent"])
Code example #26
File: builder.py  Project: brandon-rhodes/blog
def render_feed(text_paths, outpath):
    # http://rhodesmill.org/brandon/feed
    # http://rhodesmill.org/brandon/category/python/feed
    # http://rhodesmill.org/brandon/feed/atom/

    t0 = datetime.min.time()

    def fix(d):
        dt = datetime.combine(d, t0)
        return timezone('US/Eastern').localize(dt)

    posts = [post_info(path) for path in text_paths if date_of(path)]
    posts = sorted(posts, key=lambda post: post['date'])
    posts = posts[-1:]
    most_recent_date = max(post['date'] for post in posts)

    def full(url):
        return 'http://rhodesmill.org/' + url.lstrip('/')

    fg = FeedGenerator()
    fg.id(full('/'))
    fg.author({'name': 'Brandon Rhodes'})
    fg.language('en')
    fg.link(href=full('/brandon/'), rel='alternate')
    if 'python' in outpath:
        fg.link(href=full('/brandon/category/python/feed/'), rel='self')
    else:
        fg.link(href=full('/brandon/feed/'), rel='self')
    fg.subtitle('Thoughts and ideas from Brandon Rhodes')
    fg.title("Let's Discuss the Matter Further")
    fg.updated(fix(most_recent_date))

    for post in posts:
        url = full(post['url_path'])
        excerpt = truncate_at_more(post['body_html'], url)

        fe = fg.add_entry()
        fe.content(excerpt, type='html')
        fe.guid(url, permalink=True)
        fe.id(url)
        fe.link({'href': url})
        fe.published(fix(post['date']))
        fe.title(post['title'])
        fe.updated(fix(post['date']))

    rss = fg.rss_str(pretty=True)
    fg.link(href=full('/brandon/feed/atom/'), rel='self', replace=True)
    atom = fg.atom_str(pretty=True)

    return rss, atom
Code example #27
 def get_feedgenerator(self):
     fg = FeedGenerator()
     fg.id('http://pod.w-me.net')
     fg.title('W-Me Podcast')
     fg.description('W-Me podcast')
     fg.author( {'name':'Alex Dai','email':'*****@*****.**'} )
     fg.link( href='http://pod.w-me.net', rel='alternate' )
     fg.logo('http://pandodaily.files.wordpress.com/2012/08/shutterstock_58664.jpg')
     #fg.subtitle('This is a cool feed!')
     fg.link( href='http://pod.w-me.net/feed.atom', rel='self' )
     fg.language('en')
     fg.load_extension('podcast')
     fg.podcast.itunes_category('Technology', 'Podcasting')   
     return fg
Code example #28
def feedAtom():
    todos = session.query(Todo).join(Todo.images).all()
    fg = FeedGenerator()
    fg.title('CityTodo Atom-Feed')
    fg.language('en')
    fg.id('http://localhost:5000/feed/rss')
    fg.link( href='http://localhost:5000/feed/rss', rel='self' )
    for todo in todos:
        fe = fg.add_entry()
        fe.id(str(todo.id))
        fe.link(href='http://localhost:5000/todo/'+str(todo.id), rel='self' )
        fe.title(todo.name)
        fe.content(todo.description)
    atomfeed = fg.atom_str(pretty=True)
    return atomfeed
Code example #29
def daily_to_rss(daily):
    feed_generator = FeedGenerator()
    feed_generator.id(daily.url)
    feed_generator.link(href=daily.url, rel='alternate')
    feed_generator.description(u'RSS feed generated from: {}'.format(daily.url))
    feed_generator.title(u'Daily Activity Log: {}'.format(daily.url))
    feed_generator.language('en')

    for entry in daily.entries():
        feed_entry = feed_generator.add_entry()
        feed_entry.title(u'{}: {}'.format(entry.type, entry.subject))
        feed_entry.description(description=rss_description_from(entry))
        feed_entry.pubdate(entry.date_rfc2822())

    return feed_generator.rss_str(pretty=True)
Code example #30
File: sphinxfeed.py  Project: lsaffre/sphinxfeed
def create_feed_container(app):
    #from feedformatter import Feed
    feed = FeedGenerator()
    feed.title(app.config.project)
    feed.link(href=app.config.feed_base_url)
    feed.author(dict(name=app.config.feed_author))
    feed.description(app.config.feed_description)
    
    if app.config.language:
        feed.language(app.config.language)
    if app.config.copyright:
        feed.copyright(app.config.copyright)
    app.builder.env.feed_feed = feed
    if not hasattr(app.builder.env, 'feed_items'):
        app.builder.env.feed_items = {}
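
create_feed_container() reads several values from the Sphinx config; a sketch of the corresponding conf.py entries, with names taken from the attribute accesses above and placeholder values:

# conf.py (placeholder values)
project = 'My Project'
language = 'en'
copyright = '2024, Jane Doe'
feed_base_url = 'https://example.com'
feed_author = 'Jane Doe'
feed_description = 'Feed of recent posts'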
Code example #31
def execute():
    feed_items = {}
    added_entry_urls = set()
    sections = {
        'jap': u"日本の昔話 (Japanese Legends)",
        'minwa': u"日本の民話 (Japanese Folktales)",
        'world': u"世界の昔話 (World Folktales)",
        'aesop': u"イソップ童話 (Aesop's Fables)",
        'kobanashi': u"江戸小話 (Edo Short Stories)",
        'kaidan': u"百物語 (Japanese Ghost Stories)",
    }
    for section in sections:
        feed_items[section] = []
    for batch in [_month_urls(section) for section in sections]:
        for section, month, month_url in batch:
            root = _fetch_root(month_url)

            for link in root.cssselect('a'):
                url = urljoin(month_url, link.get('href'))
                if url in added_entry_urls:
                    continue
                if re.match(
                        r'^http://hukumusume.com/douwa/pc/(jap|minwa|world|aesop|kobanashi|kaidan)/{:02}/\w+\.html?$'
                        .format(month),
                        url,
                ):
                    title = link.text
                    if not title:
                        continue

                    table = link.xpath('./ancestor::table[1]')[0]
                    texts = list(table.itertext())
                    description = ''
                    for text1, text2 in zip(texts, texts[1:]):
                        if u'内容 :' in text1:
                            description = (text1 + text2)[len(u'内容 :'):]

                    try:
                        image_relative_url = table.cssselect('img')[0].get(
                            'src')
                        # Avoid weird case with "001", "002" links in Aesop feed (and maybe elsewhere).
                        if 'corner' in image_relative_url:
                            continue
                        image_url = urljoin(month_url, image_relative_url)
                    except IndexError:
                        # Every one has an image.
                        continue

                    feed_items[section].append({
                        'url': url,
                        'title': link.text,
                        'description': description,
                        'image_url': image_url,
                    })
                    added_entry_urls.add(url)

    for section, title in sections.items():
        fg = FeedGenerator()
        fg.id('http://hukumusume.com/douwa/pc/{}/index.html'.format(section))
        fg.title(title)
        fg.language('ja')

        for item in feed_items[section]:
            entry = fg.add_entry()
            entry.id(item['url'])
            entry.title(item['title'])
            entry.link(href=item['url'], rel='alternate')
            entry.summary(item['description'])
            entry.content('<img src="{}" />'.format(item['image_url']),
                          type='CDATA')

        fg.atom_file(
            'manabi/static/reader/feeds/hukumusume-{}.rss'.format(section))
Code example #32

episodedir = 'episodes'

with open('show.json', encoding="utf-8") as f1:
    showinfo = json.load(f1)

fg = FeedGenerator()

hosthead = showinfo["host-address"]
fg.title(showinfo["title"])
fg.author(showinfo["author"])
fg.logo(hosthead + showinfo["logo"]["url"])
fg.subtitle(showinfo["description"])
fg.link(href=showinfo["link"], rel='self')
fg.language(showinfo["language"])

fg.load_extension('podcast')

fg.podcast.itunes_category(showinfo["itunes-category"]["cat1"],
                           showinfo["itunes-category"]["cat2"])
fg.podcast.itunes_owner(showinfo["author"]["name"],
                        showinfo["author"]["email"])
fg.podcast.itunes_image(hosthead + showinfo["logo"]["url"])

for directory, subdirectories, files in os.walk(episodedir):
    for file in files:
        if file.endswith('.json'):
            filename = episodedir + "/" + file
            with open(filename) as f2:
                episodeinfo = json.load(f2)
Code example #33
File: rss.py  Project: davidp94/granary
def from_activities(activities,
                    actor=None,
                    title=None,
                    feed_url=None,
                    home_page_url=None,
                    hfeed=None):
    """Converts ActivityStreams activities to an RSS 2.0 feed.

  Args:
    activities: sequence of ActivityStreams activity dicts
    actor: ActivityStreams actor dict, the author of the feed
    title: string, the feed title
    feed_url: string, the URL for this RSS feed
    home_page_url: string, the home page URL
    hfeed: dict, parsed mf2 h-feed, if available

  Returns:
    unicode string with RSS 2.0 XML
  """
    try:
        iter(activities)
    except TypeError:
        raise TypeError('activities must be iterable')

    if isinstance(activities, (dict, basestring)):
        raise TypeError('activities may not be a dict or string')

    fg = FeedGenerator()
    fg.id(feed_url)
    assert feed_url
    fg.link(href=feed_url, rel='self')
    if home_page_url:
        fg.link(href=home_page_url, rel='alternate')
    # TODO: parse language from lang attribute:
    # https://github.com/microformats/mf2py/issues/150
    fg.language('en')
    fg.generator('granary', uri='https://granary.io/')

    hfeed = hfeed or {}
    actor = actor or {}
    image = util.get_url(hfeed, 'image') or util.get_url(actor, 'image')
    if image:
        fg.image(image)

    props = hfeed.get('properties') or {}
    content = microformats2.get_text(util.get_first(props, 'content', ''))
    summary = util.get_first(props, 'summary', '')
    desc = content or summary or '-'
    fg.description(desc)  # required
    fg.title(title or util.ellipsize(desc))  # required

    latest = None
    enclosures = False
    for activity in activities:
        obj = activity.get('object') or activity
        if obj.get('objectType') == 'person':
            continue

        item = fg.add_entry()
        url = obj.get('url')
        item.id(obj.get('id') or url)
        item.link(href=url)
        item.guid(url, permalink=True)

        item.title(obj.get('title') or obj.get('displayName')
                   or '-')  # required
        content = microformats2.render_content(
            obj, include_location=True,
            render_attachments=False) or obj.get('summary')
        if content:
            item.content(content, type='CDATA')

        item.category(
            [{
                'term': t['displayName']
            } for t in obj.get('tags', [])
             if t.get('displayName') and t.get('verb') not in ('like', 'react',
                                                               'share')])

        author = obj.get('author', {})
        item.author({
            'name': author.get('displayName') or author.get('username'),
            'uri': author.get('url'),
        })

        published = obj.get('published') or obj.get('updated')
        if published:
            try:
                dt = mf2util.parse_datetime(published)
                if not isinstance(dt, datetime):
                    dt = datetime.combine(dt, time.min)
                if not dt.tzinfo:
                    dt = dt.replace(tzinfo=util.UTC)
                item.published(dt)
                if not latest or dt > latest:
                    latest = dt
            except ValueError:  # bad datetime string
                pass

        for att in obj.get('attachments', []):
            stream = util.get_first(att, 'stream') or att
            if not stream:
                continue

            url = stream.get('url') or ''
            mime = mimetypes.guess_type(url)[0] or ''
            if (att.get('objectType') in ENCLOSURE_TYPES
                    or mime and mime.split('/')[0] in ENCLOSURE_TYPES):
                enclosures = True
                item.enclosure(url=url, type=mime,
                               length='REMOVEME')  # TODO: length (bytes)

                item.load_extension('podcast')
                duration = stream.get('duration')
                if duration:
                    item.podcast.itunes_duration(duration)

    if enclosures:
        fg.load_extension('podcast')
        fg.podcast.itunes_author(
            actor.get('displayName') or actor.get('username'))
        if summary:
            fg.podcast.itunes_summary(summary)
        fg.podcast.itunes_explicit('no')
        fg.podcast.itunes_block(False)

    if latest:
        fg.lastBuildDate(latest)

    return fg.rss_str(pretty=True).decode('utf-8').replace(
        ' length="REMOVEME"', '')
Code example #34
File: renderers.py  Project: jhonsnow456/bodhi
    def render(data, system):
        """
        Render the given data as an RSS view.

        If the request's content type is set to the default, this function will change it to
        application/rss+xml.

        Args:
            data (dict): A dictionary describing the information to be rendered. The information can
                be different types of objects, such as updates, users, comments, or overrides.
            system (pyramid.events.BeforeRender): Used to get the current request.
        Returns:
            str: An RSS document representing the given data.
        """
        request = system.get('request')
        if request is not None:
            response = request.response
            ct = response.content_type
            if ct == response.default_content_type:
                response.content_type = 'application/rss+xml'

        if 'updates' in data:
            key = 'updates'
            feed_title = 'Released updates'
        elif 'users' in data:
            key = 'users'
            feed_title = 'Bodhi users'
        elif 'comments' in data:
            key = 'comments'
            feed_title = 'User comments'
        elif 'overrides' in data:
            key = 'overrides'
            feed_title = 'Update overrides'
        else:
            # This is a request we don't know how to render. Let's return BadRequest and log.
            log.debug('Unable to render RSS feed for data: %s', data)
            # See if we have a request so we can set a code without raising an Exception
            if request is not None:
                response.status = HTTPBadRequest.code
                return 'Invalid RSS feed request'
            else:
                raise HTTPBadRequest('Invalid RSS feed request')

        feed_description_list = []
        for k in request.GET.keys():
            feed_description_list.append('%s(%s)' % (k, request.GET[k]))
        if feed_description_list:
            feed_description = 'Filtered on: ' + ', '.join(feed_description_list)
        else:
            feed_description = "All %s" % (key)

        feed = FeedGenerator()
        feed.title(feed_title)
        feed.link(href=request.url, rel='self')
        feed.description(feed_description)
        feed.language('en')

        def linker(route, param, key):
            def link_dict(obj):
                return dict(href=request.route_url(route, **{param: obj[key]}))
            return link_dict

        def describe_update(alias, notes, builds):
            """
            Wrap calls to operator.itemgetter to retrieve notes and builds list.

            Methods are used to fill feed entry values, so we must use a wrapper
            to get an HTML formatted description from the `notes` and the `builds`
            properties of the update.

            For example:
            getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds'))
            description_value = getter(update_data)

            Args:
                alias (operator.itemgetter): A callable object which returns update alias
                    as string.
                notes (operator.itemgetter): A callable object which returns update notes
                    as string.
                builds (operator.itemgetter): A callable object which returns a list of builds
                    associated to the update.
            Returns:
                function: A function which accepts a dict representing an update as parameter.
            """
            def describe(*args, **kwargs):
                text = f'# {alias(*args, **kwargs)}\n'
                text += '## Packages in this update:\n'
                for p in builds(*args, **kwargs):
                    text += f'* {p.nvr}\n'
                text += f'## Update description:\n{notes(*args, **kwargs)}'
                return markup(None, text, bodhi=False)
            return describe

        getters = {
            'updates': {
                'title': operator.itemgetter('title'),
                'link': linker('update', 'id', 'alias'),
                'description': describe_update(operator.itemgetter('alias'),
                                               operator.itemgetter('notes'),
                                               operator.itemgetter('builds')),
                'pubDate': lambda obj: utc.localize(obj['date_submitted']),
            },
            'users': {
                'title': operator.itemgetter('name'),
                'link': linker('user', 'name', 'name'),
                'description': operator.itemgetter('name'),
            },
            'comments': {
                'title': operator.itemgetter('rss_title'),
                'link': linker('comment', 'id', 'id'),
                'description': operator.itemgetter('text'),
                'pubDate': lambda obj: utc.localize(obj['timestamp']),
            },
            'overrides': {
                'title': operator.itemgetter('nvr'),
                'link': linker('override', 'nvr', 'nvr'),
                'description': operator.itemgetter('notes'),
                'pubDate': lambda obj: utc.localize(obj['submission_date']),
            },
        }

        for value in reversed(data[key]):
            feed_item = feed.add_item()
            for name, getter in getters[key].items():
                # Because we have to use methods to fill feed entry attributes,
                # it's done by getting methods by name and calling them
                # on the same line.
                feed_value = getter(value)
                if name == "description":
                    feed_value = INVALID_CHARS_RE.sub("", feed_value)
                getattr(feed_item, name)(feed_value)

        return feed.rss_str()
Code example #35
File: script.py  Project: domingoclub/dok
# Compile, minimize css
if os.path.isfile(SCSS_FILE):
    compile_scss(SCSS_MAP)
    minify_css(CSS_MAP)
    print(":: CSS — compiled and minified")

# ------------------------------------------------
# RSS feed
# ------------------------------------------------

fg = FeedGenerator()
fg.title(settings['title'])
fg.author({'name': settings['title']})
fg.link(href=settings['main_url'], rel='alternate')
fg.subtitle(settings['description'])
fg.language(settings['language'])
for article in articles:
    date = articles[article]["last_update"] if articles[article][
        "last_update"] else articles[article]["publication_date"]
    if not articles[article]["draft"] and articles[article]["content"]:
        fe = fg.add_entry()
        fe.title(articles[article]["title"])
        fe.link(href=settings['main_url'] + '/' + articles[article]["slug"] +
                '.html')
        fe.author({'name': settings['title']})
        fe.description(articles[article]["content"][:1800] + '...')
        fe.pubDate(
            datetime.strptime(date, '%d/%m/%Y').strftime(
                '%a %b %d %H:%M:%S %Y') + ' +0200')

# Serialize the feed only after all entries have been added
rssfeed = fg.rss_str(pretty=True)
Code example #36
def gen_podcast_feed(url):
    fg = FeedGenerator()
    fg.load_extension('podcast')

    #url = "https://www.ivoox.com/audios-biblioteca-tizca_s0_f2275262_p2_1.html" #Auto-load
    #url = "https://www.ivoox.com/podcast-biblioteca-tizca_sq_f1370364_1.html" #formato normal
    #url = 'https://www.ivoox.com/escuchar-lvdh_nq_189868_1.html' # formato canal - Hay que entrar en el podcast
    #url= 'https://www.ivoox.com/biblioteca-de-trantor_bk_list_39874_1.html' #lista
    #url = 'https://www.ivoox.com/escuchar_bk_list_5758_1.html' #lista

    pod_type = ''
    i = 1
    url = recortar_url(url)
    fp = requests.get(url + str(i) + ".html")
    soup = BeautifulSoup(fp.content, 'html.parser', from_encoding="utf8")

    #soup = soup.find('div',{"id":"main"})

    if url.find('_bk_list_') >= 0:
        pod_type = 'lista'
        soup = soup.find('div', {"id": "main"})
    elif url.find('-audios-mp3_rf_') >= 0:
        pod_type = 'capitulo'
    else:
        pod_type = url[url.find('.com/') + 5:url.find('-')]
        soup = soup.find('div', {"id": "main"})

    if pod_type in ('podcast', 'lista', 'escuchar'):
        search_term = soup.find(class_="row").findChildren()
    if pod_type == 'audios':
        search_term = soup.find(id="channelPageAudios").findChildren()

    if pod_type == 'escuchar':
        url = podcast_election(soup, url)
        fp = requests.get(url)
        url = recortar_url(url)
        soup = BeautifulSoup(fp.content, 'html.parser')
        soup = soup.find('div', {"id": "main"})

    if pod_type == 'capitulo':
        """ fg.id(soup.find('a', {'itemprop': 'item'}).get('href'))
        fg.logo(soup.find('img', {'class': 'main'}).get('src'))
        fg.title(soup.find('a',{'itemprop': 'item'}).get('title'))
        #fg.id(soup.find('a',{'itemprop':'item'}).get('title'))
        fg.author({'name':soup.find('h1',{'class':'pull-left'}).text.strip()})
        fg.subtitle(soup.find('p', {'class': 'description'}).text.strip())
        fg.link(href=soup.find('a', {'itemprop': 'item'}).get('href'),rel="alternate")

        fe = fg.add_entry(order='append')
        fe.id(url)
        fe.content( soup.find('h1', {'class': 'pull-left'}).text.strip())
        fe.enclosure(make_mp3_url(url+'1.html'), 0, 'audio/mpeg') """

        download_mp3(make_mp3_url(url + '1.html'),
                     soup.find('h1', {
                         'class': 'pull-left'
                     }).text.strip())
    else:
        fg.id(soup.find('div', {"class": "wrapper"}).a.get('href'))
        fg.title(soup.find(id="list_title_new").text)
        fg.author({'name': soup.find(class_="info").a.text})
        fg.link(href=soup.find(class_="wrapper").a.get('href'),
                rel='alternate')
        fg.logo(soup.find(class_="imagen-ficha").img.get('src'))
        if soup.find(class_="overview").text.strip():
            fg.subtitle(soup.find(class_="overview").text.strip())
        else:
            fg.subtitle(' ')

        fg.language('es')

        while search_term:
            if soup.find(class_="jumbotron"):
                break
            fg = sacarItems(soup, fg)
            i = i + 1

            fp = requests.get(url + str(i) + ".html")
            soup = BeautifulSoup(fp.content.decode('utf8'), 'html.parser')
            soup = soup.find('div', {"id": "main"})

            if pod_type == 'audios':
                search_term = soup.find(id="channelPageAudios").findChildren()
            if pod_type in ('podcast', 'lista', 'escuchar'):
                search_term = soup.find(class_="row").findChildren()

        fg.rss_file('podcast.rss', pretty=True)
Code example #37
def main():
    if len(sys.argv) != 2 or not (
            sys.argv[1].endswith('rss') or sys.argv[1].endswith('atom')
            or sys.argv[1] == 'torrent' or sys.argv[1] == 'podcast'):
        print(USAGE)
        exit()

    arg = sys.argv[1]

    fg = FeedGenerator()
    fg.id('http://lernfunk.de/_MEDIAID_123')
    fg.title('Testfeed')
    fg.author({'name': 'Lars Kiesow', 'email': '*****@*****.**'})
    fg.link(href='http://example.com', rel='alternate')
    fg.category(term='test')
    fg.contributor(name='Lars Kiesow', email='*****@*****.**')
    fg.contributor(name='John Doe', email='*****@*****.**')
    fg.icon('http://ex.com/icon.jpg')
    fg.logo('http://ex.com/logo.jpg')
    fg.rights('cc-by')
    fg.subtitle('This is a cool feed!')
    fg.link(href='http://larskiesow.de/test.atom', rel='self')
    fg.language('de')
    fe = fg.add_entry()
    fe.id('http://lernfunk.de/_MEDIAID_123#1')
    fe.title('First Element')
    fe.content(
        '''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Tamen
            aberramus a proposito, et, ne longius, prorsus, inquam, Piso, si
            ista mala sunt, placet. Aut etiam, ut vestitum, sic sententiam
            habeas aliam domesticam, aliam forensem, ut in fronte ostentatio
            sit, intus veritas occultetur? Cum id fugiunt, re eadem defendunt,
            quae Peripatetici, verba.''')
    fe.summary(u'Lorem ipsum dolor sit amet, consectetur adipiscing elit…')
    fe.link(href='http://example.com', rel='alternate')
    fe.author(name='Lars Kiesow', email='*****@*****.**')

    if arg == 'atom':
        print_enc(fg.atom_str(pretty=True))
    elif arg == 'rss':
        print_enc(fg.rss_str(pretty=True))
    elif arg == 'podcast':
        # Load the podcast extension. It will automatically be loaded for all
        # entries in the feed, too. Thus also for our “fe”.
        fg.load_extension('podcast')
        fg.podcast.itunes_author('Lars Kiesow')
        fg.podcast.itunes_category('Technology', 'Podcasting')
        fg.podcast.itunes_explicit('no')
        fg.podcast.itunes_complete('no')
        fg.podcast.itunes_new_feed_url('http://example.com/new-feed.rss')
        fg.podcast.itunes_owner('John Doe', '*****@*****.**')
        fg.podcast.itunes_summary('Lorem ipsum dolor sit amet, consectetur ' +
                                  'adipiscing elit. Verba tu fingas et ea ' +
                                  'dicas, quae non sentias?')
        fg.podcast.itunes_type('episodic')
        fe.podcast.itunes_author('Lars Kiesow')
        fe.podcast.itunes_season(1)
        fe.podcast.itunes_episode(1)
        fe.podcast.itunes_title('First podcast episode')
        fe.podcast.itunes_episode_type('full')
        print_enc(fg.rss_str(pretty=True))

    elif arg == 'torrent':
        fg.load_extension('torrent')
        fe.link(href='http://example.com/torrent/debian-8-netint.iso.torrent',
                rel='alternate',
                type='application/x-bittorrent, length=1000')
        fe.torrent.filename('debian-8.4.0-i386-netint.iso.torrent')
        fe.torrent.infohash('7661229811ef32014879ceedcdf4a48f256c88ba')
        fe.torrent.contentlength('331350016')
        fe.torrent.seeds('789')
        fe.torrent.peers('456')
        fe.torrent.verified('123')
        print_enc(fg.rss_str(pretty=True))

    elif arg.startswith('dc.'):
        fg.load_extension('dc')
        fg.dc.dc_contributor('Lars Kiesow')
        if arg.endswith('.atom'):
            print_enc(fg.atom_str(pretty=True))
        else:
            print_enc(fg.rss_str(pretty=True))

    elif arg.startswith('syndication'):
        fg.load_extension('syndication')
        fg.syndication.update_period('daily')
        fg.syndication.update_frequency(2)
        fg.syndication.update_base('2000-01-01T12:00+00:00')
        if arg.endswith('.rss'):
            print_enc(fg.rss_str(pretty=True))
        else:
            print_enc(fg.atom_str(pretty=True))

    elif arg.endswith('atom'):
        fg.atom_file(arg)

    elif arg.endswith('rss'):
        fg.rss_file(arg)
Code example #38
File: feed_itunes.py  Project: jkalamarz/WeszloFeed
        return os.path.getctime(path_to_file)
    else:
        stat = os.stat(path_to_file)
        try:
            return stat.st_birthtime
        except AttributeError:
            return stat.st_mtime


fg = FeedGenerator()
fg.load_extension('podcast')
fg.title(u'Weszło FM')
fg.podcast.itunes_author(u'Weszło FM')
fg.link(href='http://weszlo.fm/', rel='alternate')
fg.subtitle(u'Nieoficjalny podcast WeszłoFM')
fg.language('pl')
fg.copyright('cc-by-Weszlo')
fg.podcast.itunes_summary(u'Podcast WeszłoFM')
fg.podcast.itunes_owner('Krzysztof Stanowski',
                        '*****@*****.**')
fg.link(href='https://www.simx.mobi/weszlo/', rel='self')
fg.podcast.itunes_explicit('no')
fg.image('https://i1.sndcdn.com/avatars-000421118988-38c4cq-t200x200.jpg')
fg.podcast.itunes_image(
    'https://i1.sndcdn.com/avatars-000421118988-38c4cq-t200x200.jpg')
fg.podcast.itunes_category('Sport', 'Sport News')

root_path = os.getcwd() + "/"
only_folders_from_root_path = [
    f for f in listdir(root_path) if isdir(join(root_path, f))
]
Code example #39
def main():
    all_tags = {}
    post_data = []
    for post in get_posts():
        out_file = post[len('posts/'):]
        output, title = get_post_data(post)
        header, date, tags_raw = title[1], title[2], title.get(6, "")

        tags = tags_raw.split(",")
        tags_html = get_html_tags(tags)

        post_data.append(
            (out_file, title[1], title[2], post, output, tags_html))
        for tag in tags:
            if tag not in all_tags:
                all_tags[tag] = []

            all_tags[tag].append((out_file, title[1], title[2]))

        title = title[1]
        with open('docs/' + out_file, 'w') as f:
            f.write(
                TEMPLATE.format(post=output,
                                title=title,
                                subtitle=date,
                                tag=title,
                                tags=tags_html,
                                meta=""))

    for post in get_event_posts():
        out_file = post[len('event-data/'):].split('.')[0]
        m, d, y = out_file.split('-')
        date = f'{m.title()} {d}, {y}'
        out_file = f'week-of-{out_file}.html'
        with open(post) as f:
            data = yaml.safe_load(f)
            week = data['week']
        header = f'Community speakers appearing the week of {week}'
        output = [
            '<p>Events in this list are chosen because of their particularly high-caliber content. If there is an event or series that you think should be considered in this list please email <a href="mailto:[email protected]">[email protected]</a>. Community feedback is extremely welcome.</p>'
        ]
        output.append('<div class="events">')
        for day, events in data['events'].items():
            output.append('  <div class="day">')
            output.append(f'    <div class="day-name">{day}</div>')
            for event in events:
                output.append('      <div class="event">')
                output.append(
                    f'        <div class="event-group">{event["group"]}</div>')
                output.append(
                    f'        <span class="event-title">{event["title"]}</span><span class="event-time">{event["time"]}</span>'
                )
                if 'who' in event:
                    output.append(
                        f'        <span class="event-by">by {event["who"]}</span>'
                    )

                output.append(
                    f'        <a href="{event["link"]}" class="event-link">Link</a>'
                )
                output.append('      </div>')
            output.append('  </div>')
        output.append('</div>')
        output = '\n'.join(output)
        with open('docs/' + out_file, 'w') as f:
            f.write(
                TEMPLATE.format(post=output,
                                title=header,
                                subtitle=date,
                                tag=header,
                                tags='',
                                meta=''))
        post_data.append((out_file, header, date, post, output, ''))

    post_data.sort(key=lambda post: datetime.strptime(post[2], '%B %d, %Y'))
    post_data.reverse()
    notes = []
    for i, args in enumerate(post_data):
        year = args[2].split(' ')[-1]
        # The newest post has no previous entry, so pretend the "previous"
        # year lies in the future; that forces a year heading before the
        # first post and before every change of year after it.
        if i == 0:
            prev_post_year = str(datetime.today().year + 1)
        else:
            prev_post_year = post_data[i - 1][2].split(' ')[-1]
        if year != prev_post_year:
            notes.append('<h3>{}</h3>'.format(year))
        note = POST_SUMMARY.format(*args[:2], args[5], *args[2:3])
        notes.append(note)
    home_page = HOME_PAGE.format(notes="\n".join(notes))
    with open('docs/index.html', 'w') as f:
        meta = ''
        f.write(
            TEMPLATE.format(post=home_page,
                            title="",
                            tag=TAG,
                            subtitle="",
                            tags="",
                            meta=meta))

    with open('docs/style.css', 'w') as fw:
        with open('style.css') as fr:
            fw.write(fr.read())

    fg = FeedGenerator()
    for url, title, date, post, content, _ in reversed(post_data):
        fe = fg.add_entry()
        fe.id('http://learn.multiprocess.io/' + url)
        fe.title(title)
        fe.link(href='http://learn.multiprocess.io/' + url)
        fe.pubDate(
            datetime.strptime(date, '%B %d, %Y').replace(tzinfo=timezone.utc))
        fe.content(content)

    fg.id('http://learn.multiprocess.io/')
    fg.link(href='http://learn.multiprocess.io/')
    fg.title(TAG)
    fg.description(TAG)
    fg.language('en')
    fg.rss_file('docs/rss.xml')

    if not os.path.exists('docs/tags'):
        os.makedirs('docs/tags')
    for tag in all_tags:
        posts = all_tags[tag]
        with open('docs/tags/%s.html' % tag.replace(' ', '-'), 'w') as f:
            posts.sort(
                key=lambda post: datetime.strptime(post[2], '%B %d, %Y'))
            posts.reverse()
            tag_page = TAG_PAGE.format(tag)
            tag_page += "\n".join(
                [TAG_SUMMARY.format(*args) for args in posts])
            f.write(
                TEMPLATE.format(post=tag_page,
                                title="",
                                tag=TAG,
                                subtitle="",
                                tags="",
                                meta=""))
コード例 #40
0
def get_rss():
    zen_url = request.args.get('url')
    # set telegram instant view rhash if available
    tg_rhash = request.args.get('tg_rhash')

    limit_description = request.args.get('limit_description', type=int)

    if not zen_url:
        return 'url (?url=https://zen.yandex.ru/media/.../) must be set'
    parsed_url = urlparse(zen_url)
    if parsed_url.netloc != 'zen.yandex.ru':
        return 'Domain must be zen.yandex.ru'

    # validate tg_rhash
    if tg_rhash and not re.match(r'^[a-fA-F\d]+$', tg_rhash):
        return 'Invalid tg_rhash. Please, check rhash value from instant view template'

    # if not re.match(r'^/(media/)?(id/[\da-f]+|[a-z\d_]+)/?$', parsed_url.path):
    #     return 'Url is unsupported. Supported formats:<br>' \
    #            '• https://zen.yandex.ru/media/id/01234567890abcdef0123456 <br>' \
    #            '• https://zen.yandex.ru/media/nickname'

    resp = requests.get(
        zen_url, headers={'User-Agent': 'TelegramBot (like TwitterBot)'})
    doc = fromstring(resp.text)

    try:
        text = re.search(
            r'{.+}',
            doc.xpath('.//script[contains(text(), "__serverState__")]')
            [0].text)[0]
        json_data = json.loads(text)
    except Exception:
        # markup changed or the __serverState__ blob is missing/unparseable
        return abort(404)

    server_state = None
    for key in json_data:
        if '__serverState__' in key:
            server_state = json_data[key]
            break

    if server_state is None:
        return abort(404)

    items = server_state['feed'].get('items')
    items_order = server_state['feed'].get('itemsOrder')
    publisher = server_state['channel']['source']

    feed = FeedGenerator()
    feed.id('http://zen.yandex.ru/')
    feed.title(publisher.get('title'))
    feed.subtitle((publisher.get('description') or 'News').strip())
    feed.language('ru')
    feed.author({'name': '-', 'email': '-'})
    feed.link(href=zen_url, rel='alternate')
    try:
        image_logo_url = publisher.get('logo')
        feed.logo(image_logo_url)
    except Exception:
        # the logo is optional; ignore missing or malformed values
        pass

    for oItem in items_order:
        item = items.get(oItem)
        if item.get('type') != 'image_card':
            continue

        entry = feed.add_entry()

        entry.title(item.get('title').strip())

        entry.description(item.get('text').strip()[:limit_description])

        if item.get('image'):
            item_image_url = item.get('image')
            entry.enclosure(url=item_image_url,
                            type='image/webp',
                            length='2048')

        entry_url = item.get('link').split('?')[0]
        # convert to instant view link if tg hash is provided
        if tg_rhash:
            # write original url into author field
            entry.author({'name': '', 'email': entry_url})
            entry.link({
                'href':
                TG_URL.format(url=quote_plus(entry_url), rhash=tg_rhash)
            })

        else:
            entry.link({'href': entry_url})

        try:
            entry.pubDate(
                dateparser.parse(item.get('creationTime'),
                                 settings={'RETURN_AS_TIMEZONE_AWARE': True}))
        except Exception:
            # keep the entry even if the creation time cannot be parsed
            pass

        entry.guid(item.get('id'))

    rss_response = Response(feed.rss_str(pretty=True))
    rss_response.headers.set('Content-Type',
                             'application/rss+xml; charset=utf-8')

    return rss_response
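
# Wiring sketch (assumed, not shown in the listing): get_rss() relies on
# Flask's request/Response/abort, so it presumably hangs off a route like
# this; the route path is an assumption.
from flask import Flask

app = Flask(__name__)
app.add_url_rule('/rss', 'get_rss', get_rss)
# e.g. GET /rss?url=https://zen.yandex.ru/media/nickname&limit_description=300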
コード例 #41
0
import os
from feedgen.feed import FeedGenerator
from bs4 import BeautifulSoup

dot = "./"

fg = FeedGenerator()
fg.load_extension('base')

fg.id('https://www.finartcialist.com/v2/fr/blog/')
fg.title('blogue - finartcialist')
fg.author( {'name':'finartcialist', 'email':'*****@*****.**'})
fg.link( href="https://www.finartcialist.com", rel='alternate')
fg.subtitle("RSS - finartcialist - arts x finance")
fg.link(href="https://www.finartcialist.com", rel="self")
fg.language("fr")


fr_path = "v2/fr/blog/"

for root, subFolders, files in os.walk(dot + fr_path):
    path = os.path.basename(root)
    print(path)
    for f in files:
        if f != "index.html":
            if len(path) > 0:
                path_to_html = fr_path + path + '/' + f
            else:
                path_to_html = fr_path + f
            print(path_to_html)
            with open('./' + path_to_html) as html_text:
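                # The listing breaks off inside this `with` block. A plausible
                # continuation, assuming each post page's <h1> holds the title
                # and its first <p> a summary; the selectors and the output
                # filename below are assumptions, not part of the original.
                soup = BeautifulSoup(html_text.read(), 'html.parser')
                post_url = 'https://www.finartcialist.com/' + path_to_html
                fe = fg.add_entry()
                fe.id(post_url)
                fe.link(href=post_url, rel='alternate')
                h1 = soup.find('h1')
                fe.title(h1.get_text(strip=True) if h1 else f)
                p = soup.find('p')
                if p is not None:
                    fe.summary(p.get_text(strip=True))

fg.atom_file('feed.atom')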
コード例 #42
0
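# The top of this listing is truncated: `response` must come from an earlier
# GitHub API call, roughly (URL and parameters assumed):
#   response = requests.get(
#       "https://api.github.com/repos/OWASP/CheatSheetSeries/pulls",
#       params={"state": "closed", "sort": "updated", "direction": "desc"})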
if response.status_code != 200:
    print("Cannot load the list of PR content: HTTP %s received!" %  response.status_code)
    sys.exit(1)
pull_requests = response.json()

# Process the obtained list and generate the feed in memory
print("[+] Process the obtained list and generate the feed in memory (%s) items)..." % len(pull_requests))
feed_generator = FeedGenerator()
current_date = datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT")  # Sun, 19 May 2002 15:21:36 GMT (%b: the abbreviated month name RFC 2822 dates use)
feed_generator.id("https://cheatsheetseries.owasp.org/")
feed_generator.title("OWASP Cheat Sheet Series update")
feed_generator.description("List of the last updates on the content")
feed_generator.author({"name": "Core team", "email": "*****@*****.**"})
feed_generator.link({"href": "https://cheatsheetseries.owasp.org", "rel": "self"})
feed_generator.link({"href": "https://github.com/OWASP/CheatSheetSeries", "rel": "alternate"})
feed_generator.language("en")
feed_generator.icon("https://cheatsheetseries.owasp.org/gitbook/images/favicon.ico")
feed_generator.pubDate(current_date)
feed_generator.lastBuildDate(current_date)
for pull_request in pull_requests:
    # Take only merged PR
    if pull_request["merged_at"] is None:
        continue
    # Convert merge date from 2019-08-25T06:36:35Z To Sun, 19 May 2002 15:21:36 GMT
    merge_date_src = pull_request["merged_at"]
    merge_date_dst = datetime.strptime(merge_date_src, "%Y-%m-%dT%H:%M:%SZ").strftime("%a, %d %b %Y %H:%M:%S GMT")
    feed_entry = feed_generator.add_entry()
    feed_entry.id(pull_request["html_url"])
    feed_entry.title(pull_request["title"])
    feed_entry.link({"href": pull_request["html_url"], "rel": "self"})
    feed_entry.link({"href": pull_request["html_url"], "rel": "alternate"})
コード例 #43
0
# -*- coding: utf-8 -*-

from feedgen.feed import FeedGenerator

feed = FeedGenerator()

feed.title("Customed RARBG Torrent Feed")
feed.link(href="https://github.com/Apocalypsor/Rarbg")
feed.description("Make RARBG Greater Again! by Apocalypsor")
feed.language("en")
feed.logo("https://cdn.jsdelivr.net/gh/Apocalypsor/Rarbg@master/assets/favicon.png")


def getRSS(entries):
    for entry in entries:
        feedEntry = feed.add_entry()
        feedEntry.title(entry.filename)
        feedEntry.link(href=entry.download)
        feedEntry.guid(entry.download)

    response = feed.rss_str(pretty=True)

    return response
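
# Note: `feed` is module-level, so repeated getRSS() calls keep appending
# entries to the same FeedGenerator; constructing the feed inside getRSS()
# (or clearing its entries between calls) would avoid duplicated items in
# long-running processes.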


if __name__ == "__main__":
    print("rarbg.to")
コード例 #44
0
def lambda_handler(event, context):

    # job_name will be the same as the key column (episode_id) in database
    job_name = event['detail']['TranscriptionJobName']
    print(job_name)
    job = transcribe.get_transcription_job(TranscriptionJobName=job_name)
    uri = job['TranscriptionJob']['Transcript']['TranscriptFileUri']
    print(uri)

    content = urllib.request.urlopen(uri).read().decode('UTF-8')

    print(json.dumps(content))

    data = json.loads(content)

    text = data['results']['transcripts'][0]['transcript']

    # update episode_summary in database for this record
    response = table.update_item(Key={'episode_id': job_name},
                                 UpdateExpression="set episode_summary = :r",
                                 ExpressionAttributeValues={':r': text},
                                 ReturnValues="UPDATED_NEW")

    # add text file with transcript to s3 bucket
    transcript_object = s3.Object(BUCKET_NAME, job_name + '-asrOutput.txt')
    transcript_object.put(Body=text)

    # obtain all entries in database
    response = table.scan(FilterExpression=Attr('episode_int').gte(1))

    # save object with the items themselves
    items = response['Items']
    #print(items)

    items_sorted = sorted(items, key=lambda i: i['episode_int'])

    # set up overall feed metadata
    fg = FeedGenerator()

    # general feed params
    fg.id('https://r-podcast.org')
    fg.title('Residual Snippets')
    fg.author({'name': 'Eric Nantz', 'email': '*****@*****.**'})
    fg.link(href='https://r-podcast.org', rel='alternate')
    fg.logo('http://rsnippets.show.s3.amazonaws.com/residual_snippets.png')
    fg.subtitle('Musings on R, data science, linux, and life')
    fg.link(
        href=
        'https://filedn.com/lXHQDOYF1yHVL1Tsc38wxx7/site/residual_snippets.xml',
        rel='self')
    fg.language('en')

    fg.load_extension('podcast')

    # podcast-specific params
    fg.podcast.itunes_category('Technology')
    fg.podcast.itunes_author('Eric Nantz')
    fg.podcast.itunes_explicit('no')
    fg.podcast.itunes_owner('Eric Nantz', '*****@*****.**')
    fg.podcast.itunes_summary(
        'Residual Snippets is an informal, unedited, and free-flowing audio podcast from Eric Nantz.  If you enjoy hearing quick takes from a data scientist on their journey to blend innovative uses of open-source technology, contributing back to their brilliant communities, and juggling the curveballs life throws at them, this podcast is for you!'
    )

    for item in items_sorted:
        # print(item)
        fe = fg.add_entry()
        fe.title(item['episode_title'])
        fe.author({'name': 'Eric Nantz', 'email': '*****@*****.**'})
        fe.enclosure(url=item['episode_url'], type='audio/mpeg')

        # process description before adding to feed
        ep_desc = create_summary(item['episode_summary'])
        fe.description(ep_desc)

    # populate xml file for RSS feed
    feed_string = fg.rss_str(pretty=True)
    fg.rss_file('/tmp/residual_snippets.xml', pretty=True)

    # upload xml feed to pcloud and s3
    pc = PyCloud(PCLOUD_USERNAME, PCLOUD_PASS)
    pc.uploadfile(data=feed_string,
                  filename='residual_snippets.xml',
                  folderid=PCLOUD_FOLDER_ID)

    upload_file("/tmp/residual_snippets.xml",
                BUCKET_NAME,
                object_name='residual_snippets.xml')

    # create export of dynamodb and upload to s3
    # obtain all entries in database
    response2 = table.scan(FilterExpression=Attr('episode_int').gte(1))

    # save object with the items themselves
    items2 = response2['Items']

    items2_sorted = sorted(items2, key=lambda i: i['episode_int'])

    db_export = "/tmp/dbexport.json"
    f = open(db_export, "w")
    f.write(json.dumps(items2_sorted, indent=2, default=decimal_default))
    f.close()

    # upload to s3 bucket
    success = upload_file(db_export, BUCKET_NAME, object_name='dbexport.json')
コード例 #45
0
ファイル: __main__.py プロジェクト: ode79/python-feedgen
	arg = sys.argv[1]

	fg = FeedGenerator()
	fg.id('http://lernfunk.de/_MEDIAID_123')
	fg.title('Testfeed')
	fg.author( {'name':'Lars Kiesow','email':'*****@*****.**'} )
	fg.link( href='http://example.com', rel='alternate' )
	fg.category(term='test')
	fg.contributor( name='Lars Kiesow', email='*****@*****.**' )
	fg.contributor( name='John Doe', email='*****@*****.**' )
	fg.icon('http://ex.com/icon.jpg')
	fg.logo('http://ex.com/logo.jpg')
	fg.rights('cc-by')
	fg.subtitle('This is a cool feed!')
	fg.link( href='http://larskiesow.de/test.atom', rel='self' )
	fg.language('de')
	fe = fg.add_entry()
	fe.id('http://lernfunk.de/_MEDIAID_123#1')
	fe.title('First Element')
	fe.content('''Lorem ipsum dolor sit amet, consectetur adipiscing elit. Tamen
			aberramus a proposito, et, ne longius, prorsus, inquam, Piso, si ista
			mala sunt, placet. Aut etiam, ut vestitum, sic sententiam habeas aliam
			domesticam, aliam forensem, ut in fronte ostentatio sit, intus veritas
			occultetur? Cum id fugiunt, re eadem defendunt, quae Peripatetici,
			verba.''')
	fe.summary('Lorem ipsum dolor sit amet, consectetur adipiscing elit...')
	fe.link( href='http://example.com', rel='alternate' )
	fe.author( name='Lars Kiesow', email='*****@*****.**' )

	if arg == 'atom':
		print(fg.atom_str(pretty=True))
コード例 #46
0
        vid_opts['pageToken'] = vids_info['nextPageToken']
        vids_info = requests.get(
            'https://www.googleapis.com/youtube/v3/search',
            params=vid_opts).json()

    if True:
        external_ip = requests.get('https://api.ipify.org').text
        gen = FeedGenerator()
        gen.load_extension('podcast')
        gen.title(chan_name)
        gen.description(chan_desc)
        gen.link(href=chan_url, rel='alternate')
        gen.link(href=('https://' + external_ip + '/rss?chan=' + chan_name),
                 rel='self')
        gen.logo(chan_info['items'][0]['snippet']['thumbnails']['high']['url'])
        gen.language('en')
        # gen.podcast.itunes_category('Games & Hobbies', 'Video Games')
        # gen.podcast.itunes_explicit('no')
        gen.podcast.itunes_complete('no')
        gen.podcast.itunes_new_feed_url(chan_url)
        # gen.podcast.itunes_owner('videogamedunkey', '*****@*****.**')
        gen.podcast.itunes_summary(chan_desc)
        gen.podcast.itunes_author(chan_name)

        for root, dirs, files in os.walk(output_folder):
            for file in files:
                if file_ext in file:
                    entry = gen.add_entry()
                    entry.id(
                        'some link for now'
                    )  # TODO What does this need to be for the webserver to find it?
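                    # For a podcast client to fetch the file, the id/link
                    # typically needs the public URL the webserver serves it
                    # under, e.g. 'https://' + external_ip + '/media/' + file;
                    # that path layout is an assumption, so the TODO above is
                    # left unresolved.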
コード例 #47
0
ファイル: renderers.py プロジェクト: Austinsmom/bodhi
    def render(data, system):
        """
        Render the given data as an RSS view.

        If the request's content type is set to the default, this function will change it to
        application/rss+xml.

        Args:
            data (dict): A dictionary describing the information to be rendered. The information can
                be different types of objects, such as updates, users, comments, or overrides.
            system (pyramid.events.BeforeRender): Used to get the current request.
        Returns:
            basestring: An RSS document representing the given data.
        """
        request = system.get('request')
        if request is not None:
            response = request.response
            ct = response.content_type
            if ct == response.default_content_type:
                response.content_type = 'application/rss+xml'

        if 'updates' in data:
            key = 'updates'
        elif 'users' in data:
            key = 'users'
        elif 'comments' in data:
            key = 'comments'
        elif 'overrides' in data:
            key = 'overrides'

        feed = FeedGenerator()
        feed.title(key)
        feed.link(href=request.url, rel='self')
        feed.description(key)
        feed.language(u'en')

        def linker(route, param, key):
            def link_dict(obj):
                return dict(href=request.route_url(route, **{param: obj[key]}))

            return link_dict

        getters = {
            'updates': {
                'title': operator.itemgetter('title'),
                'link': linker('update', 'id', 'title'),
                'description': operator.itemgetter('notes'),
                'pubdate': lambda obj: utc.localize(obj['date_submitted']),
            },
            'users': {
                'title': operator.itemgetter('name'),
                'link': linker('user', 'name', 'name'),
                'description': operator.itemgetter('name'),
            },
            'comments': {
                'title': operator.itemgetter('rss_title'),
                'link': linker('comment', 'id', 'id'),
                'description': operator.itemgetter('text'),
                'pubdate': lambda obj: utc.localize(obj['timestamp']),
            },
            'overrides': {
                'title': operator.itemgetter('nvr'),
                'link': linker('override', 'nvr', 'nvr'),
                'description': operator.itemgetter('notes'),
                'pubdate': lambda obj: utc.localize(obj['submission_date']),
            },
        }

        for value in data[key]:
            feed_item = feed.add_item()
            for name, getter in getters[key].items():
                # Because we have to use methods to fill feed entry attributes,
                # it's done by getting methods by name and calling them
                # on the same line.
                getattr(feed_item, name)(getter(value))

        return feed.rss_str()
コード例 #48
0
ファイル: __main__.py プロジェクト: DRMacIver/notebook
def do_build(rebuild=False, full=True, name=""):
    only = name

    try:
        os.makedirs(HTML_POSTS)
    except FileExistsError:
        pass

    for name in tqdm(post_names()):
        source = os.path.join(POSTS, name + ".md")
        if not name.startswith(only):
            continue

        dest = os.path.join(HTML_POSTS, name + ".html")

        if not (rebuild or not os.path.exists(dest)
                or os.path.getmtime(source) > os.path.getmtime(dest)):
            continue

        with open(source) as i:
            source_text = i.read()

        with open(dest, "w") as o:
            o.write(post_html(template_cache_key(), name, source_text))

    if not full:
        return

    for post in glob(os.path.join(HTML_POSTS, "*.html")):
        source = os.path.join(POSTS,
                              os.path.basename(post).replace(".html", ".md"))
        if not os.path.exists(source):
            os.unlink(post)

    posts = [post_object(name) for name in tqdm(post_names())]

    posts.sort(key=lambda p: p.name, reverse=True)

    new_count = 0
    new_posts = posts[:new_count]

    old_posts = []

    for post in posts[new_count:]:
        date = dateutil.parser.parse(post.date)
        date = f"{date.year}-{date.month:02d}"

        if not old_posts or date != old_posts[-1][0]:
            old_posts.append((date, []))
        old_posts[-1][-1].append(post)

    with open(INDEX_PAGE, "w") as o:
        o.write(
            TEMPLATE_LOOKUP.get_template("index.html").render(
                new_posts=new_posts,
                old_posts=old_posts,
                title="Thoughts from David R. MacIver",
            ))

    fg = FeedGenerator()
    fg.id("https://notebook.drmaciver.com/")
    fg.title("DRMacIver's notebook")
    fg.author({"name": "David R. MacIver", "email": "*****@*****.**"})
    fg.link(href="https://notebook.drmaciver.com", rel="alternate")
    fg.link(href="https://notebook.drmaciver.com/feed.xml", rel="self")
    fg.language("en")

    dates = []

    for post in sorted(posts, key=lambda p: p.date, reverse=True)[:10]:
        fe = fg.add_entry()
        fe.id("https://notebook.drmaciver.com" + post.url)
        fe.link(href="https://notebook.drmaciver.com" + post.url)
        fe.title(post.title or post.name)
        fe.content(post.body, type="html")
        updated = (subprocess.check_output([
            "git",
            "log",
            "-1",
            "--date=iso8601",
            '--format="%ad"',
            "--",
            post.original_file,
        ]).decode("ascii").strip().strip('"'))
        if updated:
            updated = dateutil.parser.parse(updated)
        else:
            updated = datetime.strptime(post.name.replace(
                ".html", ""), POST_DATE_FORMAT).replace(tzinfo=tz.gettz())
        dates.append(updated)
        fe.updated(updated)

    fg.updated(max(dates))

    fg.atom_file(os.path.join(HTML_ROOT, "feed.xml"), pretty=True)
コード例 #49
0
ファイル: feed.py プロジェクト: deafmute1/refeed
class Feed():
    """ Instanceable class to manage a named feed including storage, retrieval and genration functions.

    :param feed_name: a string containg a feed name present in config.Feed.names() 
    """
    def __init__(self, feed_name: str) -> None:
        self.feed_name = feed_name
        self.alternates = {}
        self.added_mail_uuids = []
        self.written_mail_uuids = None

        # Retrieve fg from the shelf if it exists; otherwise create it using config options
        with shelve.open(
                str(Path(
                    config.paths["data"]).joinpath('feeds.shelf'))) as shelf:
            try:
                self.fg = shelf[self.feed_name]
            except KeyError as e:
                if self.feed_name not in shelf:
                    # Mandatory ATOM values
                    fg_config = config.ParseFeed.info(self.feed_name)
                    self.fg = FeedGenerator()
                    self.fg.id('tag:{},{}/feeds/{}.xml'.format(
                        fg_config['fqdn'], date.today(), feed_name))
                    href_ = '{}{}/feeds/{}.xml'.format(fg_config['protocol'],
                                                       fg_config['fqdn'],
                                                       feed_name)
                    self.fg.link(rel='self',
                                 type='application/atom+xml',
                                 href=href_)
                    self.fg.title(feed_name)
                    self.fg.subtitle(
                        'Feed generated from mail messages received at {} by refeed'
                        .format(config.ParseFeed.account_name(self.feed_name)))
                    self.fg.author(name=fg_config['author-name'])

                    # Optional values
                    try:
                        self.fg.logo(
                            str(
                                Path(config.paths["static"]).joinpath(
                                    fg_config['logo'])))
                    except KeyError:
                        pass

                    try:
                        self.fg.language(fg_config['language'])
                    except KeyError:
                        pass
                else:
                    raise KeyError(e)

    # context manager
    def __enter__(self) -> 'Feed':
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
        self._dump_shelves()

    def add_entries_from_dict_if_new(self, mails: Dict[int,
                                                       MailParser]) -> None:
        try:
            # bare iteration over a dict yields only keys, so unpack
            # (uuid, mail) pairs via .items()
            for uuid, mail in mails.items():
                if FeedTools.uuid_not_in_feed(self.feed_name, uuid):
                    self.add_entry((uuid, mail))
        except (TypeError, ValueError):
            logging.error(
                'Given NoneType as mailobject to Feed, some error in mail with IMAP.',
                exc_info=True)
        except Exception:
            logging.error('Unexpected error', exc_info=True)

    def add_entry(self, mail: Tuple[int, MailParser]) -> None:
        random.seed(None, 2)
        fe = self.fg.add_entry(order='prepend')
        fg_config = config.ParseFeed.info(self.feed_name)

        # id
        try:
            fe.id('tag:{},{}/feeds/{}.xml:{}'.format(fg_config['fqdn'],
                                                     date.today(),
                                                     self.feed_name, mail[0]))
        except (AttributeError, MailParserReceivedParsingError):
            fe.id('tag:{},{}/feeds/{}.xml:ID_NOT_FOUND-{}'.format(
                fg_config['fqdn'], date.today(), self.feed_name, ''.join(
                    random.choices(string.ascii_lowercase + string.digits,
                                   k=10))))

        # title
        try:
            fe.title(mail[1].subject)
        except (AttributeError, MailParserReceivedParsingError):
            fe.title('SUBJECT_NOT_FOUND-{}'.format(''.join(
                random.choices(string.ascii_lowercase + string.digits, k=10))))

        # alt link and body contents
        try:
            alt_id = FeedTools.generate_unique_alt_id()
            self.alternates[alt_id] = mail[1].body
            alt_link = '{}{}/alt-html/{}.html'.format(fg_config['protocol'],
                                                      fg_config['fqdn'],
                                                      alt_id)
            fe.link(rel='alternate', type='text/html', href=alt_link)
            fe.content(content=mail[1].body, src=alt_link, type='text/html')
        except (AttributeError, MailParserReceivedParsingError):
            fe.content(content='MAIL_BODY_NOT_FOUND', type='text/plain')

        # update time: entry and feed should match exactly,
        # not be a few seconds off
        now = datetime.now()
        fe.updated(now)
        self.fg.updated(now)

        # cache uuids added to feed
        self.added_mail_uuids.append(mail[0])

    def generate_feed(self) -> None:
        # generate htmls
        if self.alternates != {}:
            try:
                for alt_id, body in self.alternates.items():
                    with Path(config.paths["static"]).joinpath(
                            'alt',
                            '{}.html'.format(str(alt_id))).open(mode='w') as f:
                        f.write(body)
            except Exception:  # Exception catches most built-in exceptions, but not KeyboardInterrupt, SystemExit and other BaseException subclasses, which are out of scope here
                logging.error(
                    'Failed to write some html alt pages to file for new entries for feed {}'
                    .format(self.feed_name),
                    exc_info=True)
            else:
                logging.info(
                    'Successfully generated html alt pages: {} for feed {}'.
                    format(list(self.alternates.keys()), self.feed_name))
            finally:
                FeedTools.cleanup_alts(
                    self.feed_name,
                    config.ParseFeed.alternate_cache(self.feed_name))

        # generate xml
        try:
            self.fg.atom_file(
                str(
                    Path(config.paths["static"]).joinpath(
                        'feed', '{}.xml'.format(self.feed_name))))
        except Exception:  # TODO: find out which exceptions feedgen actually raises, if any (not documented - check source)
            logging.error(
                'Failed to generate and write new copy of feed {} to file'.
                format(self.feed_name))
        else:
            # only mark mails as written once the feed file was generated
            self.written_mail_uuids = self.added_mail_uuids

    def _dump_shelves(self) -> None:
        with shelve.open(
                str(Path(
                    config.paths["data"]).joinpath('feeds.shelf'))) as shelf:
            shelf[self.feed_name] = self.fg
            logging.info('Atom data for feed {} stored to disk'.format(
                self.feed_name))

        with shelve.open(
                str(
                    Path(config.paths["data"]).joinpath(
                        'alternate_ids.shelf'))) as shelf:
            try:
                # list.extend() returns None, so extend in place first and
                # then write the updated list back to the shelf
                alt_ids = shelf[self.feed_name]
                alt_ids.extend(list(self.alternates.keys()))
                shelf[self.feed_name] = alt_ids
            except (KeyError,
                    AttributeError):  # feed alternates list does not exist yet
                shelf[self.feed_name] = list(self.alternates.keys())
                logging.info(
                    'Alt id data for feed {} stored to disk for first time'.
                    format(self.feed_name))
            finally:
                logging.info(
                    'Alt id data for feed {} stored back to disk'.format(
                        self.feed_name))

        with shelve.open(
                str(Path(config.paths["data"]).joinpath(
                    'mail_uuids.shelf'))) as shelf:
            try:
                # as above: extend in place, then write the list back
                uuids = shelf[self.feed_name]
                uuids.extend(self.written_mail_uuids)
                shelf[self.feed_name] = uuids
            except (KeyError,
                    AttributeError):  # feed id list does not exist yet
                shelf[self.feed_name] = self.written_mail_uuids
                logging.info(
                    'Mail UUID data for feed {} stored to disk for first time'.
                    format(self.feed_name))
            except TypeError:
                if self.written_mail_uuids is None:
                    logging.info(
                        'Failed to write mail UUIDs to shelf file for feed {}: Newly written mail UUID data is None. Feed._dump_shelves() was likely called without any new items beeing added to feed'
                        .format(self.feed_name),
                        exc_info=True)
                else:
                    logging.error(
                        'Failed to write mail UUIDs to shelf file for feed {}: Newly written mail UUID is not None, some unexpected error has occured. '
                        .format(self.feed_name),
                        exc_info=True)
            finally:
                logging.info(
                    'Mail UUID data for feed {} stored back to disk'.format(
                        self.feed_name))
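
# Usage sketch (assumed driver code, not part of the listing); `mails` would
# come from the IMAP layer as a Dict[int, MailParser]:
#
#     with Feed('my-feed') as feed:
#         feed.add_entries_from_dict_if_new(mails)
#         feed.generate_feed()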
コード例 #50
0
ファイル: rssfeeds.py プロジェクト: K-ennethA/CPSC-449
def get_top_25_recent_post_part_comm(comm, num_of_posts):
    try:
        response = requests.get(
            'http://localhost:5100/api/v1/resources/votes/top/{}'.format(
                num_of_posts), )  #return the top 25
        # If the response was successful, no Exception will be raised
        response.raise_for_status()

    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')  # Python 3.6
    except Exception as err:
        print(f'Other error occurred: {err}')  # Python 3.6
    else:
        print('Success!')

    #Inspect some attributes of the `requests` repository
    votes_json_response = response.json()
    post_ids = []  #create a list of ids to be later sorted
    for vote in votes_json_response:
        post_ids.append(vote['PostID'])
        print(vote)
    #sort the list of post ids with the post request to the given local host
    sorted_votes = requests.post(
        'http://localhost:5100/api/v1/resources/votes/list',
        data={'list': str(post_ids)})
    sorted_votes_json_response = sorted_votes.json()

    #The top 25 posts to a particular community, sorted by score
    fg = FeedGenerator()
    fg.id('http://localhost:5200/api/rss/resources/posts/{}/{}'.format(
        comm, num_of_posts))
    fg.title('25 most recent posts for a particular community')
    fg.subtitle('Reddit')
    fg.language('en')
    fg.link(href='reddit.com')

    for vote in sorted_votes_json_response:
        try:
            response = requests.get(
                'http://localhost:5000/api/v1/resources/posts/{}'.format(
                    vote['PostID']),
            )  #given the id of the votes find the corresponding post
            #Add Feed Entries for all 25 posts
            # If the response was successful, no Exception will be raised
            response.raise_for_status()
        except HTTPError as http_err:
            print(f'HTTP error occurred: {http_err}')  # Python 3.6
        except Exception as err:
            print(f'Other error occurred: {err}')  # Python 3.6
        else:
            print('Success!')

        post_response = response.json()
        if post_response[
                'subreddit'] == comm:  #filter by a particular community
            fe = fg.add_entry()
            fe.id(str(vote['PostID']))
            fe.title(post_response['title'])
            fe.author({'name': post_response['username']})
            fe.pubDate(post_response['date'] + '-7:00')

    #Generate the Feed
    rssfeed = fg.rss_str(pretty=True)
    rssfeed = rssfeed.decode()  #changes type bytes to type string
    return Response(rssfeed, mimetype='application/rss+xml'
                    )  # sets the response Content-Type to application/rss+xml
コード例 #51
0
    def emit_group_rss(self, group=None, groupname=None):
        md = markdown.Markdown()
        from feedgen.feed import FeedGenerator
        fg = FeedGenerator()
        fg.id('https://h.jonudell.info')
        fg.title('Hypothesis group %s' % groupname)
        fg.author({'name': 'Jon Udell', 'email': '*****@*****.**'})
        fg.description("Hypothesis notifications for group %s" % groupname)
        fg.link(href='https://h.jonudell.info/group_rss')
        fg.language('en')
        h = Hypothesis(token=self.token, limit=20)
        ids = self.data()
        annos = []
        for id in ids:
            try:
                anno = h.get_annotation(id)
                assert ('id' in anno.keys())
                annos.append(anno)
            except Exception:
                print('cannot get %s, deleted?' % id)
        # sort once, after all annotations have been fetched
        annos.sort(key=itemgetter('updated'), reverse=True)
        annos = [HypothesisAnnotation(a) for a in annos]
        for anno in annos:
            ref_user = None
            in_reply_to = None
            root_id = anno.id
            if len(anno.references) > 0:
                try:
                    ref_id = anno.references[-1:][0]
                    root_id = anno.references[0]
                    print('ref_id: %s, root_id %s' % (ref_id, root_id))
                    ref = h.get_annotation(ref_id)
                    print('ref: %s' % ref)
                    ref_user = HypothesisAnnotation(ref).user
                    in_reply_to = '<p>in reply to %s </p>' % ref_user
                except Exception:
                    print("cannot get user for ref")
            fe = fg.add_entry()
            fe.id(anno.id)
            fe.title('%s annotated %s in the group %s at %s ' %
                     (anno.user, anno.doc_title, groupname, anno.updated))
            fe.author({"email": None, "name": anno.user, "uri": None})
            dl = "https://hyp.is/%s" % anno.id
            fe.link({"href": "%s" % dl})
            content = ''
            if ref_user is not None:
                content += in_reply_to
            if anno.exact is not None:
                content += '<p>in reference to: </p> <p> <blockquote><em>%s</em></blockquote></p>' % anno.exact
            content += '<p> %s <a href="https://hyp.is/%s">said</a>: </p> ' % (
                anno.user, root_id)
            content += '%s ' % md.convert(anno.text)
            if len(anno.tags):
                content += '<p>tags: %s' % ', '.join(anno.tags)
            fe.content(content, type='CDATA')
            dt = dateutil.parser.parse(anno.updated)
            dt_tz = dt.replace(tzinfo=pytz.UTC)
            fe.pubdate(dt_tz)

        rssfeed = fg.rss_str(pretty=True)  # Get the RSS feed as string
        fg.rss_file('%s.xml' % group)  # Write the RSS feed to a file
コード例 #52
0
ファイル: rssfeeds.py プロジェクト: K-ennethA/CPSC-449
def get_hot_25_post_any_comm(num_of_posts):
    hot_list = []  #list of the hot posts
    try:
        response = requests.get(
            'http://localhost:5100/api/v1/resources/votes/top/{}'.format(
                num_of_posts), )  #return the top 25
        response.raise_for_status(
        )  # If the response was successful, no Exception will be raised
    except HTTPError as http_err:
        print(f'HTTP error occurred: {http_err}')  # Python 3.6
    except Exception as err:
        print(f'Other error occurred: {err}')  # Python 3.6
    else:
        print('Success!')

    #Inspect some attributes of the `requests` repository
    votes_json_response = response.json()

    #The top 25 posts to a particular community
    fg = FeedGenerator()
    fg.id('http://localhost:5100/api/rss/resources/posts/{}'.format(
        num_of_posts))
    fg.title('25 hottest posts across all communities')
    fg.subtitle('Reddit')
    fg.language('en')
    fg.link(href='reddit.com')

    for vote in votes_json_response:
        post = {}  #create a dictionary of ids and hot rank
        try:

            #given the id of the votes find the corresponding upvote and downvote
            up_down_vote_response = requests.get(
                'http://localhost:5100/api/v1/resources/votes/{}'.format(
                    vote['PostID']), )
            #give the vote id find the corresponding post
            post_response = requests.get(
                'http://localhost:5000/api/v1/resources/posts/{}'.format(
                    vote['PostID']), )
            response.raise_for_status(
            )  # If the response was successful, no Exception will be raised

        except HTTPError as http_err:
            print(f'HTTP error occurred: {http_err}')  # Python 3.6
        except Exception as err:
            print(f'Other error occurred: {err}')  # Python 3.6
        else:
            print('Success!')

        #create json responses for the following get requests
        up_down_vote_json_res = up_down_vote_response.json()
        post_json_response = post_response.json()

        #save the upvote and downvote so they can be used to calculate the hot rank
        upvote = up_down_vote_json_res['upvotes']
        downvote = up_down_vote_json_res['downvotes']

        json_date = str(post_json_response['date']).split(' ')[0].split('-')
        json_time = str(post_json_response['date']).split(' ')[1].split(':')

        epoch = datetime(1970, 1, 1)
        date = datetime(int(json_date[0]), int(json_date[1]),
                        int(json_date[2]), int(json_time[0]),
                        int(json_time[1]), int(json_time[2]))

        def epoch_seconds(date):
            td = date - epoch
            return td.days * 86400 + td.seconds + (float(td.microseconds) /
                                                   1000000)

        def score(upvote, downvote):
            return int(upvote) - int(downvote)

        #hot ranking
        s = score(upvote, downvote)
        order = log(max(abs(s), 1), 10)
        sign = 1 if s > 0 else -1 if s < 0 else 0
        seconds = epoch_seconds(date) - 1134028003
        post['id'] = vote['PostID']
        post['rank'] = round(sign * order + seconds / 45000, 7)
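        # Worked example: 150 upvotes / 50 downvotes gives s = 100, so
        # order = log10(100) = 2 and sign = +1; a post from 2019-01-01 has
        # epoch_seconds(date) - 1134028003 = 412272797, giving
        # rank = 2 + 412272797 / 45000, about 9163.62. Recency dominates and
        # score matters only on a log scale, as in Reddit's classic hot rank.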
        hot_list.append(post)
    # sort the list by rank, hottest first
    sorted_hot_list = sorted(hot_list, key=lambda i: i['rank'], reverse=True)
    # grab the 25 hottest rankings and add them to the rss feed
    # (re-fetch each post here; the loop variables from the ranking pass
    # above would otherwise all point at the last post processed)
    for post in sorted_hot_list[:25]:
        post_json = requests.get(
            'http://localhost:5000/api/v1/resources/posts/{}'.format(
                post['id'])).json()
        fe = fg.add_entry()
        fe.id(str(post['id']))
        fe.title(post_json['title'])
        fe.author({'name': post_json['username']})
        fe.pubDate(post_json['date'] + '-7:00')

    #Generate the Feed
    rssfeed = fg.rss_str(pretty=True)
    rssfeed = rssfeed.decode()  #changes type bytes to type string
    return Response(rssfeed, mimetype='application/rss+xml'
                    )  # sets the response Content-Type to application/rss+xml
コード例 #53
0
def generate_atom_feeds(app):
    """
    Generate atom feeds for all posts, categories, tags, authors, and
    drafts.
    """

    if not ablog.builder_support(app):
        return

    blog = Blog(app)

    url = blog.blog_baseurl
    if not url:
        return

    feed_path = os.path.join(app.builder.outdir, blog.blog_path, "atom.xml")

    feeds = [(
        blog.posts,
        blog.blog_path,
        feed_path,
        blog.blog_title,
        os_path_join(url, blog.blog_path, "atom.xml"),
    )]

    if blog.blog_feed_archives:

        for header, catalog in [
            (_("Posts by"), blog.author),
            (_("Posts from"), blog.location),
            (_("Posts in"), blog.language),
            (_("Posts in"), blog.category),
            (_("Posted in"), blog.archive),
            (_("Posts tagged"), blog.tags),
        ]:

            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)

                feeds.append((
                    coll,
                    coll.path,
                    os.path.join(folder, "atom.xml"),
                    blog.blog_title + " - " + header + " " + text_type(coll),
                    os_path_join(url, coll.path, "atom.xml"),
                ))

    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext

    for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:

        feed = FeedGenerator()
        feed.id("http://lernfunk.de/media/654321")
        feed.title(feed_title)
        feed.link(href=url)
        feed.subtitle(blog.blog_feed_subtitle)
        feed.link(href=feed_url)
        feed.language("en")
        feed.generator("ABlog", ablog.__version__,
                       "https://ablog.readthedocs.org")

        for i, post in enumerate(feed_posts):
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(url,
                                    app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += "#" + post.section

            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(pagename, fulltext=feed_fulltext)

            feed_entry = feed.add_entry()
            feed_entry.id(post_url)
            feed_entry.title(post.title)
            feed_entry.link(href=post_url)
            feed_entry.author({"name": author.name for author in post.author})
            feed_entry.pubDate(post.date.astimezone())
            feed_entry.updated(post.update.astimezone())
            feed_entry.content(content=content, type="html")

        parent_dir = os.path.dirname(feed_path)
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)

        with open(feed_path, "w", encoding="utf-8") as out:
            feed_str = feed.atom_str(pretty=True)
            out.write(feed_str.decode())

    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
コード例 #54
0
def main(

    # Absolute or relative paths to MP3 files on your local computer
    #
    # NB: Use the *.mp3 syntax to select and parse all MP3s in the folder
    # Also NB: Make sure each file follows this naming convention:
    #   n-xx Name of the track.mp3
    # where `n` is the disc number (e.g. 1), and `xx` is the track number (e.g. 07)
    #   Example: 2-14 Act IV Scene iii.mp3
    #
    # If you want, you can change the regular expression that parses these
    # filenames below at `track_name_raw = re.match(...)`
    # local_location = '/path/to/ripped/mp3s/*.mp3'
    *local_files,
        
    # ----------------------------------
    # Configure variables for the feed
    # ----------------------------------
    # Base URL for where the podcast files and feed will ultimately live
    base_url = 'http://files.example.com/fauxcasts/book_name/',

    # Name for the RSS file
    feed_name = 'feed.rss',

    # Information about the podcast
    feed_title = 'Podcast title',
    feed_description = "Description of podcast",
    feed_author = 'Some name here',
    feed_author_email = '*****@*****.**',
    feed_homepage = 'http://www.example.com'

        ):

    feed_url = base_url + feed_name
    # Name of the pre-uploaded podcast cover image
    cover_image = base_url + 'cover.jpg'

    # ----------------------
    # Generate actual feed
    # ----------------------
    # Generate feed
    fg = FeedGenerator()
    fg.load_extension('podcast')

    # Add descriptive variables to the feed
    fg.id(feed_url)
    fg.title(feed_title)
    fg.author({'name': feed_author, 'email': feed_author_email})
    fg.link(href=feed_homepage, rel='alternate')
    fg.logo(cover_image)
    fg.subtitle(feed_description)
    fg.link(href=feed_url, rel='self')
    fg.language('en')
    fg.podcast.itunes_block(True)
    fg.podcast.itunes_complete(True)


    # Loop through each MP3 and add it to the feed as an episode
    for i, track in enumerate(sorted(local_files)):
        # Some podcast players respect the itunes_order attribute, which is set
        # below, but many only look at the date and time of the episode. So, here
        # we pretend that the first episode happened 7 days ago, and each
        # subsequent episode is released 1 hour later.
        episode_date = (datetime.now(tz=pytz.utc) -
                        timedelta(days=7) +
                        timedelta(hours=i + 1))

        # Get the file size
        file_size = os.path.getsize(track)

        # Remove the disc and track numbers from the file names and use just
        # the title as the episode name, following the `n-xx Name.mp3`
        # convention described above (regex reconstructed; assumes `import re`
        # and `import os` at the top of the file)
        track_filename = os.path.basename(track)
        track_name_match = re.match(r'\d+-\d+ (.+)\.mp3', track_filename)
        track_name = track_name_match.group(1) if track_name_match else track_filename

        # Get the duration
        try:
            audio = MP3(track)
            m, s = divmod(audio.info.length, 60)  # Convert seconds to h:m:s
            h, m = divmod(m, 60)
            if h == 0:
                duration = "%02d:%02d" % (m, s)
            else:
                duration = "%d:%02d:%02d" % (h, m, s)
        except Exception:
            # unreadable or malformed MP3: fall back to a placeholder duration
            duration = "99:99"

        # Generate entry
        fe = fg.add_entry()
        fe.guid(base_url + track_filename)
        fe.link({'href': base_url + track_filename})
        fe.title(track_name)
        fe.description(track_name)
        fe.published(episode_date)
        fe.enclosure(base_url + track_filename, str(file_size), 'audio/mpeg')
        fe.podcast.itunes_order(i + 1)
        fe.podcast.itunes_duration(duration)

    # Write the feed to a file
    fg.rss_file(feed_name, pretty=True)
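
# Invocation sketch (assumed; the listing never calls main()):
#
#     from glob import glob
#     main(*sorted(glob('/path/to/ripped/mp3s/*.mp3')),
#          base_url='http://files.example.com/fauxcasts/book_name/',
#          feed_name='feed.rss')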
コード例 #55
0
    def setUp(self):

        fg = FeedGenerator()

        self.nsAtom = "http://www.w3.org/2005/Atom"
        self.nsRss = "http://purl.org/rss/1.0/modules/content/"

        self.feedId = 'http://lernfunk.de/media/654321'
        self.title = 'Some Testfeed'

        self.authorName = 'John Doe'
        self.authorMail = '*****@*****.**'
        self.author = {'name': self.authorName, 'email': self.authorMail}

        self.linkHref = 'http://example.com'
        self.linkRel = 'alternate'

        self.logo = 'http://ex.com/logo.jpg'
        self.subtitle = 'This is a cool feed!'

        self.link2Href = 'http://larskiesow.de/test.atom'
        self.link2Rel = 'self'

        self.language = 'en'

        self.categoryTerm = 'This category term'
        self.categoryScheme = 'This category scheme'
        self.categoryLabel = 'This category label'

        self.cloudDomain = 'example.com'
        self.cloudPort = '4711'
        self.cloudPath = '/ws/example'
        self.cloudRegisterProcedure = 'registerProcedure'
        self.cloudProtocol = 'SOAP 1.1'

        self.icon = "http://example.com/icon.png"
        self.contributor = {
            'name': "Contributor Name",
            'uri': "Contributor Uri",
            'email': 'Contributor email'
        }
        self.copyright = "The copyright notice"
        self.docs = 'http://www.rssboard.org/rss-specification'
        self.managingEditor = '*****@*****.**'
        self.rating = '(PICS-1.1 "http://www.classify.org/safesurf/" ' + \
            '1 r (SS~~000 1))'
        self.skipDays = 'Tuesday'
        self.skipHours = 23

        self.textInputTitle = "Text input title"
        self.textInputDescription = "Text input description"
        self.textInputName = "Text input name"
        self.textInputLink = "Text input link"

        self.ttl = 900

        self.webMaster = '*****@*****.**'

        fg.id(self.feedId)
        fg.title(self.title)
        fg.author(self.author)
        fg.link(href=self.linkHref, rel=self.linkRel)
        fg.logo(self.logo)
        fg.subtitle(self.subtitle)
        fg.link(href=self.link2Href, rel=self.link2Rel)
        fg.language(self.language)
        fg.cloud(domain=self.cloudDomain,
                 port=self.cloudPort,
                 path=self.cloudPath,
                 registerProcedure=self.cloudRegisterProcedure,
                 protocol=self.cloudProtocol)
        fg.icon(self.icon)
        fg.category(term=self.categoryTerm,
                    scheme=self.categoryScheme,
                    label=self.categoryLabel)
        fg.contributor(self.contributor)
        fg.copyright(self.copyright)
        fg.docs(docs=self.docs)
        fg.managingEditor(self.managingEditor)
        fg.rating(self.rating)
        fg.skipDays(self.skipDays)
        fg.skipHours(self.skipHours)
        fg.textInput(title=self.textInputTitle,
                     description=self.textInputDescription,
                     name=self.textInputName,
                     link=self.textInputLink)
        fg.ttl(self.ttl)
        fg.webMaster(self.webMaster)

        self.fg = fg
コード例 #56
0
ファイル: generate_feed.py プロジェクト: dark-nova/tos_rss
    """

    entry = fg.add_entry()
    entry.title(article['title'])
    entry.author({'name': 'Tree of Savior'})
    entry.description(article['title'])
    entry.link(href=article['url'])
    entry.guid(article['url'])
    entry.pubDate(article['date'])
    return


if __name__ == '__main__':
    fg = FeedGenerator()
    fg.title('Tree of Savior News')
    fg.author({'name': 'IMC Games'})
    fg.description('News for the International Tree of Savior Servers')
    fg.link(href='https://treeofsavior.com/page/news/', rel='alternate')
    fg.link(href='https://dark-nova.me/tos/feed.xml', rel='self')
    fg.logo('https://dark-nova.me/tos/feed.png')
    fg.language('en-US')

    all_news = scraper.get_news()
    for article in all_news:
        populate_item(fg, article)

    if len(fg.entry()) > 0:
        fg.rss_file('feed.xml')
    else:
        scraper.logger.error('Could not generate entries for feed')
コード例 #57
0
ファイル: renderers.py プロジェクト: nphilipp/bodhi
    def render(data, system):
        """
        Render the given data as an RSS view.

        If the request's content type is set to the default, this function will change it to
        application/rss+xml.

        Args:
            data (dict): A dictionary describing the information to be rendered. The information can
                be different types of objects, such as updates, users, comments, or overrides.
            system (pyramid.events.BeforeRender): Used to get the current request.
        Returns:
            basestring: An RSS document representing the given data.
        """
        request = system.get('request')
        if request is not None:
            response = request.response
            ct = response.content_type
            if ct == response.default_content_type:
                response.content_type = 'application/rss+xml'

        if 'updates' in data:
            key = 'updates'
            feed_title = 'Released updates'
        elif 'users' in data:
            key = 'users'
            feed_title = 'Bodhi users'
        elif 'comments' in data:
            key = 'comments'
            feed_title = 'User comments'
        elif 'overrides' in data:
            key = 'overrides'
            feed_title = 'Update overrides'
        else:
            # This is a request we don't know how to render. Let's return BadRequest and log.
            log.debug('Unable to render RSS feed for data: %s', data)
            # See if we have a request so we can set a code without raising an Exception
            if request is not None:
                response.status = HTTPBadRequest.code
                return 'Invalid RSS feed request'
            else:
                raise HTTPBadRequest('Invalid RSS feed request')

        feed_description_list = []
        for k in request.GET.keys():
            feed_description_list.append('%s(%s)' % (k, request.GET[k]))
        if feed_description_list:
            feed_description = 'Filtered on: ' + ', '.join(
                feed_description_list)
        else:
            feed_description = "All %s" % (key)

        feed = FeedGenerator()
        feed.title(feed_title)
        feed.link(href=request.url, rel='self')
        feed.description(feed_description)
        feed.language('en')

        def linker(route, param, key):
            def link_dict(obj):
                return dict(href=request.route_url(route, **{param: obj[key]}))

            return link_dict

        getters = {
            'updates': {
                'title': operator.itemgetter('alias'),
                'link': linker('update', 'id', 'title'),
                'description': operator.itemgetter('notes'),
                'pubdate': lambda obj: utc.localize(obj['date_submitted']),
            },
            'users': {
                'title': operator.itemgetter('name'),
                'link': linker('user', 'name', 'name'),
                'description': operator.itemgetter('name'),
            },
            'comments': {
                'title': operator.itemgetter('rss_title'),
                'link': linker('comment', 'id', 'id'),
                'description': operator.itemgetter('text'),
                'pubdate': lambda obj: utc.localize(obj['timestamp']),
            },
            'overrides': {
                'title': operator.itemgetter('nvr'),
                'link': linker('override', 'nvr', 'nvr'),
                'description': operator.itemgetter('notes'),
                'pubdate': lambda obj: utc.localize(obj['submission_date']),
            },
        }

        for value in data[key]:
            feed_item = feed.add_item()
            for name, getter in getters[key].items():
                # Because we have to use methods to fill feed entry attributes,
                # it's done by getting methods by name and calling them
                # on the same line.
                getattr(feed_item, name)(getter(value))

        return feed.rss_str()
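
The noteworthy pattern in this renderer is the getters table combined with getattr(feed_item, name)(...): feedgen exposes entry fields as setter methods, so the loop can resolve each setter by name and call it with the value extracted from the raw dictionary. A self-contained sketch of the same dispatch, using hypothetical sample data:

import operator
from feedgen.feed import FeedGenerator

fg = FeedGenerator()
fg.title('Dispatch demo')
fg.link(href='https://example.com/feed', rel='self')
fg.description('Filling entry fields by setter name')

getters = {
    'title': operator.itemgetter('name'),
    'description': operator.itemgetter('notes'),
}

for obj in [{'name': 'first', 'notes': 'hello'}]:
    entry = fg.add_entry()
    for attr, getter in getters.items():
        # entry.title(...) and entry.description(...) are methods, so they
        # can be looked up by name and called in one step.
        getattr(entry, attr)(getter(obj))

print(fg.rss_str(pretty=True).decode())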
コード例 #58
0
ファイル: post.py プロジェクト: tg-m/ablog
def generate_atom_feeds(app):
    """
    Generate archive pages for all posts, categories, tags, authors, and
    drafts.
    """

    if not ablog.builder_support(app):
        return

    blog = Blog(app)

    url = blog.blog_baseurl
    if not url:
        return

    feeds = [
        (
            blog.posts,
            blog.blog_path,
            os.path.join(app.builder.outdir, blog.blog_path, feed_root + ".xml"),
            blog.blog_title,
            os_path_join(url, blog.blog_path, feed_root + ".xml"),
            feed_templates,
        )
        for feed_root, feed_templates in blog.blog_feed_templates.items()
    ]

    if blog.blog_feed_archives:
        for header, catalog in [
            (_("Posts by"), blog.author),
            (_("Posts from"), blog.location),
            (_("Posts in"), blog.language),
            (_("Posts in"), blog.category),
            (_("Posted in"), blog.archive),
            (_("Posts tagged"), blog.tags),
        ]:

            for coll in catalog:
                # skip collections containing only drafts
                if not len(coll):
                    continue
                folder = os.path.join(app.builder.outdir, coll.path)
                if not os.path.isdir(folder):
                    os.makedirs(folder)

                for feed_root, feed_templates in blog.blog_feed_templates.items():
                    feeds.append(
                        (
                            coll,
                            coll.path,
                            os.path.join(folder, feed_root + ".xml"),
                            blog.blog_title + " - " + header + " " + text_type(coll),
                            os_path_join(url, coll.path, feed_root + ".xml"),
                            feed_templates,
                        )
                    )

    # Config options
    feed_length = blog.blog_feed_length
    feed_fulltext = blog.blog_feed_fulltext

    for feed_posts, pagename, feed_path, feed_title, feed_url, feed_templates in feeds:

        feed = FeedGenerator()
        feed.id(blog.blog_baseurl)
        feed.title(feed_title)
        feed.link(href=url)
        feed.subtitle(blog.blog_feed_subtitle)
        feed.link(href=feed_url, rel="self")
        feed.language(app.config.language)
        feed.generator("ABlog", ablog.__version__, "https://ablog.readthedocs.org/")

        for i, post in enumerate(feed_posts):
            if feed_length and i == feed_length:
                break
            post_url = os_path_join(url, app.builder.get_target_uri(post.docname))
            if post.section:
                post_url += "#" + post.section

            if blog.blog_feed_titles:
                content = None
            else:
                content = post.to_html(pagename, fulltext=feed_fulltext)

            feed_entry = feed.add_entry()
            feed_entry.id(post_url)
            feed_entry.link(href=post_url)
            feed_entry.author({"name": author.name for author in post.author})
            feed_entry.pubDate(post.date.astimezone())
            feed_entry.updated(post.update.astimezone())
            for tag in post.tags:
                feed_entry.category(
                    dict(
                        term=tag.name.strip().replace(" ", ""),
                        label=tag.label,
                    )
                )

            # Entry values that support templates
            title = post.title
            summary = "".join(paragraph.astext() for paragraph in post.excerpt)
            template_values = {}
            for element in ("title", "summary", "content"):
                if element in feed_templates:
                    template_values[element] = jinja2.Template(feed_templates[element]).render(**locals())
            feed_entry.title(template_values.get("title", title))
            summary = template_values.get("summary", summary)
            if summary:
                feed_entry.summary(summary)
            content = template_values.get("content", content)
            if content:
                feed_entry.content(content=content, type="html")

        parent_dir = os.path.dirname(feed_path)
        if not os.path.isdir(parent_dir):
            os.makedirs(parent_dir)

        with open(feed_path, "w", encoding="utf-8") as out:
            feed_str = feed.atom_str(pretty=True)
            out.write(feed_str.decode())

    if 0:
        # this is to make the function a generator
        # and make work for Sphinx 'html-collect-pages'
        yield
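
A detail worth isolating is the feed-template step: blog_feed_templates may supply Jinja2 template strings for an entry's title, summary, or content, and ABlog renders each one against the local namespace so that variables such as title and summary are available inside the template. A standalone sketch of that step (the template string is hypothetical):

import jinja2

feed_templates = {'title': '{{ title }} - My Blog'}
title = 'Hello world'
summary = 'First post.'

template_values = {}
for element in ('title', 'summary', 'content'):
    if element in feed_templates:
        # ABlog passes **locals(); explicit keywords are used here instead.
        template_values[element] = jinja2.Template(
            feed_templates[element]).render(title=title, summary=summary)

print(template_values.get('title', title))  # 'Hello world - My Blog'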
コード例 #59
0
ファイル: generate_feed.py プロジェクト: pydanny/pydanny-v2
    try:
        tag = sys.argv[1]
    except IndexError:
        print('Add a tag argument such as "python"')
        sys.exit(1)

    # TODO - convert to argument
    YEARS = ["2020", "2021"]

    markdowner = Markdown(extras=[
        "fenced-code-blocks",
    ])

    fg = FeedGenerator()
    fg.language('en')
    fg.id("https://daniel.roygreenfeld.com/")
    fg.title("pydanny")
    fg.author({
        "name": "Daniel Roy Greenfeld",
        "email": "*****@*****.**",
    })
    fg.link(href="https://daniel.roygreenfeld.com", rel="alternate")
    fg.logo("https://daniel.roygreenfeld.com/images/personalPhoto.png")
    fg.subtitle("Inside the Head of Daniel Roy Greenfeld")
    fg.link(href=f"https://daniel.roygreenfeld.com/feeds/{tag}.atom.xml",
            rel="self")
    # https://daniel.roygreenfeld.com/feeds/python.atom.xml
    fg.language("en")

    years = [f"_posts/posts{x}/*.md" for x in YEARS]
コード例 #60
0
                   type='html')
        fe.link(href=self.href, hreflang=self.lang)
        fe.updated(self.date.astimezone(tz=None))
        fe.author(self.authors)

        return fe


log.debug('Opening connection to %s', targeturl)
with urlopen(request) as response:
    log.debug('Feeding page into BeautifulSoup')
    soup = BeautifulSoup(response.read().decode(), features='lxml')

    log.debug('Getting all items in feed-item-container')
    items = soup.find_all('div', class_='feed-item-container')
    fg = FeedGenerator()
    fg.id(targeturl)
    fg.link(href=targeturl, rel='alternate')
    fg.title('Fate/GO Gamepress')
    fg.subtitle('Fate/Grand Order gamepress feed')
    fg.logo(logourl)
    fg.language('en')

    for item in items:
        fi = FeedItem(item)
        log.debug('\"%s\": Generating feed entry', fi.title)
        fi.generate_feedentry(fg)

    log.debug('Finishing up, generating atom.xml')
    fg.atom_file('atom.xml')
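
Only the tail of the FeedItem class survives in this excerpt (the fragment beginning at type='html') above is the end of its generate_feedentry method). A minimal reconstruction consistent with the visible usage, where the selectors and attribute sources are assumptions:

from datetime import datetime, timezone

class FeedItem:
    def __init__(self, container):
        # 'container' is one of the feed-item-container divs found above.
        link = container.find('a')
        self.title = link.get_text(strip=True)
        self.href = link['href']                # assumed to be an absolute URL
        self.lang = 'en'
        self.date = datetime.now(timezone.utc)  # the real code parses a date node
        self.authors = {'name': 'Gamepress'}    # hypothetical author dict
        self.content = str(container)

    def generate_feedentry(self, fg):
        fe = fg.add_entry()
        fe.id(self.href)
        fe.title(self.title)
        fe.content(self.content, type='html')
        fe.link(href=self.href, hreflang=self.lang)
        fe.updated(self.date.astimezone(tz=None))
        fe.author(self.authors)
        return fe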