Example No. 1
class Feeder():
    def __init__( self, url, title='', feedURL='' ):
        scraper = None
        if url.startswith( "https://twitter.com/" ):
            scraper = TwitterScraper( url )
            if title == '':
                title = "Twitter: @" + url.split('/')[3]
        elif url.startswith( "http://www.lindwurm-linden.de/termine" ):
            scraper = LindwurmScraper( url )
            if title == '':
                title = "Lindwurm: Termine"
        else:
            raise UnsupportedService( "No scraper found for this URL." )

        self.feed = FeedGenerator()        
        self.feed.id( url )
        self.feed.title( title )
        self.feed.author( { "name": url } )

        if feedURL != '':
            self.feed.link( href=feedURL, rel='self' )

        for entry in scraper.entries:
            fe = self.feed.add_entry()
            fe.id( entry['url'] )
            fe.title( entry['title'] )
            fe.link( href=entry['url'], rel='alternate' )
            fe.content( entry['text'] )

    def GetAtom( self ):
        return self.feed.atom_str( pretty=True ).decode()
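
A minimal usage sketch for the Feeder class above. The Twitter URL, the feed URL, and the surrounding error handling are illustrative assumptions; only the call sequence comes from the class itself.

# Hypothetical driver code; TwitterScraper and UnsupportedService live in the same module as Feeder.
try:
    feeder = Feeder("https://twitter.com/example",
                    feedURL="https://example.org/twitter.atom")
    print(feeder.GetAtom())  # GetAtom() decodes the bytes returned by atom_str()
except UnsupportedService as exc:
    print("No feed generated:", exc)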
Example No. 2
def generateFeeds(buffered, meta):
    utc = pytz.utc
    fg = FeedGenerator()
    fg.id(meta['id'])
    fg.title(meta['title'])
    fg.author(meta['author'])
    fg.subtitle(meta['subtitle'])
    fg.link( href=meta['link'], rel='self' )
    fg.language(meta['language'])

    for tweet in buffered:
        fe = fg.add_entry()
        fe.id(tweet['url'].decode('utf-8'))
        fe.published(utc.localize(tweet['created_at']).astimezone(pytz.timezone(locale)))
        
        #fe.guid(tweet['url'].decode('utf-8'))
        fe.link(href=tweet['url'].decode('utf-8'), rel='alternate')
        fe.title(tweet['readable_title'])
        fe.description(tweet['readable_article'])
                
        try:
            fe.author({'name': '', 'email':tweet['user_name'].decode('utf-8') + ": " + tweet['text'].decode('utf-8')})
        except Exception as e:
            logger.error(e)
            fe.author({'name': 'a', 'email':'*****@*****.**'})
Example No. 3
def rss():    
    config = public_app.config['feed']
    fg = FeedGenerator()
    fg.id('%s/blog' % Config.BASE_URL)
    fg.title(config['title'])
    fg.author( {'name': config['author'],'email': config['email']} )
    fg.description(config['desc'])
    fg.link( href=Config.BASE_URL, rel='alternate' )
    query = {
        'id': { '$regex': 'blog' },
        'current': True,
        'meta.hide': { '$ne': True }
    }
    posts = db.pages.find(query).sort('meta.created', -1)[:20]
    for post in posts:
        fe = fg.add_entry()
        fe.title(post['meta']['title'])
        if 'author' in post['meta']:
            fe.author( {'name': post['meta']['author'],'email': config['email']} )
        else:
            fe.author( {'name': config['author'],'email': config['email']} )
        fe.description(do_truncate(post['content'], 300))
        fe.link(href="%s/%s" % (Config.BASE_URL, post['id']), rel='alternate')
        fe.pubdate(utc.localize(post['meta']['created']))
        fe.content(post['content'])    
    response.headers['Content-Type'] = 'application/rss+xml'
    return fg.rss_str(pretty=True)
Example No. 4
	def GET(self):
		cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
		fg = FeedGenerator()
		#TODO create icon
		# fg.icon('http://www.det.ua.pt')
		fg.id(config.get('rss','id'))
		fg.title(config.get('rss','title'))
		fg.subtitle(config.get('rss','subtitle'))
		fg.description(config.get('rss','description'))
		fg.author({'name': config.get('rss','author_name'), 'email':config.get('rss','author_email')})
		fg.language(config.get('rss','language'))
		fg.link(href=config.get('rss','href'), rel='related')

		client = EmailClient()

		for msgn in reversed(client.listBox(config.get('imap','mailbox'))[:config.getint('rss','maxitems')]):
			cherrypy.log("RSS Entry: "+msgn)
			em = client.getEMail(msgn)
			entry = fg.add_entry()
			entry.title(em['subject'])
			entry.author({'name': em['From']['name'], 'email': em['From']['email']})
			entry.guid(config.get("main","baseurl")+'news/'+msgn)
			entry.link({'href':config.get("main","baseurl")+'news/'+msgn, 'rel':'alternate'})
			entry.pubdate(em['date'])
			entry.content(em['body'])
		return	fg.rss_str(pretty=True)
Example No. 5
def main():

    client = moduleSocial.connectTumblr()

    posts = client.posts('fernand0')
    
    fg = FeedGenerator()
    fg.id(posts['blog']['url'])
    fg.title(posts['blog']['title'])
    fg.author( {'name':posts['blog']['name'],'email':'*****@*****.**'} )
    fg.link( href=posts['blog']['url'], rel='alternate' )
    fg.subtitle('Alternate feed due to Tumblr GDPR restrictions')
    fg.language('en')

    print(len(posts['posts']))
    for i in range(len(posts['posts'])):
        fe = fg.add_entry()
        print(posts['posts'][i]['post_url'])
        if 'title' in posts['posts'][i]:
            title = posts['posts'][i]['title']
            print('T', posts['posts'][i]['title'])
        else:
            title = posts['posts'][i]['summary'].split('\n')[0]
            print('S', posts['posts'][i]['summary'].split('\n')[0])
        fe.title(title)
        fe.link(href=posts['posts'][i]['post_url'])
        fe.id(posts['posts'][i]['post_url'])

    print(fg.atom_file('/var/www/html/elmundoesimperfecto/tumblr.xml'))

    sys.exit()
Example No. 6
def feed(column_id):
    api = Api(column_id)

    with request.urlopen(api.info) as stream:
        result = stream.read().decode('utf-8')

    if not result:
        return '', 404

    info = json.loads(result)

    with request.urlopen(api.posts) as stream:
        result = stream.read().decode('utf-8')
        entries = json.loads(result)

    fg = FeedGenerator()
    fg.id(str(entries[0]['slug']))
    fg.title(info['name'])
    fg.language('zh_CN')
    fg.icon(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 's'))
    fg.logo(info['avatar']['template'].replace('{id}', info['avatar']['id']).replace('{size}', 'l'))
    fg.description(info['intro'])
    fg.author(dict(name=info['creator']['name']))
    fg.link(href=api.base_url + info['url'], rel='alternate')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry['url'])
        fe.title(entry['title'])
        fe.published(entry['publishedTime'])
        fe.updated(entry['publishedTime'])
        fe.author(dict(name=entry['author']['name']))
        fe.link(href=api.base_url + entry['url'], rel='alternate')
        fe.content(entry['content'])

    return fg.atom_str(pretty=True)
Example No. 7
def podcast_feed():
    logo_url = url_for("static", filename="wpclogo_big.png", _external=True)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_image(logo_url)
    fg.author({'name': 'Nathan Kellert', 'email': '*****@*****.**'})
    fg.link(href='http://watchpeoplecode.com/podcast_feed.xml', rel='self')
    fg.title('WPC Coders Podcast')
    fg.description('WPC Coders Podcast is a weekly peek into the lives of developers and the WatchPeopleCode community. Our goal is to keep our listeners entertained by giving them new and interesting insights into our industry as well as awesome things happening within our own community. Here, you can expect hear about some of the latest news, tools, and opportunities for developers in nearly every aread of our industry. Most importantly, we hope to have some fun and a few laughs in ways only other nerds know how.')  # NOQA

    episodes = [('ep1.mp3', 'Episode 1', datetime(2015, 2, 21, 23), 'Learn all about the WPC hosts, and where we came from in Episode 1!'),
                ('ep2.mp3', 'Episode 2', datetime(2015, 2, 28, 23), 'This week we cover your news, topics and questions in episode 2!'),
                ('ep3.mp3', 'Episode 3', datetime(2015, 3, 7, 23), "On todays podcast we talk to WatchPeopleCode's founder Alex Putilin. Hear about how the reddit search engine thousands watched him write. Also, hear the inside scoop of how WatchPeopleCode got started!"),  # NOQA
                ('ep4.mp3', 'Episode 4', datetime(2015, 3, 14, 23), "This week we talk to FreeCodeCamps Quincy Larson(http://www.freecodecamp.com) about their project that combines teaching new developers how to code and completing projects for non-profits! Lets find out how this group of streamers code with a cause!")]  # NOQA

    for epfile, eptitle, epdate, epdescription in episodes[::-1]:
        epurl = "https://s3.amazonaws.com/wpcpodcast/{}".format(epfile)
        fe = fg.add_entry()
        fe.id(epurl)
        fe.title(eptitle)
        fe.description(epdescription)
        fe.podcast.itunes_image(logo_url)
        fe.pubdate(epdate.replace(tzinfo=pytz.UTC))
        fe.enclosure(epurl, 0, 'audio/mpeg')

    return Response(response=fg.rss_str(pretty=True),
                    status=200,
                    mimetype='application/rss+xml')
Example No. 8
def main():
    session = vk.Session()
    api = vk.API(session)

    group_id = '96469126'

    group_info = api.groups.getById(group_ids=group_id, fields=['description', 'site', 'name', 'photo', 'gid'])

    assert len(group_info) == 1
    group_info = group_info[0]

    url = 'http://vk.com/club{}'.format(group_info['gid'])
    # a = api.wall.get(owner_id=-1 * group_info['gid'])
    #
    # with open('out', 'wb') as fio:
    #     pickle.dump(a, fio)

    with open('out', 'rb') as fio:
        data = pickle.loads(fio.read())

    assert len(data) > 1

    fg = FeedGenerator()
    fg.id(url)
    fg.title(_(group_info['name']))
    fg.description(_(group_info['description']))
    fg.logo(group_info['photo'])
    site_url = group_info.get('site', url) if group_info.get('site', url) else url
    fg.link(href=_(site_url))
    fg.link(href=_(site_url), rel='self')
    fg.link(href=_(site_url), rel='alternate')
    fg.author({'name': 'Alexander Sapronov', 'email': '*****@*****.**'})
    fg.webMaster('[email protected] (Alexander Sapronov)')

    pat = re.compile(r"#(\w+)")

    for x in data[1:]:
        post_link = "{}?w=wall-{}_{}".format(url, group_info['gid'], x['id'])
        e = fg.add_entry()
        # text = x.get('text', '').replace('<br>', '\n')
        text = x.get('text', '')

        e.description(_(text))
        e.author({'name': _(get_author_name(api, x.get('from_id')))})
        e.id(post_link)
        e.link(href=_(post_link))
        e.link(href=_(post_link), rel='alternate')

        tags = pat.findall(text)

        title = x.get('text', '')
        for tag in tags:
            e.category(term=_(tag))
            title = title.replace('#{}'.format(tag), '')

        title = re.sub('<[^<]+?>', ' ', title)
        title = textwrap.wrap(title, width=80)[0]
        e.title(_(title.strip()))

    fg.rss_file('rss.xml')
Example No. 9
def feed():
    """
    Generate atom feed
    """
    entries = parse_posts(0, C.feed_count)
    fg = FeedGenerator()
    fg.id(str(len(entries)))
    fg.title(C.title)
    fg.subtitle(C.subtitle)
    fg.language(C.language)
    fg.author(dict(name=C.author, email=C.email))
    fg.link(href=C.root_url, rel='alternate')
    fg.link(href=make_abs_url(C.root_url, 'feed'), rel='self')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry.get('url'))
        fe.title(entry.get('title'))
        fe.published(entry.get('date'))
        fe.updated(entry.get('updated') or entry.get('date'))
        fe.link(href=make_abs_url(C.root_url, entry.get('url')), rel='alternate')
        fe.author(dict(name=entry.get('author'), email=entry.get('email')))
        fe.content(entry.get('body'))

    atom_feed = fg.atom_str(pretty=True)
    return atom_feed
Example No. 10
class YoutubeFeed:  
    ydl_opts = {
        'format': 'bestaudio/best',
        'outtmpl': '%(id)s.%(ext)s',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }]
    }

    def __init__(self, name):
        self.name = name
        self.ydl = youtube_dl.YoutubeDL(self.ydl_opts)

        self.fg = FeedGenerator()
        self.fg.title(name)
        self.fg.author({"name": "Youtube Audio Feed", "email": ""})
        self.fg.link(href="http://www.foo.bar.baz.com", rel="alternate")
        self.fg.description("Personalized Youtube audio feed")
        self.fg.generator("")
        self.fg.docs("")

    def add_video(self, url):
        info = self.ydl.extract_info(url, download=True)
        entry = self.fg.add_entry()
        entry.id(info['id'])
        entry.title(info['title'])
        entry.description(info['description'])
        entry.enclosure(info['id'] + ".mp3", str(info['duration']), 'audio/mpeg')

    def save(self):
        self.fg.rss_file(self.name + '.xml')
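
A hedged usage sketch for the YoutubeFeed class above; the feed name and video URL are placeholders, and downloading the audio requires youtube_dl plus ffmpeg to be installed.

# Hypothetical usage; relies on the YoutubeFeed class defined above.
feed = YoutubeFeed("my_audio_feed")
feed.add_video("https://www.youtube.com/watch?v=EXAMPLE")  # placeholder video URL
feed.save()  # writes my_audio_feed.xml next to the downloaded mp3 files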
Example No. 11
def generate(app, category, torrents):
    """
    generate an rss feed from category with torrents as results
    if category is None this feed is for all categories
    """
    feed = FeedGenerator()
    if category:
        url = util.fullSiteURL(app, 'feed', '{}.rss'.format(category))
    else:
        url = util.fullSiteURL(app, 'feed', 'all.rss')
    feed.link(href=url, rel="self")
    feed.id(url)
    if category:
        title = "new {} torrents on index ex invisibilis".format(category)
    else:
        title = "new torrents on index ex invisibilis"
    feed.title(title)
    feed.description(title)
    feed.author({"name": "anonymous"})
    feed.language("en")
    for torrent in torrents:
        item = feed.add_entry()
        url = util.fullSiteURL(app, torrent.downloadURL())
        item.id(torrent.infohash)
        item.link(href=url)
        item.title(torrent.title)
        item.description(torrent.summary(100))
    return feed
Example No. 12
def _filter_fb_rss_feeed(url):
    parsed_feed = feedparser.parse(url)
    filtered_entries = filter(
        lambda x: ' shared a link: "' in x.title, parsed_feed.entries)

    fg = FeedGenerator()
    fg.id('https://fb-notifications-to-pocket.herokuapp.com/')
    fg.title('Facebook Notifications to Pocket')
    fg.author({'name': 'Pankaj Singh', 'email': '*****@*****.**'})
    fg.description(
        '''Filter FB notifications which contain a link and generate a new rss feed which will be used by IFTTT''')
    fg.link(href='https://fb-notifications-to-pocket.herokuapp.com/')

    for entry in filtered_entries:
        root = etree.HTML(entry.summary_detail.value)
        title = entry.title.split(" shared a link: ")[1].strip()[1:-2]
        author_name = entry.title.split(" shared a link: ")[0].strip()
        url = urlparse.parse_qs(
            urlparse.urlparse(root.findall(".//a")[-1].attrib["href"]).query)["u"][0]

        title = get_title_for_url(url) or title

        fe = fg.add_entry()
        fe.id(entry.id)
        fe.link(href=url)
        fe.published(entry.published)
        fe.author({'name': author_name})
        fe.title(title)

    return fg.atom_str(pretty=True)
Example No. 13
 def generate_feed(page=1):
     feed = FeedGenerator()
     feed.id("https://pub.dartlang.org/feed.atom")
     feed.title("Pub Packages for Dart")
     feed.link(href="https://pub.dartlang.org/", rel="alternate")
     feed.link(href="https://pub.dartlang.org/feed.atom", rel="self")
     feed.description("Last Updated Packages")
     feed.author({"name": "Dart Team"})
     i = 1
     pager = QueryPager(int(page), "/feed.atom?page=%d",
                        Package.all().order('-updated'),
                        per_page=10)
     for item in pager.get_items():
         i += 1
         entry = feed.add_entry()
         for author in item.latest_version.pubspec.authors:
             entry.author({"name": author[0]})
         entry.title("v" + item.latest_version.pubspec.get("version") +\
             " of " + item.name)
         entry.link(link={"href": item.url, "rel": "alternate",
             "title": item.name})
         entry.id(
             "https://pub.dartlang.org/packages/" + item.name + "#" +\
             item.latest_version.pubspec.get("version"))
         entry.description(
             item.latest_version.pubspec
             .get("description", "Not Available"))
         readme = item.latest_version.readme
         if not readme is None:
             entry.content(item.latest_version.readme.render(), type='html')
         else:
             entry.content("<p>No README Found</p>", type='html')
     return feed
Example No. 14
  def makeRss(self):
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://hypecast.blackmad.com/' + self.mode)
    fg.title('Hype Machine Robot Radio: ' + self.mode)
    fg.author( {'name':'David Blackmad','email':'*****@*****.**'} )
    fg.logo('http://dump.blackmad.com/the-hype-machine.jpg')
    fg.language('en')
    fg.link(href='http://hypecast.blackmad.com/' + self.mode)
    fg.description('Hype Machine Robot Radio: ' + self.mode)

    description = ' <br/>'.join(['%s. %s' % (index + 1, self.mk_song_id(s)) for index, s in enumerate(self.songs)])

    fe = fg.add_entry()
    fe.title(self.track_name)
    fe.description(description)
    fe.id(self.filename)
    # add length
    print(self.relative_dir)
    print(self.filename)
    fe.enclosure(url = 'http://hypecast.blackmad.com/%s' % (self.filename), type="audio/mpeg")

    rss_str = fg.rss_str()
    newItem = ET.fromstring(rss_str)[0].find('item')
    out = open(self.get_filename('xml'), 'w')
    out.write(ET.tostring(newItem))
    out.close()
    self.updateRss()
Example No. 15
    def build_feed(self):
        "Build the feed given our existing URL"
        # Get all the episodes
        page_content = str(requests.get(self.url).content)
        parser = BassdriveParser()
        parser.feed(page_content)
        links = parser.get_links()

        # And turn them into something usable
        fg = FeedGenerator()
        fg.id(self.url)
        fg.title(self.title)
        fg.description(self.title)
        fg.author({'name': self.dj})
        fg.language('en')
        fg.link({'href': self.url, 'rel': 'alternate'})
        fg.logo(self.logo)

        for link in links:
            fe = fg.add_entry()
            fe.author({'name': self.dj})
            fe.title(link[0])
            fe.description(link[0])
            fe.enclosure(self.url + link[1], 0, 'audio/mpeg')

            # Bassdrive always uses date strings of
            # [yyyy.mm.dd] with 0 padding on days and months,
            # so that makes our lives easy
            date_start = link[0].find('[')
            date_str = link[0][date_start:date_start+12]
            published = datetime.strptime(date_str, '[%Y.%m.%d]')
            fe.pubdate(UTC.localize(published))
            fe.guid((link[0]))

        return fg
Example No. 16
def create_mock_fg():
    fg = FeedGenerator()
    fg.id(FEED_ID)
    fg.title('Some Test Feed')
    fg.author({'name': 'Edfward', 'email': '*****@*****.**'})
    fg.subtitle('Test feed subtitle!')
    fg.link(href=FEED_ID, rel='self')
    fg.language('en')
    return fg
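
Rendering the mock feed works the same way as in the other examples; a small sketch, assuming FEED_ID is defined in the surrounding test module:

# Sketch only: serialize the mock feed to an Atom byte string.
fg = create_mock_fg()
atom_bytes = fg.atom_str(pretty=True)  # bytes; call .decode() to get a str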
Example No. 17
def rss(conversation,
        url,
        author_name,
        author_email,
        title,
        subtitle,
        language,
        output_path):
    """Export all the links of the conversation in a simple RSS feed"""
    from feedgen.feed import FeedGenerator
    fg = FeedGenerator()
    fg.id(url)
    fg.title(title)
    fg.author(
        {
            'name': author_name,
            'email': author_email,
        }
    )
    fg.link(
        href=url,
        rel='alternate'
    )
    if subtitle:
        fg.subtitle(subtitle)
    fg.language(language)
    for message in conversation.history():
        match = re.search(
            "^.*<(?P<url>[^>|]+)\|?(?P<title>[^>]+)?>.*$",
            message.data["text"],
            flags=re.MULTILINE
        )
        if match is not None:
            fe = fg.add_entry()
            link = match.group("url")
            title = match.group("title") or link
            date = naive_to_local(datetime.datetime.fromtimestamp(float(message.data["ts"])))
            description = message.data["text"]
            if "attachments" in message.data:
                attachment = [a for a in message.data["attachments"] if
                              a["title_link"] == link][0]
                title += " | " + attachment["title"]
                description += """

""" + attachment["text"]
            fe.id(link)
            fe.title(title)
            fe.link(href=link)
            fe.published(date)
            user = config.slack.get_user(message.data["user"])
            author = {
                "name": message.data["username"],
                "email": user.email or "noemail",
            }
            fe.author(author)
            fe.description(description)
    fg.rss_file(output_path, pretty=True)
Example No. 18
 def init_fg(self, repo_info):
     fg = FeedGenerator()
     title = 'Recent commits to ' + repo_info['full_name']
     fg.title(title)
     fg.link(href=repo_info['html_url'])
     fg.updated(repo_info['updated_at'])
     fg.id(repo_info['html_url'])
     fg.author(repo_info['author'])
     return fg
Example No. 19
def generate_empty_rss_feed(group_name):

    fg = FeedGenerator()
    fg.title("VTS Raspored - Grupa " + group_name)
    fg.author( {'name':'Veselin Romic','email':'*****@*****.**'} )
    fg.language('sr')
    fg.description("Automatski se salje notifikacija kad se promeni grupni raspored.")
    fg.link(href='https://eref.vts.su.ac.rs/')

    return fg.rss_str(pretty=True)
Example No. 20
def main(argv):
    ap = argparse.ArgumentParser(
        description='''
Render RSS and Atom feeds from a CSV of food inspection data.
''')
    ap.add_argument(
        '-v', '--verbose', action='count', dest='verbosity', default=0,
        help='increase global logging verbosity; can be used multiple times')
    ap.add_argument(
        '-f', '--format', choices=['rss', 'atom'], default='atom',
        help='''
specify the format to use when rendering the feed (default: %(default)s)
''')
    ap.add_argument(
        '-n', '--num_incidents', metavar='<num>', type=int, default=10,
        help='render <num> recent incidents in the feed (default: %(default)s)')
    ap.add_argument(
        'flavor', nargs='?', default='all', choices=['all', 'failures'],
        help='select the flavor of feed to render (default: %(default)s)')

    args = ap.parse_args()

    logging.basicConfig(
            level=logging.ERROR - args.verbosity * 10,
            style='{',
            format='{}: {{message}}'.format(ap.prog))

    fg = FeedGenerator()
    fg.id('http://pgriess.github.io/dallas-foodscores/')
    fg.link(href=fg.id(), rel='self')
    fg.title('Dallas Food Inspection Scores')
    fg.subtitle('''
Food inspection scores from the official City of Dallas dataset; updated daily
''')
    fg.description(fg.subtitle())
    fg.language('en')
    fg.author(
        name='Peter Griess',
        email='*****@*****.**',
        uri='https://twitter.com/pgriess')

    for i in get_inspections_to_feed(sys.stdin, args.num_incidents,
            args.flavor):
        fe = fg.add_entry()
        fe.title('{name} at {address} scored {score}'.format(
            name=i.name, address=i.address, score=i.score))
        fe.id(fg.id() + '#!/' + str(abs(hash(i))))
        fe.link(href=fe.id(), rel='alternate')
        fe.content(fe.title())
        fe.published(TZ.localize(i.date))

    if args.format == 'atom':
        print(fg.atom_str(pretty=True))
    else:
        print(fg.rss_str(pretty=True))
Example No. 21
def rss(request):
    # track it!
    #   v=1              // Version.
    #   &tid=UA-XXXXX-Y  // Tracking ID / Property ID.
    #   &cid=555         // Anonymous Client ID.

    #   &t=pageview      // Pageview hit type.
    #   &dh=mydemo.com   // Document hostname.
    #   &dp=/home        // Page.
    #   &dt=homepage     // Title.
    angrates_uuid = uuid.UUID('f93c5388-f60b-5159-bbfc-d08d6f7b401f')
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')

    cid = uuid.uuid5(angrates_uuid, ip)

    data = {
        'v': 1,
        'tid': 'UA-19269567-1',
        'cid': cid,
        't': 'pageview',
        'dh': 'armstrongandgettybingo.com',
        'dp': '/rss/',
        'dt': 'Podcast',
    }

    requests.post('https://www.google-analytics.com/collect', data=data)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id('http://www.armstrongandgettybingo.com/rss')
    fg.podcast.itunes_category('News & Politics', 'Conservative (Right)')
    fg.podcast.itunes_explicit('no')
    fg.title('The Armstrong and Getty Show (Bingo)')
    fg.author( {'name':'Ben Friedland','email':'*****@*****.**'} )
    fg.link( href='http://www.armstrongandgettybingo.com', rel='alternate' )
    fg.logo('https://s3-us-west-1.amazonaws.com/bencast/bingologo.png')
    fg.subtitle('Armstrong and Getty Bingo')
    fg.description('The Armstrong and Getty Show - Unofficial Feed including Archives back to 2001.')
    fg.link( href='http://www.armstrongandgettybingo.com/rss', rel='self' )
    fg.language('en')
    pacific = pytz.timezone('America/Los_Angeles')

    for hour in Hour.objects.all().order_by('-pub_date'):
        fe = fg.add_entry()
        fe.id(hour.link)
        fe.title(hour.title)
        fe.description(hour.description)
        fe.enclosure(hour.link, 0, 'audio/mpeg')
        fe.published(pacific.localize(hour.pub_date))
    return HttpResponse(fg.rss_str(pretty=True), content_type='application/rss+xml')
Example No. 22
def create_fg():
  # Create the feed
  fg = FeedGenerator()
  fg.id("http://www.accre.vanderbilt.edu")
  fg.title("ACCRE's Status Feed")
  fg.author(dict(name="Josh Arnold", email="*****@*****.**"))
  fg.link(href="http://www.accre.vanderbilt.edu", rel="alternate")
  fg.logo("http://www.accre.vanderbilt.edu/"
          "wp-content/themes/ashford/favicon.ico")
  fg.subtitle("ACCRE's Status Feed")
  fg.language('en')
  return fg
Example No. 23
 def get_feedgenerator(self):
     fg = FeedGenerator()
     fg.id('http://pod.w-me.net')
     fg.title('W-Me Podcast')
     fg.description('W-Me podcast')
     fg.author( {'name':'Alex Dai','email':'*****@*****.**'} )
     fg.link( href='http://pod.w-me.net', rel='alternate' )
     fg.logo('http://pandodaily.files.wordpress.com/2012/08/shutterstock_58664.jpg')
     #fg.subtitle('This is a cool feed!')
     fg.link( href='http://pod.w-me.net/feed.atom', rel='self' )
     fg.language('en')
     fg.load_extension('podcast')
     fg.podcast.itunes_category('Technology', 'Podcasting')   
     return fg
Example No. 24
    def get(self):
        fg = FeedGenerator()
        fg.id("http://test.ts")
        fg.title("My Test Feed")
        fg.icon("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
        fg.author({'name': "The Author", 'email': "*****@*****.**"})

        fg.link(href="http://example.org/index.atom?page=2", rel="next")

        fg.link(href="http://test.ts", rel="alternate")
        fg.logo("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
        fg.description("Este é o monstro do lago 1")
        fg.subtitle("This is an example feed!")
        fg.language("en-us")
        # Handle this:
        #< sy:updatePeriod > hourly < / sy:updatePeriod >
        #< sy:updateFrequency > 1 < / sy:updateFrequency >

        fg.lastBuildDate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        fi = fg.add_item()
        fi.id("http://test.ts/id/1", )
        #fi.link(link="http://test.ts/id/1")
        fi.title("Monstro do Lago 1")
        fi.description("Este é o monstro do lago 1")
        fi.comments("http://test.ts/id/1/comments")
        fi.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        fi = fg.add_item()
        fi.id("http://test.ts/id/2")
        fi.title("Monstro do Lago 2")
        fi.description("Este é o monstro do lago 2")
        fi.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        #test = fg.atom_str(pretty=True)

        rss_str = fg.rss_str(pretty=True)
        self.set_header("Content-Type", 'application/xml; charset="utf-8"')
        #self.set_header("Content-Disposition",
        # "attachment; filename='test.xml'")
        self.write(rss_str)


        #if regexp.search(word) is not None:
        #    print
        #    'matched'
        if self.is_browser_mobile():
            print("buu")
        else:
            print(self.request.headers["User-Agent"])
Example No. 25
def render_feed(text_paths, outpath):
    # http://rhodesmill.org/brandon/feed
    # http://rhodesmill.org/brandon/category/python/feed
    # http://rhodesmill.org/brandon/feed/atom/

    t0 = datetime.min.time()

    def fix(d):
        dt = datetime.combine(d, t0)
        return timezone('US/Eastern').localize(dt)

    posts = [post_info(path) for path in text_paths if date_of(path)]
    posts = sorted(posts, key=lambda post: post['date'])
    posts = posts[-1:]
    most_recent_date = max(post['date'] for post in posts)

    def full(url):
        return 'http://rhodesmill.org/' + url.lstrip('/')

    fg = FeedGenerator()
    fg.id(full('/'))
    fg.author({'name': 'Brandon Rhodes'})
    fg.language('en')
    fg.link(href=full('/brandon/'), rel='alternate')
    if 'python' in outpath:
        fg.link(href=full('/brandon/category/python/feed/'), rel='self')
    else:
        fg.link(href=full('/brandon/feed/'), rel='self')
    fg.subtitle('Thoughts and ideas from Brandon Rhodes')
    fg.title("Let's Discuss the Matter Further")
    fg.updated(fix(most_recent_date))

    for post in posts:
        url = full(post['url_path'])
        excerpt = truncate_at_more(post['body_html'], url)

        fe = fg.add_entry()
        fe.content(excerpt, type='html')
        fe.guid(url, permalink=True)
        fe.id(url)
        fe.link({'href': url})
        fe.published(fix(post['date']))
        fe.title(post['title'])
        fe.updated(fix(post['date']))

    rss = fg.rss_str(pretty=True)
    fg.link(href=full('/brandon/feed/atom/'), rel='self', replace=True)
    atom = fg.atom_str(pretty=True)

    return rss, atom
Example No. 26
def setup_feed():
    fg = FeedGenerator()

    fg.load_extension("podcast")

    fg.language("en")
    fg.id("https://jdelman.me/potato")
    fg.author(name="Potato", email="*****@*****.**")
    fg.link(href="https://jdelman.me/potato", rel="alternate")
    fg.logo("https://jdelman.me/static/potato.jpg")
    fg.title("Potato - Josh's Saved Videos")
    fg.subtitle("Automatically generated RSS.")

    return fg
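
A short sketch of how the feed returned by setup_feed() might be filled and rendered; the episode URL and metadata below are placeholders, not part of the original project.

# Hypothetical episode entry added to the feed returned by setup_feed().
fg = setup_feed()
fe = fg.add_entry()
fe.id("https://jdelman.me/potato/episodes/1")  # placeholder id
fe.title("Saved video #1")                     # placeholder title
fe.enclosure("https://jdelman.me/potato/media/1.mp3", 0, "audio/mpeg")
rss_bytes = fg.rss_str(pretty=True)            # bytes, ready to write to disk or serve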
Example No. 27
def create_feed_container(app):
    #from feedformatter import Feed
    feed = FeedGenerator()
    feed.title(app.config.project)
    feed.link(href=app.config.feed_base_url)
    feed.author(dict(name=app.config.feed_author))
    feed.description(app.config.feed_description)
    
    if app.config.language:
        feed.language(app.config.language)
    if app.config.copyright:
        feed.copyright(app.config.copyright)
    app.builder.env.feed_feed = feed
    if not hasattr(app.builder.env, 'feed_items'):
        app.builder.env.feed_items = {}
Example No. 28
    def add_feed(self, feed_id, yt_playlist):
        feed = FeedGenerator()

        feed.load_extension("podcast")
        feed.id(feed_id)
        feed.title(yt_playlist["snippet"]["title"])
        feed.author({"name": yt_playlist["snippet"]["channelTitle"]})
        feed.description(yt_playlist["snippet"]["description"])
        feed.logo(yt_playlist["snippet"]["thumbnails"]["standard"]["url"])
        feed.link(href="https://www.youtube.com/playlist?list=%s" % (yt_playlist["id"]))
        feed.rss_str(pretty=True)
        feed.last_updated = 0

        self.feeds[feed_id] = feed

        return feed
Example No. 29
def _generate_feed(feed, feeds_config, atom_path):
    utcnow = arrow.utcnow()
    fg = FeedGenerator()
    fg.title(feeds_config['feeds'][feed]['title'])
    fg.id(_build_tag_uri('2015-12-12', feed, utcnow, feeds_config, atom_path))
    fg.language('en-ca')
    fg.author(name='Salish Sea MEOPAR Project',
              uri=f'https://{feeds_config["domain"]}/')
    fg.rights(f'Copyright 2015-{utcnow.year}, Salish Sea MEOPAR Project '
              f'Contributors and The University of British Columbia')
    fg.link(href=(f'https://{feeds_config["domain"]}/{atom_path}/{feed}'),
            rel='self',
            type='application/atom+xml')
    fg.link(href=f'https://{feeds_config["domain"]}/storm-surge/forecast.html',
            rel='related',
            type='text/html')
    return fg
Example No. 30
    def _build_feed(self):
        router = Router(self._config)
        fp = filterPlaceholders

        fg = FeedGenerator()
        fg.id(self._config.site_prefix)
        fg.title(self._config.site_name)
        fg.author({
            'name': fp(self._config.author),
            'email': fp(self._config.email)
        })
        fg.link(href=self._config.domain_name + self._config.site_prefix,
                rel='alternate')
        fg.logo(fp(self._config.site_logo))
        fg.subtitle(fp(self._config.description))
        fg.description(fp(self._config.description) or ' ')
        fg.language(fp(self._config.language))
        fg.lastBuildDate(moment.now().locale(self._config.locale).date)
        fg.pubDate(moment.now().locale(self._config.locale).date)

        for post in self._posts[:10]:
            meta = post.meta
            fe = fg.add_entry()
            fe.title(meta['title'])
            fe.link(href=self._config.domain_name +
                    router.gen_permalink_by_meta(meta))
            fe.guid(router.gen_permalink_by_meta(meta), True)
            fe.pubDate(meta['date'].date)
            fe.author({
                'name': meta['author'],
                'uri': fp(self._config.author_homepage),
                'email': fp(self._config.email)
            })
            fe.content(post.parsed)

        if not os.path.exists(
                unify_joinpath(self._config.build_dir, 'feed/atom')):
            os.makedirs(unify_joinpath(self._config.build_dir, 'feed/atom'))

        fg.rss_file(unify_joinpath(self._config.build_dir, 'feed/index.xml'))
        fg.rss_file(unify_joinpath(self._config.build_dir, 'feed/index.html'))
        fg.atom_file(
            unify_joinpath(self._config.build_dir, 'feed/atom/index.xml'))
        fg.atom_file(
            unify_joinpath(self._config.build_dir, 'feed/atom/index.html'))
Example No. 31
def get_fg(url, social=False):
  res = requests.get(url)
  soup = BeautifulSoup(res.text)
#load the articles into classes
  articles = []

  author_tag = soup.findAll("div", attrs={"class":"author-header"})
  #at = author_tag.findAll("div", attrs={"class":"name"})
  author = None
  if len(author_tag)>0:
    at = author_tag[0].findAll(attrs={"class":"name"})
    #author = ' '.join(author_tag[0].get_text().split())
    author = at[0].text.strip()

  for article in soup.findAll(attrs={"class":"article"}):
    articles.append(AtlanticArticle(article, author=author,social=social))

  #import pdb; pdb.set_trace()
#set up the feed, with basic metadata
  fg = FeedGenerator()
  fg.link(href=url)
  if(author is None and len(articles)>0):
    fg.author(name=articles[0].bylines[0])
  else:
    fg.author(name=author)

  title_tag = soup.findAll(attrs={"class":"display-category"})
#set the title if there's not a category -- e.g. it's a person's page
  if(len(title_tag)>0):
    title = ' '.join(title_tag[0].get_text().split())
  else:
    title = "Atlantic posts by {0}".format(author.encode('ascii', 'ignore'))
  fg.title(title)

#set the description
  description = soup.findAll(attrs={"class":"bio"})
  if len(description)>0:
    fg.description(' '.join(description[0].get_text().split()))
  else:
    fg.description("RSS Feed for {0}, generated by Pond Hopper 0.1".format(title))

#add each article to the feed
  for article in articles:
    article.append_feedgen(fg.add_entry())
  return fg, articles
Example No. 32
    def build(self, rss=False):
        if rss:
            self.site.rss_url = '/rss.xml'
            fg = FeedGenerator()
            fg.title(self.site.name)
            fg.author({'name': self.site.author})
            fg.link(href=self.site.base_url, rel='alternate')
            fg.subtitle(self.site.description)

        start = time.time()
        getLogger().info("Copy Assets")
        self.output.copyAssets(self.basedir)

        getLogger().info("Start Build of static content")
        posts = []
        for p in self.source.getPosts():
            if not p.published:
                getLogger().info("Ingnoring draft Post %s (%s)", p.title, p.slug)
                continue

            posts.append(p)
            if rss:
                fe = fg.add_entry()
                fe.id("%s/%s" % (self.site.base_url, p.permalink))
                fe.link(href=fe.id(), rel='alternate')
                fe.title(p.title)
                fe.published(p.created_at.replace(tzinfo=pytz.timezone(self.site.timezone)))
                category = []
                for t in p.tags:
                    category.append({'term': t})
                fe.category(category)
                fe.content(p.content)

            Output.storeData(os.path.join(self.basedir, p.permalink), self.output.render(self.site, post=p))
            getLogger().debug("Adding Post \"%s\" (%s)", p.title, p.slug)

        posts = sorted(posts, key=lambda k: k.created_at, reverse=True)
        Output.storeData(os.path.join(self.basedir, 'index.html'), self.output.render(self.site, posts=posts, post=None, is_home=True, pagination=None))

        if rss:
            Output.storeData(os.path.join(self.basedir, 'rss.xml'), fg.rss_str(pretty=True))
            getLogger().debug("You awesome RSS feed has been generated")


        getLogger().info("It took %d seconds to generate your awesome blog" % (time.time() - start))
Example No. 33
def user_rss(user, subnodes):
    fg = FeedGenerator()
    # not sure what this field is for
    fg.id(f'https://anagora.org/feed/@{user}.rss')
    fg.title(f'Agora feed for user @{user}')
    fg.author({'name': f'anagora.org/@{user}', 'email': '*****@*****.**'})
    fg.logo('https://anagora.org/favicon.ico')
    fg.subtitle('The Agora is a crowdsourced distributed knowledge graph.')
    fg.link(href=f'https://anagora.org/feed/@{user}', rel='self')
    fg.language('en')
    for subnode in subnodes:
        fe = fg.add_entry()
        fe.id(f'{subnode.uri}')
        fe.title(f'{subnode.uri}')
        fe.content(f'{subnode.content}')
        fe.description(f'A post by user @{user} in node [[{subnode.node}]].')
        fe.link(href=f'https://anagora.org/@{user}/{subnode.node}')
    return fg.rss_str(pretty=True)
Example No. 34
def gen_feed():
    fg = FeedGenerator()
    fg.id(f"{ID}")
    fg.title(f"{USERNAME} notes")
    fg.author({"name": USERNAME, "email": "*****@*****.**"})
    fg.link(href=ID, rel="alternate")
    fg.description(f"{USERNAME} notes")
    fg.logo(ME.get("icon", {}).get("url"))
    fg.language("en")
    for item in DB.activities.find(
        {"box": Box.OUTBOX.value, "type": "Create"}, limit=50
    ):
        fe = fg.add_entry()
        fe.id(item["activity"]["object"].get("url"))
        fe.link(href=item["activity"]["object"].get("url"))
        fe.title(item["activity"]["object"]["content"])
        fe.description(item["activity"]["object"]["content"])
    return fg
Example No. 35
    def create_feed_generator(self, content_name: str) -> FeedGenerator:
        fg = FeedGenerator()
        fg.id(self.website_id)
        fg.title(self.website_title)
        fg.description(self.website_description)
        fg.author(self.website_author)
        fg.link(href=self.website_url, rel="alternate")
        fg.language(self.website_language)

        for c in self.content_modules[content_name]:
            fe = fg.add_entry()
            fe.id(f"{self.website_url}?content={str(c.key)}")
            fe.title(c.long_title)
            fe.published(c.content_date)
            fe.description(c.long_title)
            fe.link(href=f"{self.website_url}?content={str(c.key)}")

        return fg
Example No. 36
def render_rss():
    feed_url = urljoin(URL, FEED_NAME)
    fg = FeedGenerator()
    fg.title("Ian Kinsey")
    fg.subtitle("Thoughts in software development")
    fg.author({"name": "Ian Kinsey", "email": "*****@*****.**"})
    fg.link(href=URL, rel="alternate")
    fg.link(href=feed_url, rel="self")
    fg.language("en")

    for name in ARTICLES:
        article = get_article(name)
        entry = fg.add_entry()
        entry.title(article['title'])
        entry.published(article['date'])
        entry.link(href=article['url'])

    fg.rss_file(join(DIST_DIR, FEED_NAME))
Example No. 37
    def _generate_feed_from_last_articles(cls,
                                          changelog,
                                          registry,
                                          how_many=10):
        fg = FeedGenerator()
        fg.title(settings.blog_name)
        fg.author({'name': settings.twitter_handle.replace("@", "")})
        fg.link(href=settings.blog_url, rel='alternate')
        fg.link(href=changelog.atom_feed_url, rel='self')
        fg.id(changelog.atom_feed_url)
        fg.language('en')

        for cnt, post in enumerate(changelog.posts):
            if cnt >= how_many:
                break

            cls._add_item_to_feed(registry, fg, post)

        return fg.atom_str(pretty=True)
Example No. 38
def generate_rss(pages_info=None):
    fg = FeedGenerator()
    fg.id(conf['base_url'])
    fg.title(conf['title'])
    fg.author({'name': conf['author'], 'email': conf['email']})
    fg.link(href=conf['base_url'], rel='alternate')
    fg.subtitle(conf['description'])
    fg.link(href=conf['base_url'] + '/rss.xml', rel='self')
    fg.language('en')
    for post in pages_info:
        fe = fg.add_entry()
        fe.id('http://blog.elijahcaine.me/' + post['url'])
        fe.title(post['title'])
        fe.author({'name': conf['author'], 'email': conf['email']})
        fe.link(href=conf['base_url'] + post['url'], rel='alternate')
        fe.description(post['content']['fragment'])
    rssfeed = fg.rss_str(pretty=True)
    fg.rss_file('build/' + conf['rss_feed'])
    return rssfeed
Example No. 39
def saveFeed(listings, title, path):

    url = githubRepoURL + title + ".xml"

    # Create a feed generator
    fg = FeedGenerator()

    # Create the feed's title
    fg.id(url)
    fg.title(title)
    fg.author({'name': 'Ben Snell'})
    fg.description("NYC 2BR Apartment Listings in " + title)
    fg.link(href=url, rel='alternate')
    fg.language('en')
    time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "-05:00"
    fg.pubDate(time)
    fg.updated(time)

    for apt in listings:

        e = fg.add_entry()

        e.id(apt[0])
        e.title("$" + apt[1] + "  //  " + apt[4])
        e.link(href=apt[0])

        text = ""
        if apt[5] != "":
            imgs = apt[5].split(" ")
            for i in range(len(imgs)):
                text += "<img src=\"" + imgs[i] + "\" /> "
                if i == 0:
                    text += "<p>" + apt[8] + "</p>"
        else:
            text += "<p>" + apt[8] + "</p>"
        e.content(type="html", content=text)

        # This doesn't seem to work:
        e.pubDate(datetime2RSSString(clDate(apt[2])))
        e.updated(datetime2RSSString(clDate(apt[2])))

    fg.atom_str(pretty=True)
    fg.atom_file(path)
Example No. 40
    def render_rss(self, post_list):
        router = Router(self._config)
        fg = FeedGenerator()
        fg.id(self._config.site_prefix)
        fg.title(self._config.site_name)
        fg.author({'name': self._config.author, 'email': self._config.email})
        fg.link(href=self._config.site_prefix, rel='alternate')
        fg.logo(self._config.site_logo)
        fg.subtitle(self._config.description)
        fg.language('zh-CN')
        fg.lastBuildDate(moment.now().locale('Asia/Shanghai').date)
        fg.pubDate(moment.now().locale('Asia/Shanghai').date)

        for post in post_list[:10]:
            meta = post.meta
            fe = fg.add_entry()
            fe.title(meta['title'])
            fe.link(href=router.gen_permalink_by_meta(meta))
            fe.guid(router.gen_permalink_by_meta(meta), True)
            fe.pubDate(meta['date'].date)
            fe.author({
                'name': meta['author'],
                'uri': self._config.author_homepage,
                'email': self._config.email
            })
            fe.content(post.parsed)

        if not os.path.exists(
                Utils.unify_joinpath(self._config.build_dir, 'feed/atom')):
            os.makedirs(
                Utils.unify_joinpath(self._config.build_dir, 'feed/atom'))

        fg.rss_file(
            Utils.unify_joinpath(self._config.build_dir, 'feed/index.xml'))
        fg.rss_file(
            Utils.unify_joinpath(self._config.build_dir, 'feed/index.html'))
        fg.atom_file(
            Utils.unify_joinpath(self._config.build_dir,
                                 'feed/atom/index.xml'))
        fg.atom_file(
            Utils.unify_joinpath(self._config.build_dir,
                                 'feed/atom/index.html'))
Example No. 41
def make_csdn_rss():
    fg = FeedGenerator()
    fg.id('http://blog.csdn.net')
    fg.title(u'CSDN 博客频道')
    fg.author({
        'name': 'pfchai',
    })
    fg.link(href='http://blog.csdn.net', rel='self')
    fg.description(u"csdn 首页博客")

    csdn = CSDN()
    for item in csdn.get_item():
        fe = fg.add_entry()
        fe.id(item['link'])
        fe.title(item['title'])
        fe.author({'name': item['author']})
        fe.description(item['description'])
        fe.content(item['description'])

    return fg
Example No. 42
def feed():
    tz = app.config['TIMEZONE']
    posts = helpers.get_posts()

    feed = FeedGenerator()
    feed.title('%s' % app.config['TITLE'])
    feed.link(href=app.config['BASE_URL'] + url_for('feed'), rel='self')
    feed.subtitle(app.config.get('DESCRIPTION', ""))
    feed.author(name=app.config.get('AUTHOR', ""))
    feed.link(href=app.config['BASE_URL'], rel='alternate')

    for p in posts[:10]:
        post = flatpages.get(p.path)
        if ('POST_LINK_STYLE' in app.config
                and app.config['POST_LINK_STYLE'] == "date"):
            url = "%s/%s" % (app.config['BASE_URL'], p.slug)
        else:
            url = "{}{}".format(
                app.config['BASE_URL'],
                url_for('nested_content',
                        name=p.slug,
                        dir=app.config['POST_DIRECTORY'],
                        ext='html'))

        entry = feed.add_entry()
        entry.title(p.meta['title'])
        entry.guid(guid=url, permalink=True)
        entry.author(name=p.meta.get('author', app.config.get('AUTHOR', "")))
        entry.link(href=url)
        entry.updated(
            timezone(tz).localize(p.meta.get('updated', p.meta['date'])))
        entry.published(timezone(tz).localize(p.meta['date']))
        entry.description(post.meta.get('description', ''))
        # It takes a while to render all of the HTML here but
        # then at least it is in memory and the rest of the
        # build process goes quickly. The rendering has to
        # happen anyway so there isn't any performance increase
        # by not including the full HTML here in content.
        entry.content(post.html)

    return Response(feed.rss_str(pretty=True), mimetype="application/rss+xml")
Example No. 43
def podcast_feed():
    logo_url = url_for("static", filename="wpclogo_big.png", _external=True)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_image(logo_url)
    fg.author({'name': 'Nathan Kellert', 'email': '*****@*****.**'})
    fg.link(href='http://watchpeoplecode.com/podcast_feed.xml', rel='self')
    fg.title('WPC Coders Podcast')
    fg.description(
        'WPC Coders Podcast is a weekly peek into the lives of developers and the WatchPeopleCode community. Our goal is to keep our listeners entertained by giving them new and interesting insights into our industry as well as awesome things happening within our own community. Here, you can expect hear about some of the latest news, tools, and opportunities for developers in nearly every aread of our industry. Most importantly, we hope to have some fun and a few laughs in ways only other nerds know how.'
    )  # NOQA

    episodes = [
        ('ep1.mp3', 'Episode 1', datetime(2015, 2, 21, 23),
         'Learn all about the WPC hosts, and where we came from in Episode 1!'
         ),
        ('ep2.mp3', 'Episode 2', datetime(2015, 2, 28, 23),
         'This week we cover your news, topics and questions in episode 2!'),
        ('ep3.mp3', 'Episode 3', datetime(2015, 3, 7, 23),
         "On todays podcast we talk to WatchPeopleCode's founder Alex Putilin. Hear about how the reddit search engine thousands watched him write. Also, hear the inside scoop of how WatchPeopleCode got started!"
         ),  # NOQA
        ('ep4.mp3', 'Episode 4', datetime(2015, 3, 14, 23),
         "This week we talk to FreeCodeCamps Quincy Larson(http://www.freecodecamp.com) about their project that combines teaching new developers how to code and completing projects for non-profits! Lets find out how this group of streamers code with a cause!"
         )
    ]  # NOQA

    for epfile, eptitle, epdate, epdescription in episodes[::-1]:
        epurl = "https://s3.amazonaws.com/wpcpodcast/{}".format(epfile)
        fe = fg.add_entry()
        fe.id(epurl)
        fe.title(eptitle)
        fe.description(epdescription)
        fe.podcast.itunes_image(logo_url)
        fe.pubdate(epdate.replace(tzinfo=pytz.UTC))
        fe.enclosure(epurl, 0, 'audio/mpeg')

    return Response(response=fg.rss_str(pretty=True),
                    status=200,
                    mimetype='application/rss+xml')
Example No. 44
    def execute(self):
        """ Run the handler. """
        feedparser.USER_AGENT = 'kwed/6.0'
        rss = feedparser.parse('https://remix.kwed.org/rss.xml')
        feed = FeedGenerator()
        feed.id('https://leah.schau.dk/kwed.xml')
        feed.title('KWED')
        feed.author({
            'name': 'Jan Lund Thomsen',
            'email': '*****@*****.**'
        })
        feed.link(href='https://remix.kwed.org/', rel='alternate')
        feed.logo('https://leah.schau.dk/rko.jpg')
        feed.subtitle('Remix.Kwed.Org')
        feed.link(href='https://leah.schau.dk/kwed.xml', rel='self')
        feed.language('en')
        feed.load_extension('podcast')
        # pylint: disable=no-member
        feed.podcast.itunes_category('Music', 'Music History')

        for item in rss['items']:
            tid = int(item['link'].split('/')[-1])
            url = self.get_download_url(tid)
            if url is None:
                print(str(tid) + ": no download url")
                continue

            content_type, length = self.get_content_metadata(url)
            if not content_type:
                print(url + ': no response to HEAD')
                continue

            entry = feed.add_entry()
            entry.id(item['id'])
            entry.title(self.get_title(item['title']))
            entry.description(item['summary'])
            entry.enclosure(url, length, content_type)
            entry.published(item['published'])

        feed.rss_str(pretty=True)
        feed.rss_file('/home/webs/leah/kwed.xml')
Example No. 45
def saveFeed(listings, title, path):

    url = githubRepoURL + title + ".xml"

    # Create a feed generator
    fg = FeedGenerator()

    # Create the feed's title
    fg.id(url)
    fg.title(title)
    fg.author({'name': 'Ben Snell'})
    fg.description("Art Show Open Call Opportunities")
    fg.link(href=url, rel='alternate')
    fg.language('en')
    time = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "-05:00"
    fg.pubDate(time)
    fg.updated(time)

    for item in listings:

        e = fg.add_entry()

        e.id(item["ID"])
        # Get a clearer title
        thisTitle = getShortDate(item["Application Deadline"]) + item["Title"]
        e.title(thisTitle)
        # for key, value in item.items():
        # print(key, value);
        # print(item["url"])
        # if "url" in item:
        e.link(href=item["url"])

        text = getHtmlFormattedListing(item)
        e.content(type="html", content=text)

        # This doesn't seem to work:
        # e.pubDate( datetime2RSSString(clDate(apt[2])) )
        # e.updated( datetime2RSSString(clDate(apt[2])) )

    fg.atom_str(pretty=True)
    fg.atom_file(path)
Example No. 46
def create_feed(posts):
    fg = FeedGenerator()
    fg.id(SITE_URL)
    fg.title(SITE_NAME)
    fg.author(AUTHOR_INFO)
    fg.link(href=SITE_URL, rel='alternate')
    fg.link(href=SITE_URL + '/feed.atom', rel='self')
    fg.language(FEED_LANGUAGE)
    fg.image(url=IMG_URL)

    for i in range(min(10, len(posts))):
        post = posts[i]
        content = makeAtomContent(post['content'])
        fe = fg.add_entry()
        fe.id(fg.id() + '/' + post['url'])
        fe.title(post['title'])
        fe.link(href=fe.id())
        fe.published(post['date'].replace(tzinfo=tzutc()))
        fe.content(content, type="CDATA")

    return fg.atom_str(pretty=True).decode('utf-8')
Example No. 47
def makefeed(eps):
    fg = FeedGenerator()
    fg.title('NPR Morning Edition')
    fg.author({'name': 'NPR'})
    fg.link(href='https://www.npr.org/programs/morning-edition/',
            rel='alternate')
    fg.logo(
        'https://cdn.shopify.com/s/files/1/0877/5762/products/Podcast_Stickers_ME_1024x1024.jpg'
    )
    fg.subtitle('Daily news from NPR')
    fg.link(href='https://ufr96k0yxe.execute-api.us-east-1.amazonaws.com/prod',
            rel='self')
    fg.language('en')
    # Add feed episodes
    for e in eps:
        fe = fg.add_entry()
        fe.id(e['url'])
        fe.title(e['title'])
        fe.link(href=e['url'])
        fe.published(e['date'])
    return fg.rss_str(pretty=True)
Example No. 48
    def _generate_feed(self):
        feed = FeedGenerator()

        feed.load_extension('podcast')
        feed.podcast.itunes_author(self.author)
        feed.podcast.itunes_category(self.category)
        feed.podcast.itunes_explicit(self.is_explicit)
        feed.podcast.itunes_image(f'{self.logo_uri}.{self.JPG_FILE_EXTENSION}')
        feed.podcast.itunes_owner(name=self.author, email=self.email)
        feed.podcast.itunes_subtitle(self.subtitle)
        feed.podcast.itunes_summary(self.description)

        feed.author(name=self.author, email=self.email)
        feed.description(self.description)
        feed.language(self.language)
        feed.link(href=self.website, rel='alternate')
        feed.logo(self.logo_uri)
        feed.subtitle(self.subtitle)
        feed.title(self.name)

        return feed
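
A sketch (placeholders only) of how an episode could be appended to the feed skeleton returned by _generate_feed() and then rendered:

# Sketch only: 'podcast' stands for a hypothetical instance of the class above.
feed = podcast._generate_feed()
episode = feed.add_entry()
episode.id("https://example.org/episodes/1.mp3")        # placeholder URL
episode.title("Episode 1")                              # placeholder title
episode.enclosure("https://example.org/episodes/1.mp3", 12345678, "audio/mpeg")
episode.podcast.itunes_duration("00:42:00")             # per-episode iTunes field
rss_bytes = feed.rss_str(pretty=True)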
Example No. 49
def feed(posts: Tuple[Post], kind: str) -> FeedGenerator:
    log.debug('generating %s feed', kind)
    fg = FeedGenerator()
    fg.title('beepb00p')
    fg.author(name='karlicoss', email='*****@*****.**')
    # TODO better description?
    fg.description('feed')

    bb = lambda x: f'https://beepb00p.xyz{x}'
    fg.id(bb(f'/{kind}.xml'))
    fg.link(rel='self', href=bb(f'/{kind}.xml'))
    fg.link(href=bb(''))
    if len(posts) > 0:
        dates = (p.date for p in posts)
        fg.updated(
            max(tz.localize(d) if d is not None else throw() for d in dates))

    # eh, apparently it adds items to the feed from bottom to top...
    for post in reversed(posts):
        fe = fg.add_entry()
        # not sure why id() doesn't allow to set permalink=True
        fe.guid(bb(post.url), permalink=True)
        fe.link(href=bb(post.url))
        fe.title(post.title)
        # TODO FIXME meh.
        d = post.date
        assert d is not None
        td = tz.localize(d)
        fe.published(td)
        fe.updated(td)
        # TODO meh, later use proper update date...
        #
        # TODO use type=text/html for comparisons?
        # TODO meh
        if post.upid == 'infra_diagram':
            content = "Sorry, this post contains a huge diagram and isn't RSS friendly. It's best viewed on the website"
        else:
            content = post.body
        fe.content(content, type='html')
    return fg
Example No. 50
def objects_atom():
    recent_objects = JsonObject.query.order_by(JsonObject.last_updated.desc()).limit(50)
    fg = FeedGenerator()
    fg.id(url_for("objects_atom", _external=True))
    fg.title("Recent objects published on MOSP")
    # fg.subtitle("")
    fg.link(href=application.config["INSTANCE_URL"], rel="self")
    fg.author(
        {
            "name": application.config["ADMIN_URL"],
            "email": application.config["ADMIN_EMAIL"],
        }
    )
    fg.language("en")
    for recent_object in recent_objects:
        fe = fg.add_entry()
        fe.id(
            url_for(
                "object_bp.get_json_object", object_id=recent_object.id, _external=True
            )
        )
        fe.title(recent_object.name)
        fe.description(recent_object.description)
        fe.author({"name": recent_object.organization.name})
        fe.content(
            json.dumps(
                recent_object.json_object,
                sort_keys=True,
                indent=4,
                separators=(",", ": "),
            )
        )
        fe.published(recent_object.last_updated.replace(tzinfo=timezone.utc))
        fe.link(
            href=url_for(
                "object_bp.get_json_object", object_id=recent_object.id, _external=True
            )
        )
    atomfeed = fg.atom_str(pretty=True)
    return atomfeed
Exemplo n.º 51
0
    def build_feed(self, commits: List[Commit]):
        log.info("build feed page %d" % len(commits))
        feed = FeedGenerator()
        feed.id("")
        feed.title("AWS API Changes")
        feed.author({
            "name": "AWSPIChanges",
            "email": "https://github.com/awslabs/aws-sdk-api-changes",
        })
        feed.link(href=self.site_url, rel="alternate")
        feed.link(href="%s/feed/" % self.site_url, rel="self")
        feed.description("AWS API ChangeLog")
        feed.language("en-US")
        feed.generator("artisan-sdk-gitops")
        feed.image(
            url="https://a0.awsstatic.com/main/images/logos/aws_logo_smile_179x109.png"
        )  # noqa
        for c in commits:
            for s in c.service_changes:
                fe = feed.add_entry(order="append")
                fe.title("{} - {}{}methods".format(
                    s.title,
                    s.count_new and "%d new " % s.count_new or "",
                    s.count_updated and "%d updated " % s.count_updated or "",
                ))
                fe.id("{}-{}".format(c.id, s.name))
                fe.description(s.change_log)
                fe.link({
                    "href": self.link("archive/changes/%s-%s.html" %
                                      (c.id[:6], s.name))
                })
                fe.published(c.created)
        self.render_page(
            "feed/feed.rss",
            force=True,
            content=feed.rss_str(pretty=True).decode("utf8"),
        )
Exemplo n.º 52
0
def feed(column_id):
    api = Api(column_id)

    with request.urlopen(api.info) as stream:
        result = stream.read().decode('utf-8')

    if not result:
        return '', 404

    info = json.loads(result)

    with request.urlopen(api.posts) as stream:
        result = stream.read().decode('utf-8')
        entries = json.loads(result)

    fg = FeedGenerator()
    fg.id(str(entries[0]['slug']))
    fg.title(info['name'])
    fg.language('zh_CN')
    fg.icon(info['avatar']['template'].replace('{id}',
                                               info['avatar']['id']).replace(
                                                   '{size}', 's'))
    fg.logo(info['avatar']['template'].replace('{id}',
                                               info['avatar']['id']).replace(
                                                   '{size}', 'l'))
    fg.description(info['intro'])
    fg.author(dict(name=info['creator']['name']))
    fg.link(href=api.base_url + info['url'], rel='alternate')
    for entry in entries:
        fe = fg.add_entry()
        fe.id(entry['url'])
        fe.title(entry['title'])
        fe.published(entry['publishedTime'])
        fe.updated(entry['publishedTime'])
        fe.author(dict(name=entry['author']['name']))
        fe.link(href=api.base_url + entry['url'], rel='alternate')
        fe.content(entry['content'])

    return fg.atom_str(pretty=True)
Exemplo n.º 53
0
def podcast_feed_generator():
    """This should be optimized and constructed only once."""
    fg = FeedGenerator()
    fg.id('Sam.space')
    fg.title("Sam's Space")
    fg.link(href='https://samanvayvajpayee.com')
    fg.author({'name': 'Sam Vajpayee', 'email': '*****@*****.**'})
    fg.subtitle('Things that make my mind go bing!')
    fg.language('en')
    fg.description(
        """My corner of the Great WWW where I talk about things I want to talk about"""
    )

    podcasts = get_static_json('static/podcasts/podcasts.json')
    for podcast in podcasts:
        fe = fg.add_entry()
        fe.id(podcast['url'])
        fe.title(podcast['title'])
        fe.description(podcast['description'])
        fe.enclosure(podcast['url'], 0, 'audio/mpeg')

    return fg
Exemplo n.º 54
0
def feed(request):
    address_info = resolve_address(request)
    if not address_info:
        return redirect('/')

    blog = address_info['blog']
    root = address_info['root']

    all_posts = blog.post_set.filter(publish=True,
                                     is_page=False).order_by('-published_date')

    fg = FeedGenerator()
    fg.id(f'http://{root}/')
    fg.author({'name': blog.subdomain, 'email': 'hidden'})
    fg.title(blog.title)
    if blog.content:
        fg.subtitle(clean_text(unmark(blog.content)[:160]))
    else:
        fg.subtitle(blog.title)
    fg.link(href=f"http://{root}/", rel='alternate')

    for post in all_posts:
        fe = fg.add_entry()
        fe.id(f"http://{root}/{post.slug}/")
        fe.title(post.title)
        fe.author({'name': blog.subdomain, 'email': 'hidden'})
        fe.link(href=f"http://{root}/{post.slug}/")
        fe.content(clean_text(mistune.html(post.content)), type="html")
        fe.published(post.published_date)
        fe.updated(post.published_date)

    if request.GET.get('type') == 'rss':
        fg.link(href=f"http://{root}/feed/?type=rss", rel='self')
        rssfeed = fg.rss_str(pretty=True)
        return HttpResponse(rssfeed, content_type='application/rss+xml')
    else:
        fg.link(href=f"http://{root}/feed/", rel='self')
        atomfeed = fg.atom_str(pretty=True)
        return HttpResponse(atomfeed, content_type='application/atom+xml')
Exemplo n.º 55
0
def generate_feed(articles):
    articles_count = settings.FEED.get('articles_count', 10)
    author = settings.FEED.get('author', '')
    language = settings.FEED.get('language', 'en')

    fg = FeedGenerator()
    fg.title(settings.BLOG_TITLE)
    fg.author(name=author)
    fg.id(id=settings.HOST)
    fg.link(href=settings.HOST, rel='alternate')
    fg.language(language)

    for i, article in enumerate(articles):
        if i == articles_count:
            break

        fe = fg.add_entry()
        fe.id(article_link(article.meta['Url']))
        fe.link(href=article_link(article.meta['Url']), rel='alternate')
        fe.title(article.meta['Title'])

    fg.atom_file(pjoin(settings.OUTPUT_DIR.full_path, 'feed.xml'))
Exemplo n.º 56
0
def make_feed(filename='epistles.xml'):
    fg = FeedGenerator()
    fg.title('Daily Epistles')
    fg.author({'name': 'Tim Hopper'})
    fg.subtitle('Listen to the New Testament epistles each month.')
    fg.language('en')
    fg.link(href='http://www.crossway.com', rel='alternate')

    for day, division in enumerate(get_divisons(), 1):
        entry = fg.add_entry()
        entry.id(division)
        entry.title(division)
        pubdate = datetime.datetime(year=datetime.datetime.now().year,
                                    month=datetime.datetime.now().month,
                                    day=day,
                                    hour=pubhour,
                                    tzinfo=tz)
        entry.published(pubdate)
        entry.enclosure(get_url(division), 0, 'audio/mpeg')

    fg.rss_file(filename, pretty=True)
Exemplo n.º 57
0
def generate_feed():
    tz = pytz.timezone(config.timezone)
    # Get latest X entries from database
    entries = dbhelper.get_latest_entries()

    fg = FeedGenerator()
    # Feed id
    fg.id(config.bot_link)
    # Creator info (for Atom)
    fg.author(name=config.author_name, email=config.author_email, replace=True)
    # Self link to the feed
    fg.link(href=config.feed_link, rel='self')
    # Set description of your feed
    fg.description(config.feed_description)
    # Last time feed updated (use system time with timezone)
    fg.lastBuildDate(
        formatdate(datetime.timestamp(datetime.now(tz)), localtime=True))
    fg.title(config.feed_title)
    # Set time-to-live (minutes the feed may be cached before clients re-fetch it)
    fg.ttl(5)
    # Feed language code (maps to the RSS <language> element)
    fg.language(config.feed_language)

    for entry in entries:
        item = fg.add_entry()
        # Use message id to form valid URL (new feature in Telegram since Feb 2016)
        item.id("{!s}".format(entry["pid"]))
        item.link(href="{!s}/{!s}".format(config.bot_link, entry["pid"]),
                  rel="alternate")
        # Set title and content from message text
        item.title(entry["ptext"])
        item.content(entry["ptext"])
        # Set publish/update datetime
        item.pubdate(entry["pdate"])
        item.updated(entry["pdate"])

    # Write the Atom feed to file
    # It's best to generate only one format at a time (or split this into two
    # functions, as sketched below)
    fg.atom_file('static/atom.xml')
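A minimal sketch of the two-function split suggested in the closing comment, so each output format gets its own writer; _build_feed() and the 'static/rss.xml' path are assumptions, not part of the original code.

def _build_feed():
    fg = FeedGenerator()
    ...  # same feed and entry setup as generate_feed() above, minus the file write
    return fg

def write_atom_feed():
    _build_feed().atom_file('static/atom.xml')

def write_rss_feed():
    _build_feed().rss_file('static/rss.xml')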
Exemplo n.º 58
0
def make_feed(feed: FeedGenerator, blogs: list[Path]):
    feed.title("Anurudh's Blog")
    feed.description("Navigating the manifold of computing")
    feed.author(name='Anurudh Peduri')
    feed.language('en')

    SITE_PATH = "https://anurudhp.github.io/"
    feed.link(href=SITE_PATH)
    feed.id(SITE_PATH)

    for blog in blogs:
        metadata = read_metadata(blog)
        if metadata is not None:
            logging.info(f'Adding: {metadata["title"]}')
            entry = feed.add_entry()
            loc = SITE_PATH + str(blog).replace('.md', '.html')
            entry.id(loc)
            entry.title(metadata['title'])
            entry.link(href=loc)
            pubDate = sh.date('-R', '-d', metadata['created']).strip()
            entry.pubDate(pubDate)
            entry.author(email='Anurudh Peduri')
Exemplo n.º 59
0
def getRSSFeed(series):
    seriesName = series.name

    fg = FeedGenerator()
    fg.title("%s Bolumleri" % seriesName)
    fg.author({'name': 'Riperion Medya', 'email': '*****@*****.**'})
    # rss_str() also requires a channel link and description; the values below are
    # placeholders, since the original snippet does not set them
    fg.link(href='https://riperion.example/', rel='alternate')
    fg.description("%s Bolumleri" % seriesName)

    for season in series.seasons:
        seasonNumber = season.season_number
        for episode in season.episodes:
            link = episode.media_link
            date = episode.date_found
            episodeNoInSeason = episode.number - season.season_starting_episode_number + 1

            fe = fg.add_entry()
            fe.title(
                "%s %d. Bolum (S%02dE%02d)" %
                (seriesName, episode.number, seasonNumber, episodeNoInSeason))
            fe.published(date)
            fe.link(href=link)

    return fg.rss_str(pretty=True)