Beispiel #1
0
    def feedbuild(self, title, link, desc, pubdate, entries):
        """Assemble an RSS feed from entry tuples and return it as bytes.

        Each element of *entries* is indexed as (url, title, guid,
        category, description, pubdate); entries are added in reversed
        order.  NOTE: the ``pubdate`` parameter is currently unused.
        """
        feed = FeedGenerator()
        feed.docs(docs='')
        feed.generator(generator='', version=None, uri=None)
        feed.title(title)
        feed.link(href=link, replace=False)
        feed.description(desc)
        feed.lastBuildDate(lastBuildDate=currenttime())

        for record in reversed(entries):
            item = feed.add_entry()
            # URLs arrive percent-encoded; decode before publishing.
            link_attrs = {
                "href": unquote_plus(record[0]),
                "rel": "alternate",
                "type": None,
                "hreflang": "en",
                "title": record[1],
                "length": 0,
            }
            item.link(link_attrs, replace=False)
            item.title(record[1])
            item.guid(guid=record[2], permalink=True)
            item.category({"term": record[3], "scheme": None}, replace=False)
            item.description(unquote_plus(record[4]))
            item.pubDate(record[5])
        return feed.rss_str(pretty=True)
Beispiel #2
0
 def generate(self):
     """Build one RSS feed per device plus a combined "latest" feed.

     Iterates over ``self.updates``, lazily creating a FeedGenerator for
     each device codename (cached in ``self.feeds``) and mirroring every
     update into a site-wide feed.  All feeds are written as RSS files
     under ``{CONF_DIR}/rss/``.
     """
     main_feed_generator = FeedGenerator()
     main_feed_generator.title('MIUI Updates Tracker by XiaomiFirmwareUpdater')
     main_feed_generator.link(href=website, rel='alternate')
     main_feed_generator.description('Your best website to track MIUI ROM releases!')
     main_feed_generator.language('en')
     main_feed_generator.logo(f'{website}/images/xfu.png')
     main_feed_generator.lastBuildDate(None)
     for update in self.updates:
         # Codenames may carry a variant suffix after '_'; feeds are
         # grouped by the bare codename.
         short_codename = update.codename.split('_')[0]
         if short_codename not in self.feeds.keys():
             # First update seen for this device: create its feed.
             feed_generator = FeedGenerator()
             feed_generator.title(f'{update.name} MIUI Updates Tracker by XiaomiFirmwareUpdater')
             feed_generator.link(href=f'{website}/miui/{short_codename}', rel='alternate')
             feed_generator.description('Your best website to track MIUI ROM releases!')
             feed_generator.language('en')
             feed_generator.logo(f'{website}/images/xfu.png')
             feed_generator.lastBuildDate(None)
         else:
             feed_generator = self.feeds.get(short_codename)
         feed_generator = self.add_feed_entry(feed_generator, update)
         self.feeds.update({short_codename: feed_generator})
         # Every update also lands in the combined site-wide feed.
         main_feed_generator = self.add_feed_entry(main_feed_generator, update)
     main_feed_generator.rss_file(f"{CONF_DIR}/rss/latest.xml")
     for codename, feed in self.feeds.items():
         feed.rss_file(f"{CONF_DIR}/rss/{codename}.xml")
Beispiel #3
0
def get_weekly_jobs_rss():
    """Serve the cached weekly HEP jobs email as an RSS response."""
    config = current_app.config
    store = StrictRedis.from_url(config.get("CACHE_REDIS_URL"))
    entry_hash = store.hgetall(config.get("WEEKLY_JOBS_EMAIL_REDIS_KEY"))

    # Redis returns bytes; decode the textual fields explicitly.
    title = entry_hash[b"title"].decode("UTF-8")
    body_html = entry_hash[b"html"].decode("UTF-8")
    epoch = float(entry_hash[b"timestamp"])
    build_date = datetime.fromtimestamp(epoch, tz=pytz.UTC)

    feed = FeedGenerator()
    feed.link(href=request.url_root)
    feed.title("INSPIRE Weekly HEP Jobs")
    feed.author({"name": "inspirehep.net"})
    feed.description("Feed for weekly HEP jobs from INSPIRE")
    feed.pubDate(build_date)
    feed.lastBuildDate(build_date)

    item = feed.add_entry()
    item.id(str(epoch))
    item.title(title)
    item.content(body_html)
    item.published(build_date)

    return Response(response=feed.rss_str(), mimetype="application/rss+xml")
Beispiel #4
0
def create_rss_feed(poemlink):
    """
    Takes the link that we fetched
    and then writes it to an xml file for a feed reader to fetch

    :param poemlink: url
    :type poemlink: string
    """

    # Feed skeleton with the blog's fixed metadata.
    feed = FeedGenerator()
    feed.title("Jason's PF feed")
    feed.link(href=poemlink)
    feed.description("Poem of the day")
    feed.lastBuildDate(datetime.now(pytz.timezone('Asia/Kolkata')))

    # One entry per run, keyed by today's date.
    entry = feed.add_entry()
    entry.title(f"Poem for {date.today()}")
    entry.link(href=poemlink)
    entry.guid(f"Poem for {date.today()}")
    entry.pubDate(datetime.now(pytz.timezone('Asia/Kolkata')))

    # Persist the feed next to the script.
    feed.rss_file('poem.xml')
Beispiel #5
0
def _create_feed(speaker, talks, file_name):
    """Write a podcast RSS feed of *talks* by *speaker* to *file_name*.

    Args:
        speaker: display name of the speaker.
        talks: list of talk dicts providing 'time', 'title', 'preview',
            'html', 'audio_url', 'audio_size', 'uri' and 'url';
            talks[0]['time'] is used as the feed's pub/build date.
        file_name: path of the RSS file to write.
    """
    LOGGER.info("Creating feed for %s", speaker)
    updated = talks[0]['time']

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.language('en')
    fg.title(f'Talks By {speaker}')
    fg.link(href='http://philip.lundrigan.org/Speakercast/')
    fg.image(url=f'http://philip.lundrigan.org/Speakercast/covers/{urllib.parse.quote(speaker)}.jpg',
             title=f'General Conference talks by {speaker}.')
    fg.description(f'General Conference talks by {speaker}.')
    fg.author({'name':'Philip Lundrigan', 'email':'*****@*****.**'})
    fg.generator('Speakercast')
    fg.pubDate(updated)
    fg.lastBuildDate(updated)
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')

    for talk in talks:
        fe = fg.add_entry()
        # Bug fix: the id was previously first set to a hard-coded sample
        # URL ('http://lernfunk.de/media/654321/1/file.mp3') and then
        # immediately overwritten; set it once from the talk's URI.
        fe.id(talk['uri'])
        fe.title(talk['title'])
        fe.description(talk['preview'])
        fe.content(talk['html'], type='CDATA')
        fe.enclosure(talk['audio_url'], str(talk['audio_size']), 'audio/mpeg')
        fe.link(href=talk['url'])
        fe.published(talk['time'])

    fg.rss_file(file_name, pretty=True)
Beispiel #6
0
def build_xml_feed(allchapters, verbose=True):
    """Write RSS and Atom feeds for the 100 most recent chapters.

    Args:
        allchapters: list of chapter dicts; each needs 'url', 'updated_t'
            (a datetime), 'updated', 'series', 'name', 'thumb' and 'guid'.
        verbose: print progress messages when True.

    Raises:
        CRMangaFeedException: if *allchapters* is empty.
    """

    if verbose:
        # Bug fix: the original Python 2 print statements are a
        # SyntaxError under Python 3; use print() calls instead.
        print()
        print("Generating feeds...")

    if not allchapters:
        raise CRMangaFeedException("Empty chapter list")

    crtz = pytz.timezone('America/New_York')

    fg = FeedGenerator()
    fg.id('http://utils.senpai.moe/')
    fg.title('Crunchyroll Manga - Latest Chapters (Unofficial)')
    fg.author({'name': 'Nosgoroth', 'email': '*****@*****.**'})
    fg.link(href='http://utils.senpai.moe/')
    fg.subtitle(
        'Latest manga chapters, updated daily, using undocumented API.')
    fg.language('en')
    fg.ttl(15)

    # Newest chapters first; the first one dates the whole feed.
    allchapters = sorted(allchapters,
                         key=itemgetter('updated_t'),
                         reverse=True)

    first = allchapters[0]["updated_t"].replace(tzinfo=crtz)
    fg.updated(first)
    fg.lastBuildDate(first)

    for chapter in allchapters[0:100]:
        fe = fg.add_entry()
        fe.id(chapter["url"])
        fe.link({
            "href": chapter["url"],
            "rel": "alternate",
            "title": "Read online"
        })
        fe.title("%s - %s" % (chapter["series"], chapter["name"]))
        fe.summary("<p>%s has been added to %s in Crunchyroll Manga.</p>" %
                   (chapter["name"], chapter["series"]))
        fe.published(chapter["updated_t"].replace(tzinfo=crtz))

        # Embed the remaining chapter metadata as JSON in the entry body,
        # minus fields already represented elsewhere in the entry.
        chapter_serial = chapter.copy()
        chapter_serial.pop("updated_t", None)
        chapter_serial.pop("url", None)
        chapter_serial.pop("thumb", None)
        chapter_serial["chapter_id"] = chapter_serial["guid"]
        chapter_serial.pop("guid", None)

        content = "<p>%s has been added to %s in Crunchyroll Manga.</p><p>Updated: %s</p><img src=\"%s\" />" % (
            chapter["name"], chapter["series"], chapter["updated"],
            chapter["thumb"])
        content += "<!--JSON:[[%s]]-->" % json.dumps(chapter_serial)
        fe.content(content)

    fg.rss_file(os.path.join(DESTINATION_FOLDER, 'updates_rss.xml'),
                pretty=DEBUG)  # Write the RSS feed to a file
    fg.atom_file(os.path.join(DESTINATION_FOLDER, 'updates_atom.xml'),
                 pretty=DEBUG)  # Write the ATOM feed to a file
Beispiel #7
0
    def get(self):
        """Render a small two-item demo RSS feed into the HTTP response."""
        feed = FeedGenerator()
        feed.id("http://test.ts")
        feed.title("My Test Feed")
        feed.icon("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
        feed.author({'name': "The Author", 'email': "*****@*****.**"})

        feed.link(href="http://example.org/index.atom?page=2", rel="next")

        feed.link(href="http://test.ts", rel="alternate")
        feed.logo("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
        feed.description("Este é o monstro do lago 1")
        feed.subtitle("This is an example feed!")
        feed.language("en-us")
        # TODO: also emit <sy:updatePeriod>hourly</sy:updatePeriod> and
        # <sy:updateFrequency>1</sy:updateFrequency>.
        feed.lastBuildDate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        first_item = feed.add_item()
        first_item.id("http://test.ts/id/1")
        first_item.title("Monstro do Lago 1")
        first_item.description("Este é o monstro do lago 1")
        first_item.comments("http://test.ts/id/1/comments")
        first_item.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        second_item = feed.add_item()
        second_item.id("http://test.ts/id/2")
        second_item.title("Monstro do Lago 2")
        second_item.description("Este é o monstro do lago 2")
        second_item.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        body = feed.rss_str(pretty=True)
        self.set_header("Content-Type", 'application/xml; charset="utf-8"')
        self.write(body)

        if self.is_browser_mobile():
            print("buu")
        else:
            print(self.request.headers["User-Agent"])
Beispiel #8
0
    def get(self):
        """Render a small two-item demo RSS feed into the HTTP response."""
        feed = FeedGenerator()
        feed.id("http://test.ts")
        feed.title("My Test Feed")
        feed.icon("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
        feed.author({'name': "The Author", 'email': "*****@*****.**"})

        feed.link(href="http://example.org/index.atom?page=2", rel="next")

        feed.link(href="http://test.ts", rel="alternate")
        feed.logo("https://avatars1.githubusercontent.com/u/715660?v=3&s=32")
        feed.description("Este é o monstro do lago 1")
        feed.subtitle("This is an example feed!")
        feed.language("en-us")
        # TODO: also emit <sy:updatePeriod>hourly</sy:updatePeriod> and
        # <sy:updateFrequency>1</sy:updateFrequency>.
        feed.lastBuildDate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        first_item = feed.add_item()
        first_item.id("http://test.ts/id/1")
        first_item.title("Monstro do Lago 1")
        first_item.description("Este é o monstro do lago 1")
        first_item.comments("http://test.ts/id/1/comments")
        first_item.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        second_item = feed.add_item()
        second_item.id("http://test.ts/id/2")
        second_item.title("Monstro do Lago 2")
        second_item.description("Este é o monstro do lago 2")
        second_item.pubdate(datetime.now(pytz.timezone("America/Sao_Paulo")))

        body = feed.rss_str(pretty=True)
        self.set_header("Content-Type", 'application/xml; charset="utf-8"')
        self.write(body)

        if self.is_browser_mobile():
            print("buu")
        else:
            print(self.request.headers["User-Agent"])
Beispiel #9
0
def generate_rss(all_news: list[News]) -> str:
    """Build an RSS document string from the collected news items."""
    feed = FeedGenerator()
    feed.title('津山高専からのお知らせ')
    feed.description('津山高専からのお知らせ')
    feed.link(href=NEWS_URL, rel='alternate')
    feed.lastBuildDate(datetime.now(JST))
    feed.language('ja')

    for item in all_news:
        entry = feed.add_entry()
        entry.title(f'[{item.kind}] {item.subject}')
        # News links are relative; prefix the site's news URL.
        entry.link(href=f'{NEWS_URL}{item.link}')
        entry.pubDate(item.published_date)

    return feed.rss_str().decode('utf-8')
Beispiel #10
0
    def _build_feed(self):
        """Generate RSS and Atom feeds for the ten newest posts.

        Writes both .xml and .html copies under the build directory.
        """
        router = Router(self._config)
        fp = filterPlaceholders

        feed = FeedGenerator()
        feed.id(self._config.site_prefix)
        feed.title(self._config.site_name)
        feed.author({
            'name': fp(self._config.author),
            'email': fp(self._config.email)
        })
        feed.link(href=self._config.domain_name + self._config.site_prefix,
                  rel='alternate')
        feed.logo(fp(self._config.site_logo))
        feed.subtitle(fp(self._config.description))
        # RSS requires a non-empty description; fall back to a space.
        feed.description(fp(self._config.description) or ' ')
        feed.language(fp(self._config.language))
        feed.lastBuildDate(moment.now().locale(self._config.locale).date)
        feed.pubDate(moment.now().locale(self._config.locale).date)

        for post in self._posts[:10]:
            meta = post.meta
            entry = feed.add_entry()
            entry.title(meta['title'])
            entry.link(href=self._config.domain_name +
                       router.gen_permalink_by_meta(meta))
            entry.guid(router.gen_permalink_by_meta(meta), True)
            entry.pubDate(meta['date'].date)
            entry.author({
                'name': meta['author'],
                'uri': fp(self._config.author_homepage),
                'email': fp(self._config.email)
            })
            entry.content(post.parsed)

        # Make sure the atom output directory exists before writing.
        atom_dir = unify_joinpath(self._config.build_dir, 'feed/atom')
        if not os.path.exists(atom_dir):
            os.makedirs(atom_dir)

        feed.rss_file(unify_joinpath(self._config.build_dir, 'feed/index.xml'))
        feed.rss_file(unify_joinpath(self._config.build_dir, 'feed/index.html'))
        feed.atom_file(
            unify_joinpath(self._config.build_dir, 'feed/atom/index.xml'))
        feed.atom_file(
            unify_joinpath(self._config.build_dir, 'feed/atom/index.html'))
Beispiel #11
0
    def render_rss(self, post_list):
        """Write RSS and Atom feeds for the ten most recent posts."""
        router = Router(self._config)

        feed = FeedGenerator()
        feed.id(self._config.site_prefix)
        feed.title(self._config.site_name)
        feed.author({'name': self._config.author, 'email': self._config.email})
        feed.link(href=self._config.site_prefix, rel='alternate')
        feed.logo(self._config.site_logo)
        feed.subtitle(self._config.description)
        feed.language('zh-CN')
        feed.lastBuildDate(moment.now().locale('Asia/Shanghai').date)
        feed.pubDate(moment.now().locale('Asia/Shanghai').date)

        for post in post_list[:10]:
            meta = post.meta
            entry = feed.add_entry()
            entry.title(meta['title'])
            entry.link(href=router.gen_permalink_by_meta(meta))
            entry.guid(router.gen_permalink_by_meta(meta), True)
            entry.pubDate(meta['date'].date)
            entry.author({
                'name': meta['author'],
                'uri': self._config.author_homepage,
                'email': self._config.email
            })
            entry.content(post.parsed)

        # Make sure the atom output directory exists before writing.
        atom_dir = Utils.unify_joinpath(self._config.build_dir, 'feed/atom')
        if not os.path.exists(atom_dir):
            os.makedirs(atom_dir)

        feed.rss_file(
            Utils.unify_joinpath(self._config.build_dir, 'feed/index.xml'))
        feed.rss_file(
            Utils.unify_joinpath(self._config.build_dir, 'feed/index.html'))
        feed.atom_file(
            Utils.unify_joinpath(self._config.build_dir,
                                 'feed/atom/index.xml'))
        feed.atom_file(
            Utils.unify_joinpath(self._config.build_dir,
                                 'feed/atom/index.html'))
Beispiel #12
0
def generate_feed():
    """Build an Atom feed from the newest database entries and save it."""
    tz = pytz.timezone(config.timezone)
    entries = dbhelper.get_latest_entries()

    feed = FeedGenerator()
    feed.id(config.bot_link)
    # Author metadata (used by the Atom serializer).
    feed.author(name=config.author_name, email=config.author_email, replace=True)
    feed.link(href=config.feed_link, rel='self')
    feed.description(config.feed_description)
    # Stamp the build date with the configured timezone.
    feed.lastBuildDate(
        formatdate(datetime.timestamp(datetime.now(tz)), localtime=True))
    feed.title(config.feed_title)
    feed.ttl(5)
    feed.language(config.feed_language)

    for row in entries:
        post = feed.add_entry()
        # Message ids double as URL path segments (Telegram, Feb 2016+).
        post.id("{!s}".format(row["pid"]))
        post.link(href="{!s}/{!s}".format(config.bot_link, row["pid"]),
                  rel="alternate")
        post.title(row["ptext"])
        post.content(row["ptext"])
        post.pubdate(row["pdate"])
        post.updated(row["pdate"])

    # Only the Atom flavour is written out.
    feed.atom_file('static/atom.xml')
def generate_feed():
    """Build an Atom feed from the newest database entries and save it."""
    local_tz = pytz.timezone(config.timezone)
    latest = dbhelper.get_latest_entries()

    generator = FeedGenerator()
    generator.id(config.bot_link)
    # Author metadata (used by the Atom serializer).
    generator.author(name=config.author_name, email=config.author_email, replace=True)
    generator.link(href=config.feed_link, rel='self')
    generator.description(config.feed_description)
    # Stamp the build date with the configured timezone.
    generator.lastBuildDate(
        formatdate(datetime.timestamp(datetime.now(local_tz)), localtime=True))
    generator.title(config.feed_title)
    generator.ttl(5)
    generator.language(config.feed_language)

    for record in latest:
        entry = generator.add_entry()
        # Message ids double as URL path segments (Telegram, Feb 2016+).
        entry.id("{!s}".format(record["pid"]))
        entry.link(href="{!s}/{!s}".format(config.bot_link, record["pid"]),
                   rel="alternate")
        entry.title(record["ptext"])
        entry.content(record["ptext"])
        entry.pubdate(record["pdate"])
        entry.updated(record["pdate"])

    # Only the Atom flavour is written out.
    generator.atom_file('static/atom.xml')
Beispiel #14
0
# Feed metadata for the OWASP Cheat Sheet Series change feed.
feed_generator.description("List of the last updates on the content")
feed_generator.author({
    "name": "Core team",
    "email": "*****@*****.**"
})
feed_generator.link({
    "href": "https://cheatsheetseries.owasp.org",
    "rel": "self"
})
feed_generator.link({
    "href": "https://github.com/OWASP/CheatSheetSeries",
    "rel": "alternate"
})
feed_generator.language("en")
feed_generator.pubDate(current_date)
feed_generator.lastBuildDate(current_date)
for pull_request in pull_requests:
    # Take only merged PRs.
    if pull_request["merged_at"] is None:
        continue
    # Convert merge date from 2019-08-25T06:36:35Z To Sun, 19 May 2002 15:21:36 GMT
    # Bug fix: RSS pubDate must be RFC 822, which uses the abbreviated
    # month name — %b, not %B (the full month name made invalid dates
    # like "25 August 2019").
    merge_date_src = pull_request["merged_at"]
    merge_date_dst = datetime.strptime(
        merge_date_src,
        "%Y-%m-%dT%H:%M:%SZ").strftime("%a, %d %b %Y %H:%M:%S GMT")
    feed_entry = feed_generator.add_entry()
    feed_entry.id(pull_request["html_url"])
    feed_entry.title(pull_request["title"])
    feed_entry.link({"href": pull_request["html_url"], "rel": "self"})
    feed_entry.link({"href": pull_request["url"], "rel": "alternate"})
    feed_entry.pubDate(merge_date_dst)
def rss_feed_for_group(api, group, reposts=True):
    """
    Create rss feed based on the group posts

    :param api: VkApiMethod instance, to initialise it,
          api = vk_api.VkApi(USERNAME, PASSWORD).get_api();
    :param group: string, short name of a group, for instance,
            'club1' in https://vk.com/club1/;
    :param reposts: boolean, False if we do not want to add reposts
              to the feed
    :returns: FeedGenerator instance, ready for writing XML
    :raises VkApiError: if fetching the posts or the group name fails
    """

    # VK API allows to make 10000 calls per day with wall.get,
    # so if we going to refresh a feed every 20 minutes (it's 72 a day),
    # we should be ok with about 138 groups (if I get it right)

    # Get the first 60 (should be enough) posts from a group
    vargs = {'domain': group, 'count': 60}

    # If a group doesn't have a short name, its url looks like,
    # for example, this: vk.com/club526452694, but, in general, a group
    # can have a short name beginning with 'club'. The problem is that
    # VK API doesn't allow to get the posts from the group 'club526452694'
    # if we use it as the short name (it returns an empty list) therefore
    # we have to check it
    if group[:4] == 'club':
        # So if it's a shortname beginning with 'club', we get an exception
        try:
            owner_id = -1 * int(group[4:])
            vargs['owner_id'] = owner_id
            del vargs['domain']
        except ValueError:
            pass

    try:
        posts = api.wall.get(**vargs)['items']
        # Get the name of a group
        group_name = api.groups.getById(group_id=group)[0]['name']
    except VkApiError as error_msg:
        # Bug fix: previously the error was only printed and execution fell
        # through to code that used the unbound names `posts` and
        # `group_name`, crashing with a NameError.  Log and re-raise.
        print(error_msg)
        raise

    # Generate the feed
    fg = FeedGenerator()
    fg.title(group_name)
    fg.link(href='https://vk.com/{}/'.format(group))
    fg.description("Vk feed - {}".format(group_name))
    # Get the local timezone object
    local_tz = get_localzone()
    # Feedgen lib desperately wants timezone info in every date
    fg.lastBuildDate(datetime.now(local_tz))

    # Go through the posts...
    for post in posts:
        # We do not need ads, right?
        if post['marked_as_ads']:
            continue
        # If the post is not a repost
        if post.get('copy_history') is None:
            post_data = post_parsing(post, group_name)
        # If it is, pass to post_parsing function the dictionary
        # post['copy_history'][0] representing the post
        # which the repost are made from (if we want reposts)
        elif reposts:
            post_data = post_parsing(post['copy_history'][0], group_name)
        else:
            continue

        # ...and create RSS items
        fe = fg.add_entry()
        fe.title(post_data['title'])
        fe.link(href=post_data['link'])
        fe.description(post_data['description'])
        fe.guid(post_data['guid'])
        fe.pubdate(post_data['pubDate'])

    return fg
Beispiel #16
0
# Base URL of the show; also used as the feed's link target.
fdr = 'https://fullduplexradio.us'
fg = FeedGenerator()
fg.load_extension('podcast')

fg.podcast.itunes_category('Music', 'Podcasting')
fg.title('Full Duplex Radio')
fg.description(
    "R&R play what they like, which is a lot. And they tell you about it.")
fg.link(link={'href': fdr})
# Publish the cover image under this machine's fully-qualified host name.
myhost = socket.getfqdn("0.0.0.0")
# TODO: make this configurable
fg.image('https://*****:*****@{}:8080/FDR.jpg'.format(myhost),
         title='Full Duplex Radio',
         link=fdr)
local_tz = tz.tzlocal()
fg.lastBuildDate(datetime.now(tz=local_tz))
# NOTE(review): the return value of rss_str() is discarded here — this call
# has no visible effect and looks like leftover debugging; confirm and drop.
fg.rss_str(pretty=True)

# Scrape the show's index page for episode links; each matching anchor
# becomes one feed entry via add_episode().
response = requests.get(fdr)
if response.status_code == 200:
    rows = response.content.decode().split('\n')
    # '<a href="pl/FD406.html">Episode #406: Do You Know Any Nice Jewish Girls? (2020-11-07)</a>'
    for row in rows:
        match = re.match(
            r'<a href="(?P<rel>[^"]*)">Episode #(?P<ep>[0-9]+): (?P<title>.*) \((?P<date>.*)\)</a>',
            row)
        if match:
            add_episode(fg, match)
urls = [
    'https://fullduplexradio.us/audio/Full%20Duplex%20405%20-%202020-10-31.mp3',
Beispiel #17
0
def from_activities(activities,
                    actor=None,
                    title=None,
                    feed_url=None,
                    home_page_url=None,
                    hfeed=None):
    """Converts ActivityStreams activities to an RSS 2.0 feed.

  Args:
    activities: sequence of ActivityStreams activity dicts
    actor: ActivityStreams actor dict, the author of the feed
    title: string, the feed title
    feed_url: string, the URL for this RSS feed
    home_page_url: string, the home page URL
    hfeed: dict, parsed mf2 h-feed, if available

  Returns:
    unicode string with RSS 2.0 XML
  """
    try:
        iter(activities)
    except TypeError:
        raise TypeError('activities must be iterable')

    # A dict or string is iterable but not a valid activity sequence.
    if isinstance(activities, (dict, str)):
        raise TypeError('activities may not be a dict or string')

    fg = FeedGenerator()
    fg.id(feed_url)
    assert feed_url
    fg.link(href=feed_url, rel='self')
    if home_page_url:
        fg.link(href=home_page_url, rel='alternate')
    # TODO: parse language from lang attribute:
    # https://github.com/microformats/mf2py/issues/150
    fg.language('en')
    fg.generator('granary', uri='https://granary.io/')

    hfeed = hfeed or {}
    actor = actor or {}
    # Feed image: prefer the h-feed's photo, fall back to the actor's image.
    image = (util.get_url(hfeed.get('properties', {}), 'photo')
             or util.get_url(actor, 'image'))
    if image:
        fg.image(image)

    props = hfeed.get('properties') or {}
    content = microformats2.get_text(util.get_first(props, 'content', ''))
    summary = util.get_first(props, 'summary', '')
    # RSS requires both description and title; fall back to '-'.
    desc = content or summary or '-'
    fg.description(desc)  # required
    fg.title(title or util.ellipsize(desc))  # required

    latest = None
    feed_has_enclosure = False
    for activity in activities:
        obj = activity.get('object') or activity
        # Skip actor-only entries; they carry no publishable content.
        if obj.get('objectType') == 'person':
            continue

        item = fg.add_entry()
        url = obj.get('url')
        # (shadows the builtin `id`; also used in the log message below)
        id = obj.get('id') or url
        item.id(id)
        item.link(href=url)
        item.guid(url, permalink=True)

        # title (required)
        title = (obj.get('title') or obj.get('displayName')
                 or util.ellipsize(obj.get('content', '-')))
        # strip HTML tags
        title = util.parse_html(title).get_text('').strip()
        item.title(title)

        content = microformats2.render_content(obj,
                                               include_location=True,
                                               render_attachments=True,
                                               render_image=True)
        if not content:
            content = obj.get('summary')
        if content:
            item.content(content, type='CDATA')

        # Tags become RSS categories, excluding reactions/shares and
        # non-category object types.
        categories = [
            {
                'term': t['displayName']
            } for t in obj.get('tags', [])
            if t.get('displayName') and t.get('verb') not in ('like', 'react',
                                                              'share')
            and t.get('objectType') not in ('article', 'person', 'mention')
        ]
        item.category(categories)

        author = obj.get('author', {})
        author = {
            'name': author.get('displayName') or author.get('username'),
            'uri': author.get('url'),
            # email is required by the RSS author element; '-' placeholder.
            'email': author.get('email') or '-',
        }
        item.author(author)

        published = obj.get('published') or obj.get('updated')
        if published and isinstance(published, str):
            try:
                dt = mf2util.parse_datetime(published)
                if not isinstance(dt, datetime):
                    dt = datetime.combine(dt, time.min)
                if not dt.tzinfo:
                    dt = dt.replace(tzinfo=util.UTC)
                item.published(dt)
                # Track the newest publish time for lastBuildDate below.
                if not latest or dt > latest:
                    latest = dt
            except ValueError:  # bad datetime string
                pass

        # RSS allows at most one enclosure per item; keep the first
        # audio/video attachment and warn about the rest.
        item_has_enclosure = False
        for att in obj.get('attachments', []):
            stream = util.get_first(att, 'stream') or att
            if not stream:
                continue

            url = stream.get('url') or ''
            mime = mimetypes.guess_type(url)[0] or ''
            if (att.get('objectType') in ENCLOSURE_TYPES
                    or mime and mime.split('/')[0] in ENCLOSURE_TYPES):
                if item_has_enclosure:
                    logging.info(
                        'Warning: item %s already has an RSS enclosure, skipping additional enclosure %s',
                        id, url)
                    continue

                item_has_enclosure = feed_has_enclosure = True
                item.enclosure(url=url,
                               type=mime,
                               length=str(stream.get('size', '')))
                item.load_extension('podcast')
                duration = stream.get('duration')
                if duration:
                    item.podcast.itunes_duration(duration)

    if feed_has_enclosure:
        # NOTE(review): `author` and `categories` here hold the values from
        # the *last* loop iteration — confirm that is the intended source
        # for the feed-level iTunes author/category metadata.
        fg.load_extension('podcast')
        fg.podcast.itunes_author(
            actor.get('displayName') or actor.get('username'))
        if summary:
            fg.podcast.itunes_summary(summary)
        fg.podcast.itunes_explicit('no')
        fg.podcast.itunes_block(False)
        name = author.get('name')
        if name:
            fg.podcast.itunes_author(name)
        if image:
            fg.podcast.itunes_image(image)
        fg.podcast.itunes_category(categories)

    if latest:
        fg.lastBuildDate(latest)

    return fg.rss_str(pretty=True).decode('utf-8')
Beispiel #18
0
def from_activities(activities, actor=None, title=None, feed_url=None,
                    home_page_url=None, hfeed=None):
  """Converts ActivityStreams activities to an RSS 2.0 feed.

  Args:
    activities: sequence of ActivityStreams activity dicts
    actor: ActivityStreams actor dict, the author of the feed
    title: string, the feed title
    feed_url: string, the URL for this RSS feed
    home_page_url: string, the home page URL
    hfeed: dict, parsed mf2 h-feed, if available

  Returns:
    unicode string with RSS 2.0 XML

  Raises:
    TypeError: if activities is not an iterable of activity dicts
  """
  try:
    iter(activities)
  except TypeError:
    raise TypeError('activities must be iterable')

  # str replaces the Python 2-only basestring; dicts are iterable, so they
  # must be rejected explicitly too.
  if isinstance(activities, (dict, str)):
    raise TypeError('activities may not be a dict or string')

  assert feed_url  # required below, check before use

  fg = FeedGenerator()
  fg.id(feed_url)
  fg.link(href=feed_url, rel='self')
  if home_page_url:
    fg.link(href=home_page_url, rel='alternate')
  # TODO: parse language from lang attribute:
  # https://github.com/microformats/mf2py/issues/150
  fg.language('en')
  fg.generator('granary', uri='https://granary.io/')

  hfeed = hfeed or {}
  actor = actor or {}
  image = util.get_url(hfeed, 'image') or util.get_url(actor, 'image')
  if image:
    fg.image(image)

  props = hfeed.get('properties') or {}
  content = microformats2.get_text(util.get_first(props, 'content', ''))
  summary = util.get_first(props, 'summary', '')
  desc = content or summary or '-'
  fg.description(desc)  # required
  fg.title(title or util.ellipsize(desc))  # required

  latest = None
  enclosures = False
  for activity in activities:
    obj = activity.get('object') or activity
    if obj.get('objectType') == 'person':
      continue

    item = fg.add_entry()
    url = obj.get('url')
    item.id(obj.get('id') or url)
    item.link(href=url)
    item.guid(url, permalink=True)

    item.title(obj.get('title') or obj.get('displayName') or '-')  # required
    content = microformats2.render_content(
      obj, include_location=True, render_attachments=False) or obj.get('summary')
    if content:
      item.content(content, type='CDATA')

    item.category(
      [{'term': t['displayName']} for t in obj.get('tags', [])
       if t.get('displayName') and t.get('verb') not in ('like', 'react', 'share')])

    author = obj.get('author', {})
    item.author({
      'name': author.get('displayName') or author.get('username'),
      'uri': author.get('url'),
    })

    published = obj.get('published') or obj.get('updated')
    if published:
      try:
        dt = mf2util.parse_datetime(published)
        if not isinstance(dt, datetime):
          dt = datetime.combine(dt, time.min)
        if not dt.tzinfo:
          dt = dt.replace(tzinfo=util.UTC)
        item.published(dt)
        if not latest or dt > latest:
          latest = dt
      except ValueError:  # bad datetime string
        pass

    # RSS 2.0 allows at most one <enclosure> per item, so only the first
    # eligible attachment is used; additional ones are logged and skipped.
    item_has_enclosure = False
    for att in obj.get('attachments', []):
      stream = util.get_first(att, 'stream') or att
      if not stream:
        continue

      url = stream.get('url') or ''
      mime = mimetypes.guess_type(url)[0] or ''
      if (att.get('objectType') in ENCLOSURE_TYPES or
          mime and mime.split('/')[0] in ENCLOSURE_TYPES):
        if item_has_enclosure:
          logging.info(
            'Warning: item %s already has an RSS enclosure, skipping additional enclosure %s',
            obj.get('id') or obj.get('url'), url)
          continue

        item_has_enclosure = enclosures = True
        item.enclosure(url=url, type=mime, length='REMOVEME') # TODO: length (bytes)

        item.load_extension('podcast')
        duration = stream.get('duration')
        if duration:
          item.podcast.itunes_duration(duration)

  if enclosures:
    fg.load_extension('podcast')
    fg.podcast.itunes_author(actor.get('displayName') or actor.get('username'))
    if summary:
      fg.podcast.itunes_summary(summary)
    fg.podcast.itunes_explicit('no')
    fg.podcast.itunes_block(False)

  if latest:
    fg.lastBuildDate(latest)

  # feedgen requires a length attr on enclosures; strip the placeholder here.
  return fg.rss_str(pretty=True).decode('utf-8').replace(' length="REMOVEME"', '')
Beispiel #19
0
    fg.title('OpenMods updates')
    fg.author({'name': 'OpenMods'})
    fg.language('en')
    fg.link({'href': 'https://openmods.info'})
    fg.link({
        'href': 'https://openmods.info/atom.xml',
        'rel': 'self',
        'type': 'application/atom+xml'
    })
    fg.link({
        'href': 'https://openmods.info/rss.xml',
        'rel': 'self',
        'type': 'application/rss+xml'
    })
    fg.description('OpenMods update feed')
    fg.lastBuildDate(datetime.datetime.utcnow().replace(tzinfo=pytz.utc))

    for (bundle, mods) in sorted(data.items(),
                                 key=lambda (bundle, mods): bundle,
                                 reverse=True):
        bundle_date = datetime.datetime.strptime(
            bundle, "%Y-%m-%d").replace(tzinfo=pytz.utc)
        ue = fg.add_entry()
        ue.id("openmods.info:update:" + bundle)
        ue.title("New OpenMods update: " + bundle)
        ue.description(
            ", ".join(sorted([e.mod + " " + e.version
                              for e in mods.values()])), True)
        ue.link({'href': 'https://openmods.info'})
        ue.published(bundle_date)
        ue.updated(bundle_date)
Beispiel #20
0
def generateRSS(type="", username=""):
    """Build and return a configured FeedGenerator for a user's feed.

    Args:
        type: either "rss" or "atom"; selects which self-link URL and MIME
            type the feed advertises. (Shadows the ``type`` builtin, but the
            name is kept for backward compatibility with existing callers.)
        username: the FeedUser's username whose posts populate the feed.

    Returns:
        A feedgen.feed.FeedGenerator instance, ready for rss_str()/atom_str().

    Raises:
        ValueError: if ``type`` is not "rss"/"atom", the user doesn't exist,
            or the 'max_rss_posts' Option is missing.
    """
    if type not in ["rss", "atom"]:
        raise ValueError(
            'Wrong Type of RSS Feed given to the generator, only "rss" and "atom" accepted.'
        )

    try:
        user = FeedUser.objects.get(username=username)
    except ObjectDoesNotExist:
        raise ValueError("The requested user ['" + username +
                         "'] doesn't exist.")

    try:
        max_rss_posts = int(
            Option.objects.get(parameter="max_rss_posts").value)
    except ObjectDoesNotExist:
        raise ValueError("The Option 'max_rss_posts' doesn't exist.")

    # ======== Feed-level metadata ======== #

    fg = FeedGenerator()
    fg.id('https://www.feedcrunch.io/@' + username + '/')
    fg.title('Feedcrunch.IO - @' + user.username + " - " + user.rss_feed_title)
    fg.subtitle(user.description)

    fg.link(href="https://www.feedcrunch.io/", rel='alternate')
    if type == "rss":
        fg.link(href='https://www.feedcrunch.io/@' + username + '/rss/',
                rel='self',
                type="application/rss+xml")
    else:
        fg.link(href='https://www.feedcrunch.io/@' + username + '/atom/',
                rel='self',
                type="application/atom+xml")

    fg.logo('https://www.feedcrunch.io/static/images/favicon.png')
    fg.icon('https://www.feedcrunch.io/static/images/favicon.png')

    for interest in user.interests.all():
        fg.category(term=interest.name)

    fg.language("en-us")
    fg.rights('cc-by')
    fg.author({'name': user.get_full_name(), 'email': user.email})

    # Most recent post (by publication time) drives lastBuildDate. Guarded
    # with first() so a user with zero posts no longer raises IndexError.
    last_post = Post.objects.filter(
        user=user.username).order_by("-when").first()
    if last_post is not None:
        fg.lastBuildDate(last_post.when)

    # ======== Adding Posts to the Feed ======== #

    listPosts = Post.objects.filter(
        user=username, activeLink=True).order_by('-id')[:max_rss_posts]

    for post in listPosts:
        fe = fg.add_entry()
        # Entry IDs and links point at the redirect URL so clicks can be
        # tracked rather than linking directly to post.link.
        fe.id('https://www.feedcrunch.io/@' + username + '/redirect/' +
              str(post.id))
        fe.title(post.title)
        fe.summary(post.title)

        fe.link(href='https://www.feedcrunch.io/@' + username + '/redirect/' +
                str(post.id),
                rel='alternate')
        fe.author({'name': user.get_full_name(), 'email': user.email})
        fe.updated(post.when)

        for tag in post.tags.all():
            fe.category([{'term': tag.name}])

    return fg