Code Example #1
File: generate.py  Project: aaearon/lebatard-show-rss
def generate_feed(output_file, exclude_highlights=True):
    # Parse RSS feed
    d = feedparser.parse(ESPN_RSS_FEED)
    IMAGE_URL = d.feed.image["href"]

    # RSS feed generation
    fg = FeedGenerator()
    fg.load_extension("podcast", rss=True)

    ## RSS tags
    # Required
    fg.title(d.feed.title)
    fg.link(href="https://github.com/aaearon/lebatard-show-rss")
    fg.description(d.feed.description)
    # Optional
    fg.language(d.feed.language)
    fg.image(IMAGE_URL)
    fg.subtitle(d.feed.subtitle)
    # iTunes
    fg.podcast.itunes_author(d.feed.author)
    fg.podcast.itunes_category(itunes_category=d.feed.category)
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit="clean")
    fg.podcast.itunes_owner(name=CONTACT["name"], email=CONTACT["email"])

    tz = pytz.timezone("America/Los_Angeles")

    for e in d.entries:

        if exclude_highlights and episode_duration_string_to_int(e["itunes_duration"]) > 3600:
            pass
        else:
            fe = fg.add_entry()

            fe.id(e.id)
            fe.title(e.title)
            fe.description(e.description)
            fe.enclosure(url=e.enclosures[0]["href"], length=e.enclosures[0]["length"], type=e.enclosures[0]["type"])

            fe.podcast.itunes_summary(e.description)
            fe.podcast.itunes_subtitle(e.description)
            fe.podcast.itunes_duration(e["itunes_duration"])

            dt = datetime.fromtimestamp(time.mktime(e.published_parsed))
            date = tz.localize(dt)

            # Local hour
            if "Show: " in e.title:
                fe.published(date)
            elif "Hour 1" in e.title:
                fe.published(date + timedelta(hours=1))
            elif "Hour 2" in e.title:
                fe.published(date + timedelta(hours=2))
            elif "Hour 3" in e.title:
                fe.published(date + timedelta(hours=3))
            else:
                fe.published(date + timedelta(hours=-1))

    fg.rss_str(pretty=True)
    fg.rss_file(output_file)
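
The episode_duration_string_to_int helper used above is not part of the listing; a minimal sketch, assuming itunes_duration strings in HH:MM:SS, MM:SS, or plain-seconds form, could look like this:

def episode_duration_string_to_int(duration):
    """Convert an itunes_duration string such as '1:02:30' or '45:10' into seconds."""
    seconds = 0
    for part in duration.split(":"):
        seconds = seconds * 60 + int(part)
    return seconds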
Code Example #2
File: feed.py  Project: dyeray/podtube
def get_feed(query, title, description, link, image):
    """Get an RSS feed from the results of a query to the YouTube API."""
    service = _get_youtube_client()
    videos = service.search().list(part='snippet', **query, order='date',
                                   type='video', safeSearch='none').execute()
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(title)
    fg.description(description)
    fg.link(href=link, rel='alternate')
    fg.image(image)
    youtube_plugin = get_plugin_from_settings()

    for video in videos['items']:
        try:
            video_url = youtube_plugin.extract_link(
                "https://www.youtube.com/watch?v=" + video['id']['videoId'])
        except PluginException:
            continue
        fe = fg.add_entry()
        fe.id(video['id']['videoId'])
        fe.title(video['snippet']['title'])
        fe.description(video['snippet']['description'])
        fe.pubdate(dateutil.parser.parse(video['snippet']['publishedAt']))
        fe.podcast.itunes_image(video['snippet']['thumbnails']['high']['url'])
        video_info = requests.head(video_url)
        fe.enclosure(video_url, video_info.headers['Content-Length'],
                     video_info.headers['Content-Type'])
    return fg.rss_str(pretty=True)
Code Example #3
File: rc2pc.py  Project: dmascialino/rc2pc
def write_podcast(show, podcast_dir, base_public_url, showlocal_tz):
    """Create the podcast file."""
    fg = FeedGenerator()
    fg.load_extension('podcast')

    url = "{}{}.xml".format(base_public_url, show.id)
    fg.id(url.split('.')[0])
    fg.title(show.name)
    fg.image(show.image_url)
    fg.description(show.description)
    fg.link(href=url, rel='self')

    # collect all mp3s for the given show
    all_mp3s = glob.glob(os.path.join(podcast_dir, "{}_*.mp3".format(show.id)))

    for filepath in all_mp3s:
        filename = os.path.basename(filepath)
        mp3_date = _get_date_from_mp3_path(filepath, showlocal_tz)
        mp3_size = os.stat(filepath).st_size
        mp3_url = base_public_url + filename
        mp3_id = filename.split('.')[0]
        title = "Programa del {0:%d}/{0:%m}/{0:%Y}".format(mp3_date)

        # build the rss entry
        fe = fg.add_entry()
        fe.id(mp3_id)
        fe.pubdate(mp3_date)
        fe.title(title)
        fe.enclosure(mp3_url, str(mp3_size), 'audio/mpeg')

    fg.rss_str(pretty=True)
    fg.rss_file(os.path.join(podcast_dir, '{}.xml'.format(show.id)))
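
The _get_date_from_mp3_path helper is project-specific and not shown; a hypothetical version, assuming file names such as <show_id>_<YYYY-MM-DD>.mp3 and a pytz timezone for showlocal_tz, might be:

import datetime
import os


def _get_date_from_mp3_path(filepath, showlocal_tz):
    """Hypothetical helper: derive a timezone-aware datetime from the file name."""
    filename = os.path.basename(filepath)
    datestr = filename.split('_', 1)[1].rsplit('.', 1)[0]  # e.g. '2018-05-20'
    naive = datetime.datetime.strptime(datestr, "%Y-%m-%d")
    return showlocal_tz.localize(naive)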
Code Example #4
def build_feed(session, feed, final_url='', limit=None, stream=sys.stdout):
    items = session.query(model.FeedItem) \
            .filter(model.FeedItem.feed_id==feed.id) \
            .order_by(model.FeedItem.pubdate.desc()) \
            .limit(min(limit, len(feed.items))).all()

    fg = FeedGenerator()
    fg.title(feed.name)
    fg.link(href=feed.url, rel='alternate')
    fg.image(feed.imgurl, feed.name, final_url)
    fg.description(feed.description)
    fg.link(href=final_url, rel='self')
    fg.language('en')
    fg.ttl(60)
    fg.generator('arxiv-decent-feeds')

    if not items:
        stream.write(fg.rss_str(pretty=True))
        return

    for item in items:
        if not item.pubdate.tzinfo:
            # Add UTC to a naive datetime
            session.expunge(item)
            item.pubdate = pytz.utc.localize(item.pubdate)
        fe = fg.add_entry()
        fe.title(item.title)
        fe.link(href=item.link)
        fe.pubdate(item.pubdate)
        fe.description(item.summary)
        fe.guid(item.guid)

    stream.write(fg.rss_str(pretty=True))
Code Example #5
def _create_feed(speaker, talks, file_name):
    LOGGER.info("Creating feed for %s", speaker)
    updated = talks[0]['time']

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.language('en')
    fg.title(f'Talks By {speaker}')
    fg.link(href='http://philip.lundrigan.org/Speakercast/')
    fg.image(url=f'http://philip.lundrigan.org/Speakercast/covers/{urllib.parse.quote(speaker)}.jpg',
             title=f'General Conference talks by {speaker}.')
    fg.description(f'General Conference talks by {speaker}.')
    fg.author({'name':'Philip Lundrigan', 'email':'*****@*****.**'})
    fg.generator('Speakercast')
    fg.pubDate(updated)
    fg.lastBuildDate(updated)
    fg.podcast.itunes_category('Religion & Spirituality', 'Christianity')

    for talk in talks:
        fe = fg.add_entry()
        fe.title(talk['title'])
        fe.description(talk['preview'])
        fe.content(talk['html'], type='CDATA')
        fe.enclosure(talk['audio_url'], str(talk['audio_size']), 'audio/mpeg')
        fe.id(talk['uri'])
        fe.link(href=talk['url'])
        fe.published(talk['time'])

    fg.rss_file(file_name, pretty=True)
Code Example #6
def generate_rss(show_info, show_uri, country_code):
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.description(show_info['description'])
    fg.author({'name': show_info['publisher']})
    fg.title(show_info['name'])
    fg.link({'href': show_info['external_urls']['spotify']})
    fg.id(show_uri)
    fg.image(show_info.get('images')[0]['url'])
    total_episodes = show_info['episodes']['total']
    added_episodes = 0
    while added_episodes != total_episodes:
        episodes = sp.show_episodes(show_id=show_uri,
                                    limit=50,
                                    offset=added_episodes,
                                    market=country_code)
        for episode in episodes['items']:
            ent = fg.add_entry()
            ent.podcast.itunes_duration(int(episode['duration_ms'] / 1000))
            ent.title(episode.get('name'))
            ent.guid(episode['uri'])
            ent.published(
                datetime.strptime(episode['release_date'],
                                  '%Y-%m-%d').replace(tzinfo=timezone.utc))
            ent.description(episode['description'])
            ent.id(episode['uri'])
            ent.enclosure(
                url=
                f"https://anon-podcast.scdn.co/{episode['audio_preview_url'].split('/')[-1]}",
                length=0,
                type='audio/mpeg')
            added_episodes += 1
    return fg.rss_str().decode('utf-8')
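
The sp object used above is assumed to be an authenticated Spotipy client; a minimal setup sketch (the credentials below are placeholders, not values from the original project) might look like:

import spotipy
from spotipy.oauth2 import SpotifyClientCredentials

# Placeholder credentials; in practice these normally come from environment variables.
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(
    client_id="YOUR_CLIENT_ID",
    client_secret="YOUR_CLIENT_SECRET"))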
Code Example #7
class PodcastBaseItemExporter(BaseItemExporter, metaclass=abc.ABCMeta):
    """Item exporter base class designed to generate RSS feeds.

    The class uses feedgen to generate the RSS content.
    Subclasses are expected to implement the method save_to_storage.
    """
    def __init__(self, uri, title, description, url, image_url, **kwargs):
        """Initializes the exporter.

        Args:
            uri: Where to save the feed.
            title: Podcast title.
            description: Description of the podcast.
            url: Url of the podcast.
            image_url: Main image of the podcast.
            **kwargs: Any extra argument for BaseItemExporter.
        """
        super().__init__(**kwargs)
        self.uri = uri

        self.fg = FeedGenerator()
        self.fg.load_extension('podcast')

        self.fg.title(title)
        self.fg.description(description)
        self.fg.link(href=url)
        self.fg.image(image_url)
        self.fg._FeedGenerator__rss_lastBuildDate = None  # This prevents Plex from confusing pubDate with lastBuildDate

    def export_item(self, item):
        """Adds a new entry to the RSS feed.

        Args:
            item: A PodcastEpisodeItem.
        """
        fe = self.fg.add_entry()

        title = item.get('title')
        description = item.get('description')
        publication_date = item.get('publication_date')
        audio_url = item.get('audio_url')
        guid = item.get('guid')

        fe.title(title)
        fe.description(description)
        fe.published(publication_date)
        fe.enclosure(audio_url, 0, 'audio/mpeg')
        fe.guid(guid)

    def finish_exporting(self):
        """Generates the RSS content and saves it to a file"""
        rss_content = self.fg.rss_str(pretty=True)
        self.save_to_storage(rss_content)

    @abc.abstractmethod
    def save_to_storage(self, rss_content):
        """Subclasses must implement a way of saving the content.
        """
        pass
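
A minimal sketch of a concrete subclass that implements save_to_storage by writing the feed to a local file (treating uri as a plain file path is an assumption, not something the original project specifies):

class FilePodcastItemExporter(PodcastBaseItemExporter):
    """Hypothetical subclass: persists the generated RSS to self.uri on disk."""

    def save_to_storage(self, rss_content):
        # rss_content is the bytes object returned by fg.rss_str(pretty=True)
        with open(self.uri, 'wb') as f:
            f.write(rss_content)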
Code Example #8
File: feedgen.py  Project: n3f/filecasted
    def create(self):
        '''
        Create the RSS/podcast file

        [Required fields](https://help.apple.com/itc/podcasts_connect/#/itcb54353390)
        [Example RSS](https://help.apple.com/itc/podcasts_connect/#/itcbaf351599)

        :return:
        '''
        if self.verbose > 0:
            click.echo(self)
        fg = FeedGenerator()
        fg.load_extension('podcast')

        # Required
        fg.podcast.itunes_category('Arts')
        fg.title('The Title')
        # fg.podcast.itunes_title('Test')
        image_url = f'{self.base_url}/folder.jpg'
        fg.image(image_url)
        fg.podcast.itunes_image(image_url)
        fg.description('Audiobook')
        fg.language('en-us')
        # Explicit options: yes/no/clean
        fg.podcast.itunes_explicit('clean')

        # Optional
        # fg.author({'name':'Filecasted', 'email':'*****@*****.**'})
        fg.link({
            'href': self.base_url,
            # atom required
            'rel': 'alternate'
        })

        start = datetime.now(timezone.utc)

        for i, feed in enumerate(self._input):
            fe = fg.add_entry()
            # Required
            fe.title(feed.name)
            url = '/'.join([
                self.base_url,
                urllib.parse.quote(os.path.relpath(feed, self.output.parent))
            ])
            fe.enclosure(url=url,
                         length=str(os.path.getsize(feed)),
                         type=mimetypes.guess_type(feed.as_uri())[0])
            fe.pubDate(start + timedelta(hours=i))
            # Optional
            # fe.podcast.itunes_block(True)
            # description
            # duration
        if self.verbose > 1:
            click.echo(fg.rss_str(pretty=True))
        fg.rss_file(self.output.as_posix())
Code Example #9
File: application.py  Project: martyni/fb_auth
def rss_feed(feed="page", db="https://notdb.martyni.co.uk"):
    bucket = "authmartynicouk"
    if request.args.get("tag"):
        feed = request.args.get("tag")
    feed_url = "{db}/{bucket}/list/{feed}?reverse=true".format(db=db,
                                                               bucket=bucket,
                                                               feed=feed)
    print(feed_url)
    episodes_links = requests.get(feed_url).json()
    episodes = [requests.get(db + link).json() for link in episodes_links]
    print(episodes)
    description = str(episodes[-1].get('description'))
    author = str(episodes[-1].get('author'))
    title = feed
    email = "*****@*****.**"
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id(request.url)
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.author({'name': author, 'email': email})
    fg.link(href=request.url, rel='self')
    fg.description(description)
    fg.title(title)
    fg.image(url="{db}/{bucket}/file/{feed}_image.png".format(db=db,
                                                              bucket=bucket,
                                                              feed=feed),
             title=feed.title(),
             link=request.url,
             width='123',
             height='123',
             description=description)
    counter = 1

    for i in episodes:
        try:
            print(type(i))
            if type(i) is not dict:
                i = json.loads('"' + i.replace('"', "'") + '"')
            author = i.get("author") or "anonymous"
            email = i.get("email") or "*****@*****.**"
            title = str(i.get('title')).title() or "title"
            contents = i.get("contents")[0].replace("`", "'").replace(
                u"¬", "'") or "contents"
            fe = fg.add_entry()
            fe.id(str(counter) + "mp3")
            fe.title(str(i.get('title')).title())
            fe.description(contents)
            if i.get("media"):
                fe.enclosure(i.get("media"), 0, 'audio/mpeg')
            fe.link(href=request.url, rel='alternate')
            fe.author(name=author, email=email)
        except:
            pass
        counter += 1
    return Response(fg.rss_str(), mimetype='text/xml')
Code Example #10
def rssvideoschannel(request, channel_id):
    channel = Channel.objects.get(channel_id=channel_id)
    if not channel:
        return Http404

    videos = channel.video_set.order_by('-pub_date')
    fg = FeedGenerator()
    fg.load_extension('podcast')

    channelURL = ''.join([
        'http://',
        get_current_site(request).domain,
        reverse('you2rss:videoperchannel', args=(channel_id, ))
    ])
    fg.id(channelURL)
    fg.title(channel.title_text)
    fg.author({'name': 'pon sko', 'email': '*****@*****.**'})
    fg.link(href=channelURL, rel='alternate')
    description = channel.description_text
    if len(description) < 2:
        description = "no desc"
    fg.subtitle(description)
    fg.description(description)
    fg.language('en')
    fg.logo(logo=channel.thumbnail)
    fg.image(url=channel.thumbnail, title=channel.title_text)
    fg.podcast.itunes_image(channel.thumbnail)

    for video in videos:
        fe = fg.add_entry()
        fe.author(name=channel.title_text)
        videodesc = video.description_text
        if len(videodesc) < 2:
            videodesc = "no desc"
        fe.content(videodesc)
        fileURL = ''.join([
            'http://',
            get_current_site(request).domain,
            reverse('you2rss:rssfile', args=(video.video_id, ))
        ])

        fe.enclosure(fileURL, '1337', 'audio/mpeg')
        fe.id(fileURL)
        fe.link(href=fileURL, rel='alternate')
        fe.podcast.itunes_image(video.thumbnail)
        fe.pubdate(video.pub_date)
        fe.published(video.pub_date)
        fe.title(video.title_text)

    rssdata = fg.rss_str(pretty=True)
    response = HttpResponse(rssdata,
                            content_type='application/rss+xml; charset=UTF-8')
    response['Content-Length'] = len(rssdata)
    return response
Code Example #11
def staticrss(request, podcast_id):
    podcast = Podcast.objects.get(id=podcast_id)
    if not podcast:
        return Http404

    pods = podcast.pod_set.order_by('-audio_link')
    fg = FeedGenerator()
    fg.load_extension('podcast')

    channelURL = ''.join([
        'http://',
        get_current_site(request).domain,
        reverse('you2rss:staticrss', args=(podcast_id, ))
    ])
    fg.id(channelURL)
    fg.title(podcast.title_text)
    fg.author({'name': 'pon sko', 'email': '*****@*****.**'})
    fg.link(href=channelURL, rel='alternate')
    description = podcast.description_text
    if len(description) < 2:
        description = "no desc"
    fg.subtitle(description)
    fg.description(description)
    fg.language('en')
    fg.logo(logo=podcast.thumbnail)
    fg.image(url=podcast.thumbnail, title=podcast.title_text)
    fg.podcast.itunes_image(podcast.thumbnail)
    for pod in pods:
        fe = fg.add_entry()
        fe.author(name=podcast.title_text)
        desc = pod.description_text
        if len(desc) < 2:
            desc = "no desc"
        fe.content(desc)
        fileURL = pod.audio_link
        #''.join(['http://', get_current_site(request).domain,
        #                   reverse('you2rss:rssfile', args=(video.video_id,))])

        fe.enclosure(fileURL, pod.audio_size, pod.audio_type)
        fe.id(fileURL)
        fe.link(href=fileURL, rel='alternate')
        fe.podcast.itunes_image(podcast.thumbnail)
        fe.pubdate(pod.pub_date)
        fe.published(pod.pub_date)
        fe.title(pod.title_text)

    rssdata = fg.rss_str(pretty=True)
    response = HttpResponse(rssdata,
                            content_type='application/rss+xml; charset=UTF-8')
    response['Content-Length'] = len(rssdata)
    return response
Code Example #12
def feed(request, **kwargs):
    #return HttpResponse("Howdy!")
    pk = kwargs.get('pk')

    podcast = Podcast.objects.get(pk=pk)

    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.id("{}-{}".format(podcast.id, podcast.title).replace(' ', '_'))
    fg.title(podcast.title)
    fg.author({'name': podcast.author, 'email': '*****@*****.**'})
    fg.link(href=request.build_absolute_uri(
        reverse('podcast_feed', kwargs={'pk': pk})))
    #fg.logo('http://www.mormonnewsroom.org/media/960x540/ElderBallard.jpg')
    fg.image(url=podcast.cover,
             title=podcast.title,
             link=request.build_absolute_uri(
                 reverse('podcast_items', kwargs={'pk': pk})),
             width='800',
             height='600')
    fg.description(podcast.description)
    #fg.summary(podcast.summary)
    #fg.author({'name': 'why me, of course', 'email':'*****@*****.**'})
    # author not actually put in the feed

    if pk:

        item_list = Podcast_Item.objects.filter(
            podcast__pk=pk
        )  #Item.objects.filter(podcast_item__podcast__pk=kwargs.get('pk'))
    else:
        item_list = Podcast_Item.objects.all()

    for item in item_list:

        fe = fg.add_entry()

        fe.id("{}-{}".format(item.id, item.item.title.replace(' ', '_')))
        fe.title(item.item.title)
        fe.pubdate(item.pub_date)
        fe.link(href=item.item.link)
        fe.description(item.item.description)
        fe.enclosure(item.item.link, 0, 'audio/mpeg')
        #fe.id('http://media2.ldscdn.org/assets/general-conference/april-2018-general-conference/2018-03-1020-m-russell-ballard-64k-eng.mp3?download=true')
        # fe.id('12345')
        #fe.title('First')
        #fe.link(href='http://35.224.79.205/first')
        #fe.description('This is the description of the first item')
        #fe.enclosure('http://media2.ldscdn.org/assets/general-conference/april-2018-general-conference/2018-03-1020-m-russell-ballard-64k-eng.mp3?download=true', 0, 'audio/mpeg')

    return HttpResponse(fg.rss_str(pretty=False), content_type="text/xml")
Code Example #13
def podcast():
    #locale.setlocale(locale.LC_ALL, "it_IT.utf8")

#    base_url = "https://podcast02.unitedradio.it/virginradio.it//NEW/upload/uploadedContent/repliche/drfeelgood/"
    base_url = "http://podcast.mediaset.net/repliche//2020/1/8/"
    base_url_2="_drfeelgood.mp3"


    today= dt.datetime.today().replace(hour=9,minute=0, second=0)

    days=[today+dt.timedelta(days=i) for i in range(-6,1)]
    days=list(filter(lambda x: (x.weekday()<5), days))

    urls=[ base_url+day(d.weekday())+d.strftime("_%d%m%Y")+base_url_2 for d in days]

    fg=FeedGenerator()
    fg.load_extension("podcast")
    fg.title("Rock and Talk")
    fg.description("Last episodes from Virgin Radio Rock and Talk")
    fg.link(href="https://www.virginradio.it/sezioni/1154/rock-talk")



    fg.podcast.itunes_author("Virgin Radio")
    fg.podcast.itunes_category("Speech", "Rock")
    fg.podcast.itunes_explicit("no")
    fg.podcast.itunes_complete("no")
    fg.podcast.itunes_new_feed_url("http://example.com/new-feed.rss")
    fg.podcast.itunes_summary("Last episodes from Virgin Radio Rock and Talk")

    fg.logo(logo="https://www.virginradio.it/resizer/628/355/true/1548173388720.png--.png?1548173388000")
    fg.image(url="https://www.virginradio.it/resizer/628/355/true/1548173388720.png--.png?1548173388000", title="Rock and talk")
    fg.podcast.itunes_image("https://www.virginradio.it/resizer/628/355/true/1548173388720.png--.png?1548173388000")



    for d,u in zip(days,urls):
        fe = fg.add_entry()
        fe.id(u)
        gg=d.strftime("%A %d %B")
        fe.title(gg)
        fe.description(gg)
        fe.summary(gg)
        #fe.link(href=u, rel="alternate")
        fe.link(href="https://www.virginradio.it/sezioni/1154/rock-talk", rel="alternate") 
        fe.enclosure(u, 0, "audio/mpeg")
        fe.published(pytz.utc.localize(d))

    #fg.rss_file("podcast.xml")

    return fg.rss_str(pretty=True)
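
The day() helper is not shown; judging from how it is concatenated into the archive file name, it presumably returns an Italian weekday name, so a purely hypothetical implementation could be:

def day(weekday):
    # Hypothetical mapping from weekday index (0 = Monday) to the Italian day name
    # expected in the file names; the real project may spell or accent these differently.
    names = ["lunedi", "martedi", "mercoledi", "giovedi", "venerdi", "sabato", "domenica"]
    return names[weekday]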
Code Example #14
def feed():
    # Entries are added backwards
    episodes = Episode.query.order_by(asc(Episode.date)).all()

    fg = FeedGenerator()
    fg.load_extension("podcast")
    fg.title("The Crypto-Mises Podcast")
    fg.podcast.itunes_author("Satoshi Nakamoto Institute")
    fg.link(href=url_for("main.index", _external=True), rel="alternate")
    fg.subtitle("The official podcast of the Satoshi Nakamoto Institute")
    fg.language("en")
    fg.copyright("cc-by-sa")
    fg.podcast.itunes_summary(
        "Michael Goldstein and Daniel Krawisz of the Satoshi Nakamoto Institute discuss Bitcoin, economics, and cryptography."  # noqa
    )
    fg.podcast.itunes_owner("Michael Goldstein", "*****@*****.**")
    fg.link(href=url_for("podcast.feed", _external=True), rel="self")
    fg.podcast.itunes_explicit("no")
    fg.image(url_for("static", filename="img/cryptomises/cmpodcast_144.jpg"))
    fg.podcast.itunes_image(
        url_for("static", filename="img/cryptomises/cmpodcast_1440.jpg"))
    fg.podcast.itunes_category("Technology", "Tech News")

    for episode in episodes:
        description = f"""{episode.summary}
        If you enjoyed this episode, show your support by donating to SNI:
        {url_for('main.donate', _external=True)}"""
        enclosure_url = (
            f"https://s3.amazonaws.com/nakamotoinstitute/cryptomises/{episode.slug}.mp3"
        )

        fe = fg.add_entry()
        fe.id(url_for("podcast.detail", slug=episode.slug, _external=True))
        fe.title(episode.title)
        fe.podcast.itunes_summary(description)
        fe.description(description)
        fe.podcast.itunes_subtitle(episode.subtitle)
        fe.podcast.itunes_author("Satoshi Nakamoto Institute")
        fe.enclosure(enclosure_url, 0, "audio/mpeg")
        fe.podcast.itunes_duration(episode.duration)
        fe.pubDate(localize_time(episode.time))

    response = make_response(fg.rss_str(encoding="utf-8", pretty=True))
    response.headers.set("Content-Type", "application/rss+xml")
    return response
Code Example #15
def generateRss(fileList, directoryPath, baseUrl):

    fg = FeedGenerator()

    fg.load_extension('podcast')

    #Setup Feed
    fg.title(Config.podcastTitle)
    baseUrl = Config.baseUrl
    feed = baseUrl + '/rss'
    fg.link(href=feed)
    fg.description(Config.podcastDescription)
    fg.language('en')
    mainImage = baseUrl + '/files?filename=' + Config.mainImage
    fg.image(mainImage)
    #using Technology as other tags like Christianity wont validate
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.podcast.itunes_owner(name=Config.ownerName, email=Config.ownerEmail)

    #Setup episodes
    for i in fileList:
        fe = fg.add_entry()
        mp3 = directoryPath + i['title']

        # for local files
        # stat = os.stat(mp3)
        # size = os.path.getsize(mp3)
        # mp3File = baseUrl + '/files?filename=' + i['title']
        # fe.enclosure(mp3File, str(size) , 'audio/mp3')

        fe.enclosure(i['url'], str(i['size']), 'audio/mp3')
        fe.title(i['title'])
        descriptionText = 'Authors: ' + i['authors']
        fe.description(descriptionText)
        link = baseUrl + '/files?filename=' + i['title']
        fe.link(href=i['url'])
        fe.podcast.itunes_explicit('no')
        image = baseUrl + '/files?filename=' + Config.mainImage
        fe.podcast.itunes_image(image)

    #Save Rss
    fg.rss_str(pretty=True)
    saveLocation = directoryPath + 'podcast.xml'
    fg.rss_file(saveLocation)
Code Example #16
File: feed.py  Project: dyeray/podtube
def render_feed(feed_id: str, plugin: Plugin, options: GlobalOptions,
                base_url: str):
    feed = plugin.get_feed(feed_id)
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.title(feed.title)
    fg.description(feed.description)
    fg.link(href=feed.link, rel='alternate')
    fg.image(options.icon or feed.image)
    fg.id(feed.feed_id)
    for item in reversed(feed.items):
        fe = fg.add_entry()
        fe.id(item.item_id)
        fe.title(item.title)
        fe.description(item.description)
        fe.pubDate(item.date)
        fe.podcast.itunes_image(item.image)
        fe.enclosure(generate_url(item, plugin, options, base_url),
                     item.content_length, item.content_type)
    return fg.rss_str(pretty=True) if options.format == 'rss' else fg.atom_str(
        pretty=True)
Code Example #17
def create_feed(posts):
    fg = FeedGenerator()
    fg.id(SITE_URL)
    fg.title(SITE_NAME)
    fg.author(AUTHOR_INFO)
    fg.link(href=SITE_URL, rel='alternate')
    fg.link(href=SITE_URL + '/feed.atom', rel='self')
    fg.language(FEED_LANGUAGE)
    fg.image(url=IMG_URL)

    for i in range(min(10, len(posts))):
        post = posts[i]
        content = makeAtomContent(post['content'])
        fe = fg.add_entry()
        fe.id(fg.id() + '/' + post['url'])
        fe.title(post['title'])
        fe.link(href=fe.id())
        fe.published(post['date'].replace(tzinfo=tzutc()))
        fe.content(content, type="CDATA")

    return fg.atom_str(pretty=True).decode('utf-8')
Code Example #18
def generate_feed(input_file, output_file):
    fg = FeedGenerator()
    fg.load_extension('podcast', rss=True)

    ## RSS tags
    # Required
    fg.title(TITLE)
    fg.link(href=LINK)
    fg.description(DESCRIPTION)
    # Optional
    fg.language('en')
    fg.image(url=IMAGE_URL, title=TITLE, link=LINK)
    fg.ttl(720)
    fg.webMaster(CONTACT['name'])
    now = datetime.datetime.now()
    tz = pytz.timezone('Europe/Amsterdam')
    fg.pubDate(tz.localize(now))
    # iTunes
    fg.podcast.itunes_author('Dan LeBatard')
    fg.podcast.itunes_category(itunes_category='Sports & Recreation',
                               itunes_subcategory='Professional')
    fg.podcast.itunes_image(itunes_image=IMAGE_URL)
    fg.podcast.itunes_explicit(itunes_explicit='clean')
    fg.podcast.itunes_owner(name=CONTACT['name'], email=CONTACT['email'])

    # Add items
    items = read_items(input_file)
    for item in items:
        fe = fg.add_entry()

        ## RSS tags
        fe.id(item['guid'])
        fe.title(item['title'])
        fe.description(item['description'])
        fe.enclosure(item['link'], 0, 'audio/mpeg')
        fe.pubdate(item['pubDate'])

    # Finish off the file
    fg.rss_str(pretty=True)
    fg.rss_file(output_file)
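
read_items is not included in the listing; a minimal sketch, assuming the input file is JSON containing a list of items with guid, title, description, link, and pubDate fields, might be:

import json


def read_items(input_file):
    """Hypothetical reader; the real input format is not shown in this example."""
    with open(input_file) as f:
        return json.load(f)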
Code Example #19
 def build_feed(self, commits: List[Commit]):
     log.info("build feed page %d" % len(commits))
     feed = FeedGenerator()
     feed.id("")
     feed.title("AWS API Changes")
     feed.author({
         "name": "AWSPIChanges",
         "email": "https://github.com/awslabs/aws-sdk-api-changes",
     })
     feed.link(href=self.site_url, rel="alternate")
     feed.link(href="%s/feed/" % self.site_url, rel="self")
     feed.description("AWS API ChangeLog")
     feed.language("en-US")
     feed.generator("artisan-sdk-gitops")
     feed.image(
         url=
         "https://a0.awsstatic.com/main/images/logos/aws_logo_smile_179x109.png"
     )  # noqa
     for c in commits:
         for s in c.service_changes:
             fe = feed.add_entry(order="append")
             fe.title("{} - {}{}methods".format(
                 s.title,
                 s.count_new and "%d new " % s.count_new or "",
                 s.count_updated and "%d updated " % s.count_updated or "",
             ))
             fe.id("{}-{}".format(c.id, s.name))
             fe.description(s.change_log)
             fe.link({
                 "href":
                 self.link("archive/changes/%s-%s.html" %
                           (c.id[:6], s.name))
             })
             fe.published(c.created)
     self.render_page(
         "feed/feed.rss",
         force=True,
         content=feed.rss_str(pretty=True).decode("utf8"),
     )
Code Example #20
def make_rss_feed(request: Request, packages: list, date_attr: str):
    """ Create an RSS Feed string for some packages.

    :param request: A FastAPI request
    :param packages: A list of packages to add to the RSS feed
    :param date_attr: The date attribute (DB column) to use
    :return: RSS Feed string
    """

    feed = FeedGenerator()
    feed.title("AUR Newest Packages")
    feed.description("The latest and greatest packages in the AUR")
    base = f"{request.url.scheme}://{request.url.netloc}"
    feed.link(href=base, rel="alternate")
    feed.link(href=f"{base}/rss", rel="self")
    feed.image(title="AUR Newest Packages",
               url=f"{base}/static/css/archnavbar/aurlogo.png",
               link=base,
               description="AUR Newest Packages Feed")

    for pkg in packages:
        entry = feed.add_entry(order="append")
        entry.title(pkg.Name)
        entry.link(href=f"{base}/packages/{pkg.Name}", rel="alternate")
        entry.link(href=f"{base}/rss", rel="self", type="application/rss+xml")
        entry.description(pkg.Description or str())

        attr = getattr(pkg.PackageBase, date_attr)
        dt = filters.timestamp_to_datetime(attr)
        dt = filters.as_timezone(dt, request.user.Timezone)
        entry.pubDate(dt.strftime("%Y-%m-%d %H:%M:%S%z"))

        entry.source(f"{base}")
        if pkg.PackageBase.Maintainer:
            entry.author(author={"name": pkg.PackageBase.Maintainer.Username})
        entry.guid(f"{pkg.Name} - {attr}")

    return feed.rss_str()
Code Example #21
File: org2podcast.py  Project: amoblin/tools
nodelist = Orgnode.makelist(sys.argv[1])

info = nodelist[0]
properties = info.Properties()

fg = FeedGenerator()
fg.load_extension('podcast')  # required for the fg.podcast.* calls below
fg.podcast.itunes_category('Technology', 'Podcasting')

fg.title(info.Heading())
fg.author( {'name':properties['author'],'email':properties['email']} )
fg.id(properties["id"])
fg.link( href='http://whoomin.marboo.biz', rel='alternate' )
fg.logo(properties["logo"])
fg.subtitle(properties["subtitle"])
fg.link(href=properties["link"], rel='self' )
fg.language(properties["language"])
fg.image(properties["image"], height="140", width="140")
fg.rights(properties["copyright"])
fg.podcast.itunes_author(properties["author"])
fg.podcast.itunes_subtitle(properties["subtitle"])
fg.podcast.itunes_summary(properties["summary"])
#fg.podcast.itunes_keywords(properties["keywords"])
#fg.ttl(1440)

for i in range(1, len(nodelist)):
    node = nodelist[i]
    if node.Todo() == "DRAFT":
        continue
    mp3_length = "1024"
    fe = fg.add_entry()

    title = node.Heading()
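
The listing is cut off at this point; a purely hypothetical continuation of the loop (the property names below are assumptions, not taken from the original file) might finish each entry and write the feed like this:

    # Hypothetical continuation of the entry loop.
    props = node.Properties()
    fe.title(title)
    fe.description(props.get("summary", title))
    fe.enclosure(props["mp3"], mp3_length, "audio/mpeg")

fg.rss_file("podcast.xml")  # output path is a placeholder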
Code Example #22
  '''
  print(s.decode('utf-8') if type(s) == type(b'') else s)

fg = FeedGenerator()
fg.load_extension('podcast')
fg.title('The Crypto-Mises Podcast')
fg.podcast.itunes_author('Satoshi Nakamoto Institute')
fg.link( href='http://nakamotoinstitute.org/', rel='alternate' )
fg.subtitle('The official podcast of the Satoshi Nakamoto Institute')
fg.language('en')
fg.copyright('cc-by-sa')
fg.podcast.itunes_summary('Michael Goldstein and Daniel Krawisz of the Satoshi Nakamoto Institute discuss Bitcoin, economics, and cryptography.')
fg.podcast.itunes_owner('Michael Goldstein', '*****@*****.**')
fg.link( href='http://nakamotoinstitute.org/podcast/feed/', rel='self' )
fg.podcast.itunes_explicit('no')
fg.image('http://nakamotoinstitute.org/static/img/cryptomises/cmpodcast_144.jpg')
fg.podcast.itunes_image('http://nakamotoinstitute.org/static/img/cryptomises/cmpodcast_1440.jpg')
fg.podcast.itunes_category('Technology', 'Tech News')


eps = Episode.query.order_by(desc(Episode.date)).all()

for ep in eps:
  fe = fg.add_entry()
  fe.id('http://nakamotoinstitute/podcast/'+ep.slug+'/')
  fe.title(ep.title)
  fe.podcast.itunes_summary(ep.summary + ' If you enjoyed this episode, show your support by donating to SNI: ' + ep.address)
  fe.podcast.itunes_subtitle(ep.subtitle)
  fe.podcast.itunes_author('Satoshi Nakamoto Institute')
  fe.enclosure('https://s3.amazonaws.com/nakamotoinstitute/cryptomises/'+ep.slug+'.mp3', 0, 'audio/mpeg')
  fe.podcast.itunes_duration(ep.duration)
Code Example #23
File: rss.py  Project: snarfed/granary
def from_activities(activities, actor=None, title=None, feed_url=None,
                    home_page_url=None, hfeed=None):
  """Converts ActivityStreams activities to an RSS 2.0 feed.

  Args:
    activities: sequence of ActivityStreams activity dicts
    actor: ActivityStreams actor dict, the author of the feed
    title: string, the feed title
    feed_url: string, the URL for this RSS feed
    home_page_url: string, the home page URL
    hfeed: dict, parsed mf2 h-feed, if available

  Returns:
    unicode string with RSS 2.0 XML
  """
  try:
    iter(activities)
  except TypeError:
    raise TypeError('activities must be iterable')

  if isinstance(activities, (dict, basestring)):
    raise TypeError('activities may not be a dict or string')

  fg = FeedGenerator()
  fg.id(feed_url)
  assert feed_url
  fg.link(href=feed_url, rel='self')
  if home_page_url:
    fg.link(href=home_page_url, rel='alternate')
  # TODO: parse language from lang attribute:
  # https://github.com/microformats/mf2py/issues/150
  fg.language('en')
  fg.generator('granary', uri='https://granary.io/')

  hfeed = hfeed or {}
  actor = actor or {}
  image = util.get_url(hfeed, 'image') or util.get_url(actor, 'image')
  if image:
    fg.image(image)

  props = hfeed.get('properties') or {}
  content = microformats2.get_text(util.get_first(props, 'content', ''))
  summary = util.get_first(props, 'summary', '')
  desc = content or summary or '-'
  fg.description(desc)  # required
  fg.title(title or util.ellipsize(desc))  # required

  latest = None
  enclosures = False
  for activity in activities:
    obj = activity.get('object') or activity
    if obj.get('objectType') == 'person':
      continue

    item = fg.add_entry()
    url = obj.get('url')
    item.id(obj.get('id') or url)
    item.link(href=url)
    item.guid(url, permalink=True)

    item.title(obj.get('title') or obj.get('displayName') or '-')  # required
    content = microformats2.render_content(
      obj, include_location=True, render_attachments=False) or obj.get('summary')
    if content:
      item.content(content, type='CDATA')

    item.category(
      [{'term': t['displayName']} for t in obj.get('tags', [])
       if t.get('displayName') and t.get('verb') not in ('like', 'react', 'share')])

    author = obj.get('author', {})
    item.author({
      'name': author.get('displayName') or author.get('username'),
      'uri': author.get('url'),
    })

    published = obj.get('published') or obj.get('updated')
    if published:
      try:
        dt = mf2util.parse_datetime(published)
        if not isinstance(dt, datetime):
          dt = datetime.combine(dt, time.min)
        if not dt.tzinfo:
          dt = dt.replace(tzinfo=util.UTC)
        item.published(dt)
        if not latest or dt > latest:
          latest = dt
      except ValueError:  # bad datetime string
        pass


    for att in obj.get('attachments', []):
      stream = util.get_first(att, 'stream') or att
      if not stream:
        continue

      url = stream.get('url') or ''
      mime = mimetypes.guess_type(url)[0] or ''
      if (att.get('objectType') in ENCLOSURE_TYPES or
          mime and mime.split('/')[0] in ENCLOSURE_TYPES):
        enclosures = True
        item.enclosure(url=url, type=mime, length='REMOVEME') # TODO: length (bytes)

        item.load_extension('podcast')
        duration = stream.get('duration')
        if duration:
          item.podcast.itunes_duration(duration)

  if enclosures:
    fg.load_extension('podcast')
    fg.podcast.itunes_author(actor.get('displayName') or actor.get('username'))
    if summary:
      fg.podcast.itunes_summary(summary)
    fg.podcast.itunes_explicit('no')
    fg.podcast.itunes_block(False)

  if latest:
    fg.lastBuildDate(latest)

  return fg.rss_str(pretty=True).decode('utf-8').replace(' length="REMOVEME"', '')
Code Example #24
			if args['--subcategory'] is not None:
				fg.podcast.itunes_category(args['--category'], args['--subcategory'])
			else:
				fg.podcast.itunes_category(args['--category'])
		
		#Podcast Details
		fg.title(args['--title'])
		fg.link(href=args['--link'], rel='alternate')
		fg.description(args['--desc'])
		
		if args['--id'] is not None:
			fg.id(args['--id'])
		
		if args['--logo'] is not None:
			fg.logo(logo=args['--logo'])
			fg.image(url=args['--logo'], title=args['--title'])
			fg.podcast.itunes_image(args['--logo'])
			
		if args['--language'] is not None:
			fg.language(args['--language'])
			
		if args['--feed-link'] is not None:
			fg.link(href=args['--feed-link'], rel='self')
		
		#Clean-up link string: trim spaces and remove trailing slash
		link = args['--link'].strip()
		if link[len(link) - 1] == '/':
			link = link[:len(link)-1]
		
		#Generate feed items from files in directory.
		for item in feed_files:
Code Example #25
File: rss.py  Project: whyouare111/granary
def from_activities(activities,
                    actor=None,
                    title=None,
                    feed_url=None,
                    home_page_url=None,
                    hfeed=None):
    """Converts ActivityStreams activities to an RSS 2.0 feed.

  Args:
    activities: sequence of ActivityStreams activity dicts
    actor: ActivityStreams actor dict, the author of the feed
    title: string, the feed title
    feed_url: string, the URL for this RSS feed
    home_page_url: string, the home page URL
    hfeed: dict, parsed mf2 h-feed, if available

  Returns:
    unicode string with RSS 2.0 XML
  """
    try:
        iter(activities)
    except TypeError:
        raise TypeError('activities must be iterable')

    if isinstance(activities, (dict, str)):
        raise TypeError('activities may not be a dict or string')

    fg = FeedGenerator()
    fg.id(feed_url)
    assert feed_url
    fg.link(href=feed_url, rel='self')
    if home_page_url:
        fg.link(href=home_page_url, rel='alternate')
    # TODO: parse language from lang attribute:
    # https://github.com/microformats/mf2py/issues/150
    fg.language('en')
    fg.generator('granary', uri='https://granary.io/')

    hfeed = hfeed or {}
    actor = actor or {}
    image = (util.get_url(hfeed.get('properties', {}), 'photo')
             or util.get_url(actor, 'image'))
    if image:
        fg.image(image)

    props = hfeed.get('properties') or {}
    content = microformats2.get_text(util.get_first(props, 'content', ''))
    summary = util.get_first(props, 'summary', '')
    desc = content or summary or '-'
    fg.description(desc)  # required
    fg.title(title or util.ellipsize(desc))  # required

    latest = None
    feed_has_enclosure = False
    for activity in activities:
        obj = activity.get('object') or activity
        if obj.get('objectType') == 'person':
            continue

        item = fg.add_entry()
        url = obj.get('url')
        id = obj.get('id') or url
        item.id(id)
        item.link(href=url)
        item.guid(url, permalink=True)

        # title (required)
        title = (obj.get('title') or obj.get('displayName')
                 or util.ellipsize(obj.get('content', '-')))
        # strip HTML tags
        title = util.parse_html(title).get_text('').strip()
        item.title(title)

        content = microformats2.render_content(obj,
                                               include_location=True,
                                               render_attachments=True,
                                               render_image=True)
        if not content:
            content = obj.get('summary')
        if content:
            item.content(content, type='CDATA')

        categories = [
            {
                'term': t['displayName']
            } for t in obj.get('tags', [])
            if t.get('displayName') and t.get('verb') not in ('like', 'react',
                                                              'share')
            and t.get('objectType') not in ('article', 'person', 'mention')
        ]
        item.category(categories)

        author = obj.get('author', {})
        author = {
            'name': author.get('displayName') or author.get('username'),
            'uri': author.get('url'),
            'email': author.get('email') or '-',
        }
        item.author(author)

        published = obj.get('published') or obj.get('updated')
        if published and isinstance(published, str):
            try:
                dt = mf2util.parse_datetime(published)
                if not isinstance(dt, datetime):
                    dt = datetime.combine(dt, time.min)
                if not dt.tzinfo:
                    dt = dt.replace(tzinfo=util.UTC)
                item.published(dt)
                if not latest or dt > latest:
                    latest = dt
            except ValueError:  # bad datetime string
                pass

        item_has_enclosure = False
        for att in obj.get('attachments', []):
            stream = util.get_first(att, 'stream') or att
            if not stream:
                continue

            url = stream.get('url') or ''
            mime = mimetypes.guess_type(url)[0] or ''
            if (att.get('objectType') in ENCLOSURE_TYPES
                    or mime and mime.split('/')[0] in ENCLOSURE_TYPES):
                if item_has_enclosure:
                    logging.info(
                        'Warning: item %s already has an RSS enclosure, skipping additional enclosure %s',
                        id, url)
                    continue

                item_has_enclosure = feed_has_enclosure = True
                item.enclosure(url=url,
                               type=mime,
                               length=str(stream.get('size', '')))
                item.load_extension('podcast')
                duration = stream.get('duration')
                if duration:
                    item.podcast.itunes_duration(duration)

    if feed_has_enclosure:
        fg.load_extension('podcast')
        fg.podcast.itunes_author(
            actor.get('displayName') or actor.get('username'))
        if summary:
            fg.podcast.itunes_summary(summary)
        fg.podcast.itunes_explicit('no')
        fg.podcast.itunes_block(False)
        name = author.get('name')
        if name:
            fg.podcast.itunes_author(name)
        if image:
            fg.podcast.itunes_image(image)
        fg.podcast.itunes_category(categories)

    if latest:
        fg.lastBuildDate(latest)

    return fg.rss_str(pretty=True).decode('utf-8')
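
A minimal usage sketch for from_activities above, with a single hand-written note activity (all field values are illustrative only):

activities = [{
    'object': {
        'objectType': 'note',
        'id': 'tag:example.com,2021:1',
        'url': 'https://example.com/posts/1',
        'content': 'Hello, RSS world!',
        'published': '2021-01-01T12:00:00+00:00',
    },
}]

rss_xml = from_activities(
    activities,
    actor={'displayName': 'Example Author', 'url': 'https://example.com/'},
    title='Example feed',
    feed_url='https://example.com/feed.rss',
    home_page_url='https://example.com/')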
Code Example #26
File: podtube.py  Project: kaesi0/PodTube
 def get(self, playlist):
     playlist = playlist.split('/')
     if len(playlist) < 2:
         playlist.append('video')
     playlist_name = '/'.join(playlist)
     self.set_header('Content-type', 'application/rss+xml')
     if playlist_name in playlist_feed and playlist_feed[playlist_name][
             'expire'] > datetime.datetime.now():
         self.write(playlist_feed[playlist_name]['feed'])
         self.finish()
         return
     calls = 0
     payload = {'part': 'snippet', 'id': playlist[0], 'key': key}
     request = requests.get(
         'https://www.googleapis.com/youtube/v3/playlists', params=payload)
     calls += 1
     response = request.json()
     if request.status_code == 200:
         logging.debug('Downloaded Playlist Information')
     else:
         logging.error('Error Downloading Playlist: %s', request.reason)
         self.send_error(reason='Error Downloading Playlist')
         return
     fg = FeedGenerator()
     fg.load_extension('podcast')
     fg.generator('PodTube (python-feedgen)', __version__,
                  'https://github.com/aquacash5/PodTube')
     snippet = response['items'][0]['snippet']
     icon = max(snippet['thumbnails'],
                key=lambda x: snippet['thumbnails'][x]['width'])
     logging.info('Playlist: %s (%s)', playlist[0], snippet['title'])
     fg.title(snippet['title'])
     fg.id('http://' + self.request.host + self.request.uri)
     fg.description(snippet['description'] or ' ')
     fg.author(name='Podtube',
               email='*****@*****.**',
               uri='https://github.com/aquacash5/PodTube')
     fg.podcast.itunes_author(snippet['channelTitle'])
     fg.image(snippet['thumbnails'][icon]['url'])
     fg.link(href=f'http://youtube.com/playlist/?list={playlist}',
             rel='self')
     fg.language('en-US')
     fg.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
     fg.podcast.itunes_explicit('no')
     fg.podcast.itunes_owner(name='Podtube',
                             email='*****@*****.**')
     fg.podcast.itunes_summary(snippet['description'])
     fg.podcast.itunes_category(cat='Technology')
     fg.updated(str(datetime.datetime.utcnow()) + 'Z')
     video = None
     response = {'nextPageToken': ''}
     while 'nextPageToken' in response.keys():
         payload = {
             'part': 'snippet',
             'maxResults': 50,
             'playlistId': playlist[0],
             'key': key,
             'pageToken': response['nextPageToken']
         }
         request = requests.get(
             'https://www.googleapis.com/youtube/v3/playlistItems',
             params=payload)
         calls += 1
         response = request.json()
         if request.status_code == 200:
             logging.debug('Downloaded Playlist Information')
         else:
             logging.error('Error Downloading Playlist: %s', request.reason)
             self.send_error(reason='Error Downloading Playlist Items')
             return
         for item in response['items']:
             snippet = item['snippet']
             current_video = snippet['resourceId']['videoId']
             if 'Private' in snippet['title']:
                 continue
             logging.debug('PlaylistVideo: %s (%s)', current_video,
                           snippet['title'])
             fe = fg.add_entry()
             fe.title(snippet['title'])
             fe.id(current_video)
             icon = max(snippet['thumbnails'],
                        key=lambda x: snippet['thumbnails'][x]['width'])
             fe.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
             fe.updated(snippet['publishedAt'])
             if playlist[1] == 'video':
                 fe.enclosure(
                     url=f'http://{self.request.host}/video/{current_video}',
                     type="video/mp4")
             elif playlist[1] == 'audio':
                 fe.enclosure(
                     url=f'http://{self.request.host}/audio/{current_video}',
                     type="audio/mpeg")
             fe.author(name=snippet['channelTitle'])
             fe.podcast.itunes_author(snippet['channelTitle'])
             fe.pubDate(snippet['publishedAt'])
             fe.link(href=f'http://www.youtube.com/watch?v={current_video}',
                     title=snippet['title'])
             fe.podcast.itunes_summary(snippet['description'])
             fe.description(snippet['description'])
             if not video or video['expire'] < fe.pubDate():
                 video = {'video': fe.id(), 'expire': fe.pubDate()}
     feed = {
         'feed': fg.rss_str(),
         'expire': datetime.datetime.now() + datetime.timedelta(hours=calls)
     }
     playlist_feed[playlist_name] = feed
     self.write(feed['feed'])
     self.finish()
     video = video['video']
     mp3_file = 'audio/{}.mp3'.format(video)
     if playlist[1] == 'audio' and not os.path.exists(
             mp3_file) and video not in conversion_queue.keys():
         conversion_queue[video] = {
             'status': False,
             'added': datetime.datetime.now()
         }
Code Example #27
File: feedbridge.py  Project: mod94/botfriend
class Bridge(object):

    NO_VALUE = object()

    def __init__(self, filelike):
        if isstr(filelike):
            feed = filelike
        else:
            feed = filelike.read()

        self.raw = feed
        self.parsed = feedparser.parse(self.raw)
        self.feed = FeedGenerator()

        # Set feed-level values.
        self.build_feed()
        self.build_entries()

    def build_feed(self):
        f = self.parsed.feed

        for field in [
                'id', 'title', 'subtitle', 'updated', 'rights', 'generator',
                'docs', 'language', ('xml_lang', 'language'),
            ('authors', 'author'), ('links', 'link')
        ]:
            self._copy(f, self.feed, field)

        if f.get('image'):
            image_kwargs = {}
            for image_field in 'url', 'title', 'link', 'width', 'height', 'description':
                ignore, value = self._setter(f.image, self.feed, image_field)
                if value is not self.NO_VALUE:
                    image_kwargs[image_field] = value

            if image_kwargs:
                self.feed.image(**image_kwargs)

    def build_entries(self):
        for entry in self.parsed.entries:
            self.build_entry(entry)

    def build_entry(self, parsed):
        built = self.feed.add_entry(order='append')

        # TODO: 'tag' is not supported in feedgen
        for field in [
                'id', 'title', 'updated', 'summary', 'published',
            ('links', 'link')
        ]:
            self._copy(parsed, built, field)

        permalink = parsed.get('link')
        guid_is_link = parsed['guidislink']
        if permalink:
            built.guid(permalink, guid_is_link)

    def _setter(self, feedparser_obj, feedgen_obj, field):
        if isinstance(field, tuple):
            field, method_name = field
        else:
            method_name = field
        setter = getattr(feedgen_obj, method_name, None)
        value = feedparser_obj.get(field, self.NO_VALUE)
        return setter, value

    def _copy(self, feedparser_obj, feedgen_obj, field):
        setter, value = self._setter(feedparser_obj, feedgen_obj, field)
        if value is self.NO_VALUE:
            return
        if not isinstance(value, list):
            value = [value]
        for v in value:
            setter(v)
        if field in feedparser_obj:  # Temporary cleanup
            del feedparser_obj[field]
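
The isstr helper used in __init__ is not shown; a minimal sketch could be:

def isstr(value):
    """Assumed helper: True if the argument is already a text or byte string."""
    return isinstance(value, (str, bytes))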
Code Example #28
async def playlist(request, playlist_id, return_type='video'):
    log.info(f'Playlist: {playlist_id}')
    playlist_name = f'{playlist_id}/{return_type}'
    if playlist_name in playlist_feed and playlist_feed[playlist_name][
            'expire'] > datetime.now():
        return raw(playlist_feed[playlist_name]['feed'],
                   content_type='application/rss+xml')
    calls = 0
    payload = {'part': 'snippet', 'id': playlist_id, 'key': KEY}
    log.debug('Downloaded Playlist Information')
    response = json.loads(await get(
        'https://www.googleapis.com/youtube/v3/playlists', params=payload))
    calls += 1
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.generator('PodTube', __version__,
                 'https://github.com/aquacash5/PodTube')
    snippet = response['items'][0]['snippet']
    icon = max(snippet['thumbnails'],
               key=lambda x: snippet['thumbnails'][x]['width'])
    fg.title(snippet['title'])
    fg.id(f'http://{request.headers["host"]}{request.url}')
    fg.description(snippet['description'] or ' ')
    fg.author(name=snippet['channelTitle'])
    fg.image(snippet['thumbnails'][icon]['url'])
    fg.link(href=f'https://www.youtube.com/playlist?list={playlist_id}')
    fg.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
    fg.podcast.itunes_summary(snippet['description'])
    fg.podcast.itunes_category('Technology', 'Podcasting')
    fg.updated(f'{str(datetime.utcnow())}Z')
    response = {'nextPageToken': ''}
    while 'nextPageToken' in response.keys():
        payload = {
            'part': 'snippet',
            'maxResults': 50,
            'playlistId': playlist_id,
            'key': KEY,
            'pageToken': response['nextPageToken']
        }
        response = json.loads(await get(
            'https://www.googleapis.com/youtube/v3/playlistItems',
            params=payload))
        calls += 1
        for item in response['items']:
            snippet = item['snippet']
            current_video = snippet['resourceId']['videoId']
            if 'Private' in snippet['title']:
                continue
            log.debug(f'PlaylistVideo: {current_video} {snippet["title"]}')
            fe = fg.add_entry()
            fe.title(snippet['title'])
            fe.id(current_video)
            icon = max(snippet['thumbnails'],
                       key=lambda x: snippet['thumbnails'][x]['width'])
            fe.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
            fe.updated(snippet['publishedAt'])
            if return_type == 'audio':
                fe.enclosure(
                    url=f'http://{request.headers["host"]}/audio/{current_video}',
                    type="audio/mpeg")
            else:
                fe.enclosure(
                    url=f'http://{request.headers["host"]}/video/{current_video}',
                    type="video/mp4")
            fe.author(name=snippet['channelTitle'])
            fe.podcast.itunes_author(snippet['channelTitle'])
            fe.pubdate(snippet['publishedAt'])
            fe.link(href='http://www.youtube.com/watch?v=' + current_video,
                    title=snippet['title'])
            fe.podcast.itunes_summary(snippet['description'])
            fe.description(snippet['description'])
            await sleep(0)
    feed = {
        'feed': fg.rss_str(),
        'expire': datetime.now() + timedelta(hours=calls)
    }
    playlist_feed[playlist_name] = feed
    return raw(feed['feed'], content_type='application/rss+xml')
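Examples #31 and #33 depend on an awaitable get() helper (and on module-level KEY, playlist_feed and channel_feed objects) that are outside the excerpt. One plausible sketch of such a helper, assuming aiohttp; the real PodTube helper may differ:

import aiohttp

async def get(url, params=None):
    # Fetch the URL and return the response body as text; the real helper
    # may add headers, retries or error handling.
    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as response:
            return await response.text()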
Code example #32
File: podshrinker.py  Project: cstorey/podshrinker
def feed(uri, verif):
    uri = base64.urlsafe_b64decode(uri.encode('utf8'))
    verif = base64.urlsafe_b64decode(verif.encode('utf8'))
    mac = hmac.new(HMAC_KEY, uri, digestmod=pyblake2.blake2s).digest()
    if not hmac.compare_digest(verif, mac):
        abort(403)

    uri = uri.decode('utf8')
    verify_uri(uri)

    cachefile = pathfor(uri, '.picklejson', FEED_DIR)
    modified = etag = None
    cached = None
    if os.path.isfile(cachefile):
        try:
            with open(cachefile, 'rb') as f:
                cached = jsonpickle.decode(f.read())
                app.logger.debug("Loaded cache from cachefile:%r", cachefile)
                etag = cached.etag if 'etag' in cached else None
                modified = cached.modified if 'modified' in cached else None
        except Exception as e:
            app.logger.warn("Could not load cache:%r", e)

    app.logger.debug("Parse feed: %r; etag:%r; modified:%r", uri, etag,
                     modified)
    parsed = feedparser.parse(uri, etag=etag, modified=modified)

    app.logger.debug("Parsed feed: %r; %r", uri, 'status' in parsed
                     and parsed.status)
    if parsed.status < 200 or parsed.status >= 400:
        app.logger.warn("Non okay status code, 404?")
        abort(404)

    if cached and not parsed.entries:
        parsed = cached

    def save_to_cache():
        with tempfile.NamedTemporaryFile(delete=False, dir=FEED_DIR,
                                         mode='w') as f:
            encoded = jsonpickle.encode(parsed)
            f.write(encoded)
            f.flush()
            os.rename(f.name, cachefile)
            os.chmod(cachefile, 0o644)
            app.logger.debug("Saved cache to cachefile:%r", cachefile)

    def done(fut):
        try:
            fut.result()
        except Exception:
            app.logger.exception("Error saving feed cache")

    pool.submit(save_to_cache).add_done_callback(done)

    feed = FeedGenerator()
    feed.id(uri)
    feed.title(parsed.feed.get('title', None) or '???')
    feed.link(href=parsed.feed.get('link', None) or 'about:blank')
    feed.description(parsed.feed.get('description', None) or '???')
    if 'image' in parsed.feed and 'href' in parsed.feed.image:
        feed.image(parsed.feed.image.href)

    for e in parsed.entries:
        entry = feed.add_entry(order='append')
        id = e.id if 'id' in e else None

        for l in (e.links if 'links' in e else []):
            if l.rel == 'enclosure' and 'href' in l:
                if not id:
                    id = l.href
                storename = transcoded_href(l.href)
                entry.enclosure(urljoin(request.url, storename),
                                l.get('size', None),
                                l.get('type', OPUS_TYPE))
            elif l.rel == 'alternate' and 'href' in l:
                entry.link(**l)

        for c in (e.content if 'content' in e else []):
            if 'type' in c and c.type.startswith('text/html'):
                entry.content(content=c.value, type='html')
            else:
                entry.content(content=c.value, type='text')

        entry.id(id)
        entry.title(e.get('title', None) or '???')
        entry.description(e.get('description', None) or '???')
        if 'updated_parsed' in e and e.updated_parsed:
            entry.updated(
                datetime.fromtimestamp(mktime(e.updated_parsed), pytz.UTC))
        if 'published_parsed' in e and e.published_parsed:
            entry.published(
                datetime.fromtimestamp(mktime(e.published_parsed), pytz.UTC))

    resp = make_response(feed.rss_str(pretty=True))
    resp.headers['content-type'] = 'application/xml'
    return resp
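feed() only serves URIs whose verif parameter matches a BLAKE2s HMAC of the uri parameter. A sketch of the client-side counterpart that builds such a pair; feed_params is a hypothetical name, and hmac_key must be the same HMAC_KEY the server uses:

import base64
import hmac
import pyblake2

def feed_params(uri, hmac_key):
    raw = uri.encode('utf8')
    mac = hmac.new(hmac_key, raw, digestmod=pyblake2.blake2s).digest()
    # Both values are URL-safe base64, mirroring the decoding done in feed().
    return (base64.urlsafe_b64encode(raw).decode('utf8'),
            base64.urlsafe_b64encode(mac).decode('utf8'))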
Code example #33
async def channel(request, channel_id, return_type='video'):
    log.info(f'Channel: {channel_id}')
    channel_name = [f'{channel_id}/{return_type}']
    if channel_name[0] in channel_feed and channel_feed[
            channel_name[0]]['expire'] > datetime.now():
        return raw(channel_feed[channel_name[0]]['feed'],
                   content_type='application/rss+xml')
    fg = None
    calls = 0
    response = {'nextPageToken': ''}
    while 'nextPageToken' in response:
        next_page = response['nextPageToken']
        payload = {
            'part': 'snippet,contentDetails',
            'maxResults': 50,
            'channelId': channel_id,
            'key': KEY,
            'pageToken': next_page
        }
        response = json.loads(
            await get('https://www.googleapis.com/youtube/v3/activities',
                      params=payload))
        calls += 1
        if 'error' in response:
            payload = {
                'part': 'snippet',
                'maxResults': 1,
                'forUsername': channel_id,
                'key': KEY
            }
            response = json.loads(await get(
                'https://www.googleapis.com/youtube/v3/channels',
                params=payload))
            channel_id = response['items'][0]['id']
            channel_name.append(f'{channel_id}/{return_type}')
            payload = {
                'part': 'snippet,contentDetails',
                'maxResults': 50,
                'channelId': channel_id,
                'key': KEY,
                'pageToken': next_page
            }
            response = json.loads(await get(
                'https://www.googleapis.com/youtube/v3/activities',
                params=payload))
            calls += 2
        if not fg:
            fg = FeedGenerator()
            fg.load_extension('podcast')
            fg.generator('PodTube', __version__,
                         'https://github.com/aquacash5/PodTube')
            snippet = response['items'][0]['snippet']
            if 'Private' in snippet['title']:
                continue
            icon = max(snippet['thumbnails'],
                       key=lambda x: snippet['thumbnails'][x]['width'])
            fg.title(snippet['title'])
            fg.id(f'http://{request.headers["host"]}{request.url}')
            fg.description(snippet['description'] or ' ')
            fg.author(name=snippet['channelTitle'])
            fg.image(snippet['thumbnails'][icon]['url'])
            fg.link(href=f'https://www.youtube.com/playlist?list={channel_id}')
            fg.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
            fg.podcast.itunes_summary(snippet['description'])
            fg.podcast.itunes_category('Technology', 'Podcasting')
            fg.updated(f'{str(datetime.utcnow())}Z')
        for item in response['items']:
            snippet = item['snippet']
            if snippet['type'] != 'upload':
                continue
            current_video = item['contentDetails']['upload']['videoId']
            log.debug(f'ChannelVideo: {current_video} {snippet["title"]}')
            fe = fg.add_entry()
            fe.title(snippet['title'])
            fe.id(current_video)
            icon = max(snippet['thumbnails'],
                       key=lambda x: snippet['thumbnails'][x]['width'])
            fe.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
            fe.updated(snippet['publishedAt'])
            if return_type == 'audio':
                fe.enclosure(
                    url=f'http://{request.headers["host"]}/audio/{current_video}',
                    type="audio/mpeg")
            else:
                fe.enclosure(
                    url=f'http://{request.headers["host"]}/video/{current_video}',
                    type="video/mp4")
            fe.author(name=snippet['channelTitle'])
            fe.podcast.itunes_author(snippet['channelTitle'])
            fe.pubdate(snippet['publishedAt'])
            fe.link(href=f'http://www.youtube.com/watch?v={current_video}',
                    title=snippet['title'])
            fe.podcast.itunes_summary(snippet['description'])
            fe.description(snippet['description'])
            await sleep(0)
    feed = {
        'feed': fg.rss_str(),
        'expire': datetime.now() + timedelta(hours=calls)
    }
    for _name in channel_name:
        channel_feed[_name] = feed
    return raw(feed['feed'], content_type='application/rss+xml')
Code example #34
File: mtk-rss.py  Project: n2ygk/mtk-rss
            fe.enclosure(file_url, 0, 'audio/mpeg')


fdr = 'https://fullduplexradio.us'
fg = FeedGenerator()
fg.load_extension('podcast')

fg.podcast.itunes_category('Music', 'Podcasting')
fg.title('Full Duplex Radio')
fg.description(
    "R&R play what they like, which is a lot. And they tell you about it.")
fg.link(link={'href': fdr})
myhost = socket.getfqdn("0.0.0.0")
# TODO: make this configurable
fg.image('https://*****:*****@{}:8080/FDR.jpg'.format(myhost),
         title='Full Duplex Radio',
         link=fdr)
local_tz = tz.tzlocal()
fg.lastBuildDate(datetime.now(tz=local_tz))
fg.rss_str(pretty=True)

response = requests.get(fdr)
if response.status_code == 200:
    rows = response.content.decode().split('\n')
    # '<a href="pl/FD406.html">Episode #406: Do You Know Any Nice Jewish Girls? (2020-11-07)</a>'
    for row in rows:
        match = re.match(
            r'<a href="(?P<rel>[^"]*)">Episode #(?P<ep>[0-9]+): (?P<title>.*) \((?P<date>.*)\)</a>',
            row)
        if match:
            add_episode(fg, match)
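The script calls add_episode(fg, match), but only the trailing fe.enclosure(...) line of that helper survives at the top of the excerpt (in the original file it is defined above the code shown). A hedged reconstruction from the regex groups rel, ep, title, date and the fdr base URL, which the real project may implement differently:

from datetime import datetime

def add_episode(fg, match):
    # Hypothetical reconstruction; deriving the MP3 URL from the matched
    # page link is an assumption.
    rel, ep, title, date = match.group('rel', 'ep', 'title', 'date')
    fe = fg.add_entry()
    fe.id(f'{fdr}/{rel}')
    fe.title(f'Episode #{ep}: {title}')
    fe.description(title)
    fe.published(datetime.strptime(date, '%Y-%m-%d').replace(tzinfo=local_tz))
    file_url = f'{fdr}/{rel}'
    fe.enclosure(file_url, 0, 'audio/mpeg')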
Code example #35
File: test_feed.py  Project: lkiesow/python-feedgen
    def setUp(self):

        fg = FeedGenerator()

        self.nsAtom = "http://www.w3.org/2005/Atom"
        self.nsRss = "http://purl.org/rss/1.0/modules/content/"

        self.feedId = 'http://lernfunk.de/media/654321'
        self.title = 'Some Testfeed'

        self.authorName = 'John Doe'
        self.authorMail = '*****@*****.**'
        self.author = {'name': self.authorName, 'email': self.authorMail}

        self.linkHref = 'http://example.com'
        self.linkRel = 'alternate'

        self.logo = 'http://ex.com/logo.jpg'
        self.subtitle = 'This is a cool feed!'

        self.link2Href = 'http://larskiesow.de/test.atom'
        self.link2Rel = 'self'

        self.language = 'en'

        self.categoryTerm = 'This category term'
        self.categoryScheme = 'This category scheme'
        self.categoryLabel = 'This category label'

        self.cloudDomain = 'example.com'
        self.cloudPort = '4711'
        self.cloudPath = '/ws/example'
        self.cloudRegisterProcedure = 'registerProcedure'
        self.cloudProtocol = 'SOAP 1.1'

        self.icon = "http://example.com/icon.png"
        self.contributor = {'name': "Contributor Name",
                            'uri': "Contributor Uri",
                            'email': 'Contributor email'}
        self.copyright = "The copyright notice"
        self.docs = 'http://www.rssboard.org/rss-specification'
        self.managingEditor = '*****@*****.**'
        self.rating = '(PICS-1.1 "http://www.classify.org/safesurf/" ' + \
            '1 r (SS~~000 1))'
        self.skipDays = 'Tuesday'
        self.skipHours = 23

        self.textInputTitle = "Text input title"
        self.textInputDescription = "Text input description"
        self.textInputName = "Text input name"
        self.textInputLink = "Text input link"

        self.ttl = 900

        self.webMaster = '*****@*****.**'

        fg.id(self.feedId)
        fg.title(self.title)
        fg.author(self.author)
        fg.link(href=self.linkHref, rel=self.linkRel)
        fg.logo(self.logo)
        fg.subtitle(self.subtitle)
        fg.link(href=self.link2Href, rel=self.link2Rel)
        fg.language(self.language)
        fg.cloud(domain=self.cloudDomain, port=self.cloudPort,
                 path=self.cloudPath,
                 registerProcedure=self.cloudRegisterProcedure,
                 protocol=self.cloudProtocol)
        fg.icon(self.icon)
        fg.category(term=self.categoryTerm, scheme=self.categoryScheme,
                    label=self.categoryLabel)
        fg.contributor(self.contributor)
        fg.copyright(self.copyright)
        fg.docs(docs=self.docs)
        fg.managingEditor(self.managingEditor)
        fg.rating(self.rating)
        fg.skipDays(self.skipDays)
        fg.skipHours(self.skipHours)
        fg.textInput(title=self.textInputTitle,
                     description=self.textInputDescription,
                     name=self.textInputName, link=self.textInputLink)
        fg.ttl(self.ttl)
        fg.webMaster(self.webMaster)
        fg.updated('2017-02-05 13:26:58+01:00')
        fg.pubDate('2017-02-05 13:26:58+01:00')
        fg.generator('python-feedgen', 'x', uri='http://github.com/lkie...')
        fg.image(url=self.logo,
                 title=self.title,
                 link=self.link2Href,
                 width='123',
                 height='123',
                 description='Example Image')

        self.fg = fg
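A sketch of the kind of assertion this setUp is meant to support (hypothetical test method; the real python-feedgen suite checks far more fields):

import xml.etree.ElementTree as ET

def test_rss_channel_basics(self):
    # rss_str() returns bytes; the RSS root wraps a single <channel> element.
    channel = ET.fromstring(self.fg.rss_str(pretty=True)).find('channel')
    self.assertEqual(channel.find('title').text, self.title)
    self.assertEqual(channel.find('ttl').text, str(self.ttl))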
Code example #36
def create_podcast(title,
                   podcast_root,
                   podcast_folder=None,
                   toc_path=None,
                   html_root=r"https://students.cs.byu.edu/~tarch",
                   category="Literature",
                   description="N/A",
                   alphabetize=True,
                   image_link=None,
                   google_drive=True,
                   reverse_order=True,
                   name="podcast.xml",
                   output_folder_root=None,
                   rel_url=""):
    """
    Creates a .XML file of the podcast

    podcast_root: /home/pi/public_html/podcasts - needed to calculate podcast folder relative to root so URLs are correct
    podcast_folder: /home/pi/public_html/podcasts/Brandon Sanderson - Infinity Blade Redemption (Unabridged)

    output_folder_root: usually podcast folder, could be somewhere else though;
    rel_url: /podcasts - IDK why this is needed, apparently you have TOPLEVEL/rel_url/[path to podcast]
    """
    if VERBOSE:
        print("ROOT:", podcast_root, "\nFolder:", podcast_folder)

    # With reverse order, we make "Chapter 1" be the most recent entry
    # Open CSV
    if not podcast_folder:
        podcast_folder = Path(podcast_root) / title
    if not toc_path:
        toc_path = Path(podcast_folder) / "TOC.csv"
    if not output_folder_root:
        output_folder_root = podcast_root

    episode_list = open_csv_as_dict(toc_path)

    #Create RSS feed
    fg = FeedGenerator()
    fg.load_extension('podcast')
    fg.podcast.itunes_category(category)
    fg.id(title.replace(" ", ""))
    fg.title(title)
    fg.author({'name': 'TheFrostyBoss', 'email': '*****@*****.**'})
    fg.link(href="taylorarchibald.com", rel='alternate')
    #fg.subtitle('This is a cool feed!')
    fg.description(description)
    fg.podcast.itunes_block(True)

    # Sort the list
    if alphabetize:
        episode_list = sorted(episode_list,
                              key=lambda x: add_zero_to_chapter(x["Title"]))
        #print(episode_list)
    if reverse_order:
        episode_list = episode_list[::-1]

    for i, episode in enumerate(episode_list):
        add_episode(fg,
                    episode["Link"],
                    episode["Title"],
                    episode["Series"],
                    episode["Image"],
                    index=len(episode_list) - i - 1)

        # DEBUG SPECIFIC EPISODE
        #if "good" in episode["Title"].lower():
        #    print(id, title, description, episode)
        #    input()

    image_url = image_link if image_link is not None else episode["Image"]
    fg.image(url=image_url,
             title=None,
             link=None,
             width=None,
             height=None,
             description=None)

    fg.rss_str(pretty=True)

    # Add podcast name to path, create if needed
    relative_path = Path(podcast_folder).relative_to(podcast_root) / name

    output = Path(output_folder_root) / relative_path
    output.parent.mkdir(exist_ok=True, parents=True)
    fg.rss_file(str(output))

    if google_drive:
        link1 = input(
            "Upload your podcast XML to Google drive. What is the download link for the podcast.xml? (it should have id= somewhere in the link)"
        )
        print(convert_link2(link1))
    else:
        print("Link: ",
              Path(html_root) / rel_url / url_quote(relative_path.as_posix()))

    return output
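A hypothetical invocation, following the directory layout described in the docstring; google_drive=False skips the interactive upload prompt:

output_path = create_podcast(
    title="Brandon Sanderson - Infinity Blade Redemption (Unabridged)",
    podcast_root="/home/pi/public_html/podcasts",
    description="Audiobook feed",
    google_drive=False,
    rel_url="podcasts")
print("Feed written to:", output_path)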
Code example #37
File: feed.py  Project: jkalamarz/radio_archive
            return stat.st_mtime


fg = FeedGenerator()
fg.load_extension('podcast')
fg.language('pl')
fg.podcast.itunes_explicit('no')

if (len(sys.argv) > 1 and sys.argv[1] == '3'):
    fg.title(u'Trójka')
    fg.podcast.itunes_author(u'Trójka')
    fg.link(href='https://www.polskieradio.pl/9,Trojka', rel='alternate')
    fg.subtitle(u'Nieoficjalny podcast Trójki')
    fg.copyright('cc-by-PolskieRadio')
    fg.podcast.itunes_summary(u'Podcast Trójki')
    fg.image('https://www.simx.mobi/trojka/trojka.jpg')
    fg.podcast.itunes_image('https://www.simx.mobi/trojka/trojka.jpg')
    fg.podcast.itunes_category('International', 'Polish')
    url = u'https://www.simx.mobi/trojka/'
else:
    fg.title(u'Weszło FM')
    fg.podcast.itunes_author(u'Weszło FM')
    fg.link(href='http://weszlo.fm/', rel='alternate')
    fg.subtitle(u'Nieoficjalny podcast WeszłoFM')
    fg.copyright('cc-by-Weszlo')
    fg.podcast.itunes_summary(u'Podcast WeszłoFM')
    fg.podcast.itunes_owner('Krzysztof Stanowski',
                            '*****@*****.**')
    fg.image('https://i1.sndcdn.com/avatars-000421118988-38c4cq-t200x200.jpg')
    fg.podcast.itunes_image(
        'https://i1.sndcdn.com/avatars-000421118988-38c4cq-t200x200.jpg')
Code example #38
File: podtube.py  Project: kaesi0/PodTube
 def get(self, channel):
     channel = channel.split('/')
     if len(channel) < 2:
         channel.append('video')
     channel_name = ['/'.join(channel)]
     self.set_header('Content-type', 'application/rss+xml')
     if channel_name[0] in channel_feed and channel_feed[
             channel_name[0]]['expire'] > datetime.datetime.now():
         self.write(channel_feed[channel_name[0]]['feed'])
         self.finish()
         return
     fg = None
     video = None
     calls = 0
     response = {'nextPageToken': ''}
     while 'nextPageToken' in response.keys():
         next_page = response['nextPageToken']
         payload = {
             'part': 'snippet,contentDetails',
             'maxResults': 50,
             'channelId': channel[0],
             'key': key,
             'pageToken': next_page
         }
         request = requests.get(
             'https://www.googleapis.com/youtube/v3/activities',
             params=payload)
         calls += 1
         if request.status_code != 200:
             payload = {
                 'part': 'snippet',
                 'maxResults': 1,
                 'forUsername': channel[0],
                 'key': key
             }
             request = requests.get(
                 'https://www.googleapis.com/youtube/v3/channels',
                 params=payload)
             response = request.json()
             channel[0] = response['items'][0]['id']
             channel_name.append('/'.join(channel))
             payload = {
                 'part': 'snippet,contentDetails',
                 'maxResults': 50,
                 'channelId': channel[0],
                 'key': key,
                 'pageToken': next_page
             }
             request = requests.get(
                 'https://www.googleapis.com/youtube/v3/activities',
                 params=payload)
             calls += 2
         response = request.json()
         if request.status_code == 200:
             logging.debug('Downloaded Channel Information')
         else:
             logging.error('Error Downloading Channel: %s', request.reason)
             self.send_error(reason='Error Downloading Channel')
             return
         if not fg:
             fg = FeedGenerator()
             fg.load_extension('podcast')
             fg.generator('PodTube (python-feedgen)', __version__,
                          'https://github.com/aquacash5/PodTube')
             for item in response['items']:
                 if item['snippet']['type'] != 'upload':
                     continue
                 elif 'Private' in item['snippet']['title']:
                     continue
                 else:
                     snippet = item['snippet']
                     break
             logging.info('Channel: %s (%s)', channel[0],
                          snippet['channelTitle'])
             icon = max(snippet['thumbnails'],
                        key=lambda x: snippet['thumbnails'][x]['width'])
             fg.title(snippet['channelTitle'])
             fg.id('http://' + self.request.host + self.request.uri)
             fg.description(snippet['description'] or ' ')
             fg.author(name='Podtube',
                       email='*****@*****.**',
                       uri='https://github.com/aquacash5/PodTube')
             fg.podcast.itunes_author(snippet['channelTitle'])
             fg.image(snippet['thumbnails'][icon]['url'])
             fg.link(href=f'http://youtube.com/channel/{channel[0]}',
                     rel='self')
             fg.language('en-US')
             fg.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
             fg.podcast.itunes_explicit('no')
             fg.podcast.itunes_owner(name='Podtube',
                                     email='*****@*****.**')
             fg.podcast.itunes_summary(snippet['description'])
             fg.podcast.itunes_category(cat='Technology')
             fg.updated(str(datetime.datetime.utcnow()) + 'Z')
         for item in response['items']:
             snippet = item['snippet']
             if snippet['type'] != 'upload':
                 continue
             if 'private' in snippet['title'].lower():
                 continue
             current_video = item['contentDetails']['upload']['videoId']
             logging.debug('ChannelVideo: %s (%s)', current_video,
                           snippet['title'])
             fe = fg.add_entry()
             fe.title(snippet['title'])
             fe.id(current_video)
             icon = max(snippet['thumbnails'],
                        key=lambda x: snippet['thumbnails'][x]['width'])
             fe.podcast.itunes_image(snippet['thumbnails'][icon]['url'])
             fe.updated(snippet['publishedAt'])
             if channel[1] == 'video':
                 fe.enclosure(
                     url=f'http://{self.request.host}/video/{current_video}',
                     type="video/mp4")
             elif channel[1] == 'audio':
                 fe.enclosure(
                     url=f'http://{self.request.host}/audio/{current_video}',
                     type="audio/mpeg")
             fe.author(name=snippet['channelTitle'])
             fe.podcast.itunes_author(snippet['channelTitle'])
             fe.pubDate(snippet['publishedAt'])
             fe.link(href=f'http://www.youtube.com/watch?v={current_video}',
                     title=snippet['title'])
             fe.podcast.itunes_summary(snippet['description'])
             fe.description(snippet['description'])
             if not video or video['expire'] < fe.pubDate():
                 video = {'video': fe.id(), 'expire': fe.pubDate()}
     feed = {
         'feed': fg.rss_str(),
         'expire': datetime.datetime.now() + datetime.timedelta(hours=calls)
     }
     for chan in channel_name:
         channel_feed[chan] = feed
     self.write(feed['feed'])
     self.finish()
     video = video['video']
     mp3_file = 'audio/{}.mp3'.format(video)
     if channel[1] == 'audio' and not os.path.exists(
             mp3_file) and video not in conversion_queue.keys():
         conversion_queue[video] = {
             'status': False,
             'added': datetime.datetime.now()
         }
Code example #39
File: test_feed.py  Project: rachmann/python-feedgen
    def setUp(self):

        fg = FeedGenerator()

        self.nsAtom = "http://www.w3.org/2005/Atom"
        self.nsRss = "http://purl.org/rss/1.0/modules/content/"

        self.feedId = 'http://lernfunk.de/media/654321'
        self.title = 'Some Testfeed'

        self.authorName = 'John Doe'
        self.authorMail = '*****@*****.**'
        self.author = {'name': self.authorName, 'email': self.authorMail}

        self.linkHref = 'http://example.com'
        self.linkRel = 'alternate'

        self.logo = 'http://ex.com/logo.jpg'
        self.subtitle = 'This is a cool feed!'

        self.link2Href = 'http://larskiesow.de/test.atom'
        self.link2Rel = 'self'

        self.language = 'en'

        self.categoryTerm = 'This category term'
        self.categoryScheme = 'This category scheme'
        self.categoryLabel = 'This category label'

        self.cloudDomain = 'example.com'
        self.cloudPort = '4711'
        self.cloudPath = '/ws/example'
        self.cloudRegisterProcedure = 'registerProcedure'
        self.cloudProtocol = 'SOAP 1.1'

        self.icon = "http://example.com/icon.png"
        self.contributor = {
            'name': "Contributor Name",
            'uri': "Contributor Uri",
            'email': 'Contributor email'
        }
        self.copyright = "The copyright notice"
        self.docs = 'http://www.rssboard.org/rss-specification'
        self.managingEditor = '*****@*****.**'
        self.rating = '(PICS-1.1 "http://www.classify.org/safesurf/" ' + \
            '1 r (SS~~000 1))'
        self.skipDays = 'Tuesday'
        self.skipHours = 23

        self.textInputTitle = "Text input title"
        self.textInputDescription = "Text input description"
        self.textInputName = "Text input name"
        self.textInputLink = "Text input link"

        self.ttl = 900

        self.webMaster = '*****@*****.**'

        fg.id(self.feedId)
        fg.title(self.title)
        fg.author(self.author)
        fg.link(href=self.linkHref, rel=self.linkRel)
        fg.logo(self.logo)
        fg.subtitle(self.subtitle)
        fg.link(href=self.link2Href, rel=self.link2Rel)
        fg.language(self.language)
        fg.cloud(domain=self.cloudDomain,
                 port=self.cloudPort,
                 path=self.cloudPath,
                 registerProcedure=self.cloudRegisterProcedure,
                 protocol=self.cloudProtocol)
        fg.icon(self.icon)
        fg.category(term=self.categoryTerm,
                    scheme=self.categoryScheme,
                    label=self.categoryLabel)
        fg.contributor(self.contributor)
        fg.copyright(self.copyright)
        fg.docs(docs=self.docs)
        fg.managingEditor(self.managingEditor)
        fg.rating(self.rating)
        fg.skipDays(self.skipDays)
        fg.skipHours(self.skipHours)
        fg.textInput(title=self.textInputTitle,
                     description=self.textInputDescription,
                     name=self.textInputName,
                     link=self.textInputLink)
        fg.ttl(self.ttl)
        fg.webMaster(self.webMaster)
        fg.updated('2017-02-05 13:26:58+01:00')
        fg.pubDate('2017-02-05 13:26:58+01:00')
        fg.generator('python-feedgen', 'x', uri='http://github.com/lkie...')
        fg.image(url=self.logo,
                 title=self.title,
                 link=self.link2Href,
                 width='123',
                 height='123',
                 description='Example Image')

        self.fg = fg