Example #1
 def get_content(self, params):
     if 'body-html' in params:
         return params['body-html']
     if 'stripped-html' in params:
         return linkify(linebreaks(params['stripped-html']))
     if 'body-plain' in params:
         return linkify(linebreaks(params['body-plain']))
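
A quick illustration of the fallback order above, assuming an inbound-email payload whose keys follow Mailgun's webhook fields (the dict contents are hypothetical):

 # Which key wins under get_content()'s fallback order:
 # {'body-html': ..., 'body-plain': ...}      -> the raw 'body-html' value is returned unchanged
 # {'stripped-html': ..., 'body-plain': ...}  -> 'stripped-html', run through linebreaks() then linkify()
 # {'body-plain': ...}                        -> 'body-plain', run through linebreaks() then linkify()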
Example #2
 def get_content(self, params):
     if 'body-html' in params:
         return params['body-html']
     if 'stripped-html' in params:
         return linkify(linebreaks(params['stripped-html']))
     if 'body-plain' in params:
         return linkify(linebreaks(params['body-plain']))
Example #3
 def get_content(self, params):
     if "body-html" in params:
         return params["body-html"]
     if "stripped-html" in params:
         return linkify(linebreaks(params["stripped-html"]))
     if "body-plain" in params:
         return linkify(linebreaks(params["body-plain"]))
Example #4
 def _get_content(self, params, force_plain=False):
     if 'body-enriched' in params and not force_plain:
         return params['body-enriched']
     if 'body-html' in params and not force_plain:
         return params['body-html']
     if 'stripped-html' in params and not force_plain:
         return params['stripped-html']
     if 'body-plain' in params:
         return linkify(linebreaks(params['body-plain']))
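
The force_plain flag skips every rich-body branch; roughly:

 # _get_content(params)                   -> returns 'body-enriched', 'body-html', or 'stripped-html' as-is when present
 # _get_content(params, force_plain=True) -> falls through to linkify(linebreaks(params['body-plain'])), or None if absent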
Example #5
 def _get_content(self, params, force_plain=False):
     if 'body-enriched' in params and not force_plain:
         return params['body-enriched']
     if 'body-html' in params and not force_plain:
         return params['body-html']
     if 'stripped-html' in params and not force_plain:
         return params['stripped-html']
     if 'body-plain' in params:
         return linkify(linebreaks(params['body-plain']))
Example #6
    def fetch_youtube(self, address):
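        # Flow: (1) classify the address (legacy gdata user feed, videos.xml user/channel feed, or playlist URL),
        # (2) fetch the channel/playlist metadata and the video ids from the YouTube Data API v3 (or the public
        # videos.xml Atom feed), (3) build a feedgenerator.Atom1Feed with one embedded-player entry per video.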
        username = None
        channel_id = None
        list_id = None
        
        if 'gdata.youtube.com' in address:
            try:
                username_groups = re.search(r'gdata.youtube.com/feeds/\w+/users/(\w+)/', address)
                if not username_groups:
                    return
                username = username_groups.group(1)
            except IndexError:
                return
        elif 'youtube.com/feeds/videos.xml?user=' in address:
            try:
                username = urlparse.parse_qs(urlparse.urlparse(address).query)['user'][0]
            except IndexError:
                return            
        elif 'youtube.com/feeds/videos.xml?channel_id=' in address:
            try:
                channel_id = urlparse.parse_qs(urlparse.urlparse(address).query)['channel_id'][0]
            except IndexError:
                return            
        elif 'youtube.com/playlist' in address:
            try:
                list_id = urlparse.parse_qs(urlparse.urlparse(address).query)['list'][0]
            except IndexError:
                return            
        
        if channel_id:
            video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?channel_id=%s" % channel_id)
            channel_json = requests.get("https://www.googleapis.com/youtube/v3/channels?part=snippet&id=%s&key=%s" %
                                       (channel_id, settings.YOUTUBE_API_KEY))
            channel = json.decode(channel_json.content)
            try:
                username = channel['items'][0]['snippet']['title']
                description = channel['items'][0]['snippet']['description']
            except (IndexError, KeyError):
                return
        elif list_id:
            playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlists?part=snippet&id=%s&key=%s" %
                                       (list_id, settings.YOUTUBE_API_KEY))
            playlist = json.decode(playlist_json.content)
            try:
                username = playlist['items'][0]['snippet']['title']
                description = playlist['items'][0]['snippet']['description']
            except (IndexError, KeyError):
                return
            channel_url = "https://www.youtube.com/playlist?list=%s" % list_id
        elif username:
            video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?user=%s" % username)
            description = "YouTube videos uploaded by %s" % username
        else:
            return
                    
        if list_id:
            playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&key=%s" %
                                       (list_id, settings.YOUTUBE_API_KEY))
            playlist = json.decode(playlist_json.content)
            try:
                video_ids = [video['snippet']['resourceId']['videoId'] for video in playlist['items']]
            except (IndexError, KeyError):
                return
        else:    
            if video_ids_xml.status_code != 200:
                return
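            # Fall back to the channel's public Atom feed: pull the channel URL and the listed video ids from the XML.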
            video_ids_soup = BeautifulSoup(video_ids_xml.content)
            channel_url = video_ids_soup.find('author').find('uri').getText()
            video_ids = []
            for video_id in video_ids_soup.findAll('yt:videoid'):
                video_ids.append(video_id.getText())
        
        videos_json = requests.get("https://www.googleapis.com/youtube/v3/videos?part=contentDetails%%2Csnippet&id=%s&key=%s" %
             (','.join(video_ids), settings.YOUTUBE_API_KEY))
        videos = json.decode(videos_json.content)
        if 'error' in videos:
            logging.debug(" ***> ~FRYoutube returned an error: ~FM~SB%s" % (videos))
            return
            
        data = {}
        data['title'] = ("%s's YouTube Videos" % username if 'Uploads' not in username else username)
        data['link'] = channel_url
        data['description'] = description
        data['lastBuildDate'] = datetime.datetime.utcnow()
        data['generator'] = 'NewsBlur YouTube API v3 Decrapifier - %s' % settings.NEWSBLUR_URL
        data['docs'] = None
        data['feed_url'] = address
        rss = feedgenerator.Atom1Feed(**data)
        
        for video in videos['items']:
            thumbnail = video['snippet']['thumbnails'].get('maxres')
            if not thumbnail:
                thumbnail = video['snippet']['thumbnails'].get('high')
            if not thumbnail:
                thumbnail = video['snippet']['thumbnails'].get('medium')
            duration_sec = isodate.parse_duration(video['contentDetails']['duration']).seconds
            if duration_sec >= 3600:
                hours = (duration_sec / 3600)
                minutes = (duration_sec - (hours*3600)) / 60
                seconds = duration_sec - (hours*3600) - (minutes*60)
                duration = "%s:%s:%s" % (hours, '{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
            else:
                minutes = duration_sec / 60
                seconds = duration_sec - (minutes*60)
                duration = "%s:%s" % ('{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
            content = """<div class="NB-youtube-player"><iframe allowfullscreen="true" src="%s?iv_load_policy=3"></iframe></div>
                         <div class="NB-youtube-stats"><small>
                             <b>From:</b> <a href="%s">%s</a><br />
                             <b>Duration:</b> %s<br />
                         </small></div><hr>
                         <div class="NB-youtube-description">%s</div>
                         <img src="%s" style="display:none" />""" % (
                ("https://www.youtube.com/embed/" + video['id']),
                channel_url, username,
                duration,
                linkify(linebreaks(video['snippet']['description'])),
                thumbnail['url'] if thumbnail else "",
            )

            link = "http://www.youtube.com/watch?v=%s" % video['id']
            story_data = {
                'title': video['snippet']['title'],
                'link': link,
                'description': content,
                'author_name': username,
                'categories': [],
                'unique_id': "tag:youtube.com,2008:video:%s" % video['id'],
                'pubdate': dateutil.parser.parse(video['snippet']['publishedAt']),
            }
            rss.add_item(**story_data)
        
        return rss.writeString('utf-8')
Example #7
                if 'media_url_https' not in media: continue
                if media['type'] == 'photo':
                    entities += "<img src=\"%s\"> " % media['media_url_https']
                    if 'photo' not in categories:
                        categories.add('photo')

            content = """<div class="NB-twitter-rss">
                             <div class="NB-twitter-rss-tweet">%s</div><hr />
                             <div class="NB-twitter-rss-entities">%s</div>
                             <div class="NB-twitter-rss-author">
                                 Posted by
                                     <a href="https://twitter.com/%s"><img src="%s" style="height: 32px" /> %s</a>
                                on %s.</div>
                             <div class="NB-twitter-rss-stats">%s %s%s %s</div>
                        </div>""" % (
                linkify(linebreaks(tweet.text)),
                entities,
                username,
                tweet.user.profile_image_url_https,
                username,
                tweet.created_at.strftime("%c"),
                ("<br /><br />" if tweet.favorite_count or tweet.retweet_count else ""),
                ("<b>%s</b> %s" % (tweet.favorite_count, "like" if tweet.favorite_count == 1 else "likes")) if tweet.favorite_count else "",
                (", " if tweet.favorite_count and tweet.retweet_count else ""),
                ("<b>%s</b> %s" % (tweet.retweet_count, "retweet" if tweet.retweet_count == 1 else "retweets")) if tweet.retweet_count else "",
            )
            
            if tweet.text.startswith('RT @'):
                categories.add('retweet')
            elif tweet.in_reply_to_status_id or tweet.text.startswith('@'):
                categories.add('reply')
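
For clarity, how the four placeholders in the NB-twitter-rss-stats line resolve for a few illustrative counts:

            # favorite_count=3, retweet_count=1 -> "<br /><br />", "<b>3</b> likes", ", ", "<b>1</b> retweet"
            # favorite_count=1, retweet_count=0 -> "<br /><br />", "<b>1</b> like"; the last two placeholders stay empty
            # favorite_count=0, retweet_count=0 -> all four placeholders render as empty strings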
Example #8
    def fetch_youtube(self, address):
        username = None
        channel_id = None
        list_id = None
        
        if 'gdata.youtube.com' in address:
            try:
                username_groups = re.search(r'gdata.youtube.com/feeds/\w+/users/(\w+)/', address)
                if not username_groups:
                    return
                username = username_groups.group(1)
            except IndexError:
                return
        elif 'youtube.com/feeds/videos.xml?user=' in address:
            try:
                username = urlparse.parse_qs(urlparse.urlparse(address).query)['user'][0]
            except IndexError:
                return            
        elif 'youtube.com/feeds/videos.xml?channel_id=' in address:
            try:
                channel_id = urlparse.parse_qs(urlparse.urlparse(address).query)['channel_id'][0]
            except IndexError:
                return            
        elif 'youtube.com/playlist' in address:
            try:
                list_id = urlparse.parse_qs(urlparse.urlparse(address).query)['list'][0]
            except IndexError:
                return            
        
        if channel_id:
            video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?channel_id=%s" % channel_id)
            channel_json = requests.get("https://www.googleapis.com/youtube/v3/channels?part=snippet&id=%s&key=%s" %
                                       (channel_id, settings.YOUTUBE_API_KEY))
            channel = json.decode(channel_json.content)
            try:
                username = channel['items'][0]['snippet']['title']
                description = channel['items'][0]['snippet']['description']
            except (IndexError, KeyError):
                return
        elif list_id:
            playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlists?part=snippet&id=%s&key=%s" %
                                       (list_id, settings.YOUTUBE_API_KEY))
            playlist = json.decode(playlist_json.content)
            try:
                username = playlist['items'][0]['snippet']['title']
                description = playlist['items'][0]['snippet']['description']
            except (IndexError, KeyError):
                return
            channel_url = "https://www.youtube.com/playlist?list=%s" % list_id
        elif username:
            video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?user=%s" % username)
            description = "YouTube videos uploaded by %s" % username
        else:
            return
                    
        if list_id:
            playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&key=%s" %
                                       (list_id, settings.YOUTUBE_API_KEY))
            playlist = json.decode(playlist_json.content)
            try:
                video_ids = [video['snippet']['resourceId']['videoId'] for video in playlist['items']]
            except (IndexError, KeyError):
                return
        else:    
            if video_ids_xml.status_code != 200:
                return
            video_ids_soup = BeautifulSoup(video_ids_xml.content)
            channel_url = video_ids_soup.find('author').find('uri').getText()
            video_ids = []
            for video_id in video_ids_soup.findAll('yt:videoid'):
                video_ids.append(video_id.getText())
        
        videos_json = requests.get("https://www.googleapis.com/youtube/v3/videos?part=contentDetails%%2Csnippet&id=%s&key=%s" %
             (','.join(video_ids), settings.YOUTUBE_API_KEY))
        videos = json.decode(videos_json.content)

        data = {}
        data['title'] = ("%s's YouTube Videos" % username if 'Uploads' not in username else username)
        data['link'] = channel_url
        data['description'] = description
        data['lastBuildDate'] = datetime.datetime.utcnow()
        data['generator'] = 'NewsBlur YouTube API v3 Decrapifier - %s' % settings.NEWSBLUR_URL
        data['docs'] = None
        data['feed_url'] = address
        rss = feedgenerator.Atom1Feed(**data)

        for video in videos['items']:
            thumbnail = video['snippet']['thumbnails'].get('maxres')
            if not thumbnail:
                thumbnail = video['snippet']['thumbnails'].get('high')
            if not thumbnail:
                thumbnail = video['snippet']['thumbnails'].get('medium')
            duration_sec = isodate.parse_duration(video['contentDetails']['duration']).seconds
            if duration_sec >= 3600:
                hours = (duration_sec / 3600)
                minutes = (duration_sec - (hours*3600)) / 60
                seconds = duration_sec - (hours*3600) - (minutes*60)
                duration = "%s:%s:%s" % (hours, '{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
            else:
                minutes = duration_sec / 60
                seconds = duration_sec - (minutes*60)
                duration = "%s:%s" % ('{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
            content = """<div class="NB-youtube-player"><iframe allowfullscreen="true" src="%s?iv_load_policy=3"></iframe></div>
                         <div class="NB-youtube-stats"><small>
                             <b>From:</b> <a href="%s">%s</a><br />
                             <b>Duration:</b> %s<br />
                         </small></div><hr>
                         <div class="NB-youtube-description">%s</div>
                         <img src="%s" style="display:none" />""" % (
                ("https://www.youtube.com/embed/" + video['id']),
                channel_url, username,
                duration,
                linkify(linebreaks(video['snippet']['description'])),
                thumbnail['url'] if thumbnail else "",
            )

            link = "http://www.youtube.com/watch?v=%s" % video['id']
            story_data = {
                'title': video['snippet']['title'],
                'link': link,
                'description': content,
                'author_name': username,
                'categories': [],
                'unique_id': "tag:youtube.com,2008:video:%s" % video['id'],
                'pubdate': dateutil.parser.parse(video['snippet']['publishedAt']),
            }
            rss.add_item(**story_data)
        
        return rss.writeString('utf-8')
Example #9
    def fetch_youtube(self, address):
        username = None
        channel_id = None
        list_id = None

        if "gdata.youtube.com" in address:
            try:
                username_groups = re.search(r"gdata.youtube.com/feeds/\w+/users/(\w+)/", address)
                if not username_groups:
                    return
                username = username_groups.group(1)
            except IndexError:
                return
        elif "youtube.com/feeds/videos.xml?user="******"user"][0]
            except IndexError:
                return
        elif "youtube.com/feeds/videos.xml?channel_id=" in address:
            try:
                channel_id = urlparse.parse_qs(urlparse.urlparse(address).query)["channel_id"][0]
            except IndexError:
                return
        elif "youtube.com/playlist" in address:
            try:
                list_id = urlparse.parse_qs(urlparse.urlparse(address).query)["list"][0]
            except IndexError:
                return

        if channel_id:
            video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?channel_id=%s" % channel_id)
            channel_json = requests.get(
                "https://www.googleapis.com/youtube/v3/channels?part=snippet&id=%s&key=%s"
                % (channel_id, settings.YOUTUBE_API_KEY)
            )
            channel = json.decode(channel_json.content)
            try:
                username = channel["items"][0]["snippet"]["title"]
                description = channel["items"][0]["snippet"]["description"]
            except IndexError:
                return
        elif list_id:
            playlist_json = requests.get(
                "https://www.googleapis.com/youtube/v3/playlists?part=snippet&id=%s&key=%s"
                % (list_id, settings.YOUTUBE_API_KEY)
            )
            playlist = json.decode(playlist_json.content)
            try:
                username = playlist["items"][0]["snippet"]["title"]
                description = playlist["items"][0]["snippet"]["description"]
            except IndexError:
                return
            channel_url = "https://www.youtube.com/playlist?list=%s" % list_id
        elif username:
            video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?user=%s" % username)
            description = "YouTube videos uploaded by %s" % username
        else:
            return

        if list_id:
            playlist_json = requests.get(
                "https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&key=%s"
                % (list_id, settings.YOUTUBE_API_KEY)
            )
            playlist = json.decode(playlist_json.content)
            try:
                video_ids = [video["snippet"]["resourceId"]["videoId"] for video in playlist["items"]]
            except IndexError:
                return
        else:
            if video_ids_xml.status_code != 200:
                return
            video_ids_soup = BeautifulSoup(video_ids_xml.content)
            channel_url = video_ids_soup.find("author").find("uri").getText()
            video_ids = []
            for video_id in video_ids_soup.findAll("yt:videoid"):
                video_ids.append(video_id.getText())

        videos_json = requests.get(
            "https://www.googleapis.com/youtube/v3/videos?part=contentDetails%%2Csnippet&id=%s&key=%s"
            % (",".join(video_ids), settings.YOUTUBE_API_KEY)
        )
        videos = json.decode(videos_json.content)

        data = {}
        data["title"] = "%s's YouTube Videos" % username if "Uploads" not in username else username
        data["link"] = channel_url
        data["description"] = description
        data["lastBuildDate"] = datetime.datetime.utcnow()
        data["generator"] = "NewsBlur YouTube API v3 Decrapifier - %s" % settings.NEWSBLUR_URL
        data["docs"] = None
        data["feed_url"] = address
        rss = feedgenerator.Atom1Feed(**data)

        for video in videos["items"]:
            thumbnail = video["snippet"]["thumbnails"].get("maxres")
            if not thumbnail:
                thumbnail = video["snippet"]["thumbnails"].get("high")
            if not thumbnail:
                thumbnail = video["snippet"]["thumbnails"].get("medium")
            duration_sec = isodate.parse_duration(video["contentDetails"]["duration"]).seconds
            if duration_sec >= 3600:
                hours = duration_sec // 3600
                minutes = (duration_sec - (hours * 3600)) // 60
                seconds = duration_sec - (hours * 3600) - (minutes * 60)
                duration = "%s:%s:%s" % (hours, "{0:02d}".format(minutes), "{0:02d}".format(seconds))
            else:
                minutes = duration_sec // 60
                seconds = duration_sec - (minutes * 60)
                duration = "%s:%s" % ("{0:02d}".format(minutes), "{0:02d}".format(seconds))
            content = """<div class="NB-youtube-player"><iframe allowfullscreen="true" src="%s"></iframe></div>
                         <div class="NB-youtube-stats"><small>
                             <b>From:</b> <a href="%s">%s</a><br />
                             <b>Duration:</b> %s<br />
                         </small></div><hr>
                         <div class="NB-youtube-description">%s</div>
                         <img src="%s" style="display:none" />""" % (
                ("https://www.youtube.com/embed/" + video["id"]),
                channel_url,
                username,
                duration,
                linkify(linebreaks(video["snippet"]["description"])),
                thumbnail["url"] if thumbnail else "",
            )

            link = "http://www.youtube.com/watch?v=%s" % video["id"]
            story_data = {
                "title": video["snippet"]["title"],
                "link": link,
                "description": content,
                "author_name": username,
                "categories": [],
                "unique_id": "tag:youtube.com,2008:video:%s" % video["id"],
                "pubdate": dateutil.parser.parse(video["snippet"]["publishedAt"]),
            }
            rss.add_item(**story_data)

        return rss.writeString("utf-8")
Example #10
                if 'media_url_https' not in media: continue
                if media['type'] == 'photo':
                    entities += "<img src=\"%s\"> " % media['media_url_https']
                    if 'photo' not in categories:
                        categories.append('photo')

            content = """<div class="NB-twitter-rss">
                             <div class="NB-twitter-rss-tweet">%s</div><hr />
                             <div class="NB-twitter-rss-entities">%s</div>
                             <div class="NB-twitter-rss-author">
                                 Posted by
                                     <a href="https://twitter.com/%s"><img src="%s" style="height: 32px" /> %s</a>
                                on %s.</div>
                             <div class="NB-twitter-rss-stats">%s %s%s %s</div>
                        </div>""" % (
                linkify(linebreaks(tweet.text)),
                entities,
                username,
                tweet.user.profile_image_url_https,
                username,
                tweet.created_at.strftime("%c"),
                ("<br /><br />" if tweet.favorite_count or tweet.retweet_count else ""),
                ("<b>%s</b> %s" % (tweet.favorite_count, "like" if tweet.favorite_count == 1 else "likes")) if tweet.favorite_count else "",
                (", " if tweet.favorite_count and tweet.retweet_count else ""),
                ("<b>%s</b> %s" % (tweet.retweet_count, "retweet" if tweet.retweet_count == 1 else "retweets")) if tweet.retweet_count else "",
            )
            
            if tweet.text.startswith('RT @'):
                categories.append('retweet')
            elif tweet.in_reply_to_status_id:
                categories.append('reply')