Example #1
def revenue(request):
    data = {}
    data['title'] = "NewsBlur Revenue"
    data['link'] = "https://www.newsblur.com"
    data['description'] = "Revenue"
    data['lastBuildDate'] = datetime.datetime.utcnow()
    data['generator'] = 'NewsBlur Revenue Writer'
    data['docs'] = None
    rss = feedgenerator.Atom1Feed(**data)

    report = PaymentHistory.report()
    content = "%s revenue: $%s<br><code>%s</code>" % (datetime.datetime.now(
    ).strftime('%Y'), report['annual'], report['output'].replace('\n', '<br>'))

    story = {
        'title': "Daily snapshot: %s" % datetime.datetime.now().strftime('%a %b %-d, %Y'),
        'link': 'https://www.newsblur.com',
        'description': content,
        'unique_id': datetime.datetime.now().strftime('%a %b %-d, %Y'),
        'pubdate': datetime.datetime.now(),
    }
    rss.add_item(**story)

    logging.user(
        request, "~FBGenerating Revenue RSS feed: ~FM%s" %
        (request.META.get('HTTP_USER_AGENT', "")[:24]))
    return HttpResponse(rss.writeString('utf-8'),
                        content_type='application/rss+xml')
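Every example on this page follows the same three-step lifecycle: construct an Atom1Feed with the feed-level metadata (title, link, and description are the required arguments), call add_item() once per entry, then serialize with writeString(). A minimal, self-contained sketch of that pattern; the titles and URLs here are illustrative, not taken from any of the projects above:

import datetime

from django.utils import feedgenerator

# Feed-level metadata; title, link, and description are required.
feed = feedgenerator.Atom1Feed(
    title="Example feed",
    link="https://example.com/",
    description="An illustrative feed")

# One entry; pubdate and unique_id are optional but recommended.
feed.add_item(
    title="First entry",
    link="https://example.com/posts/1",
    description="Entry body",
    pubdate=datetime.datetime.utcnow(),
    unique_id="https://example.com/posts/1")

xml = feed.writeString('utf-8')  # a serialized Atom 1.0 document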
Example #2
    def fetch(self, address=None):
        if not address:
            address = self.feed.feed_address
        self.address = address
        twitter_user = None

        username = self.extract_username()
        if not username:
            return

        twitter_user = self.fetch_user(username)
        if not twitter_user:
            return

        tweets = self.user_timeline(twitter_user)

        data = {}
        data['title'] = "%s on Twitter" % username
        data['link'] = "https://twitter.com/%s" % username
        data['description'] = "%s on Twitter" % username
        data['lastBuildDate'] = datetime.datetime.utcnow()
        data['generator'] = 'NewsBlur Twitter API Decrapifier - %s' % settings.NEWSBLUR_URL
        data['docs'] = None
        data['feed_url'] = address
        rss = feedgenerator.Atom1Feed(**data)

        for tweet in tweets:
            story_data = self.tweet_story(tweet.__dict__)
            rss.add_item(**story_data)

        return rss.writeString('utf-8')
Example #3
    def to_rss(self, data, options=None):

        if options is None:
            options = {}
        options.setdefault('title', 'default feed title')
        options.setdefault('link', 'default feed link')
        options.setdefault('description', 'default feed description')

        data = self.to_simple(data, options)

        #*****************************************************
        # Subclass the serialize function in your api
        # to build options which get passed into here.
        #*****************************************************

        # Feed level info
        feed = feedgenerator.Atom1Feed(title=unicode(options['title']),
                                       link=unicode(options['link']),
                                       description=unicode(
                                           options['description']))

        #TODO: Add in classification abbreviation into the feed
        for item in data['objects']:
            feed.add_item(unique_id=item['id'],
                          title=self.format_text(item['classification_short'],
                                                 item['title']),
                          description=self.format_text(
                              item['classification_short'],
                              item['description']),
                          link=self.get_front_end_url(item['id']),
                          pubdate=self.get_iso_dtg(item['modified']),
                          author_name=item['contributor_name'])

        return feed.writeString('utf-8')
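The comment in Example #3 points at the intended integration: a Tastypie Serializer subclass overrides serialize() to build the options dict before it reaches to_rss(). A hedged sketch of what such a subclass could look like; the class name and option values are hypothetical, not from the original project:

from tastypie.serializers import Serializer

class FeedSerializer(Serializer):
    formats = ['json', 'rss']
    content_types = {'json': 'application/json',
                     'rss': 'application/rss+xml'}

    def serialize(self, bundle, format='application/json', options=None):
        options = options or {}
        if format == 'application/rss+xml':
            # Hypothetical feed-level options consumed by to_rss() above.
            options.setdefault('title', 'My API feed')
            options.setdefault('link', 'https://example.com/api/')
            options.setdefault('description', 'Latest objects from the API')
        return super(FeedSerializer, self).serialize(bundle, format, options)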
Example #4
    def fetch(self, address, raw_feed):
        if not address:
            address = self.feed.feed_address

        json_feed = decode(raw_feed.content)
        if not json_feed:
            logging.debug(u'   ***> [%-30s] ~FRJSON fetch failed: %s' %
                          (self.feed.log_title[:30], address))
            return

        data = {}
        data['title'] = json_feed.get('title', '[Untitled]')
        data['link'] = json_feed.get('home_page_url', None)
        data['description'] = json_feed.get('title', "")
        data['lastBuildDate'] = datetime.datetime.utcnow()
        data['generator'] = 'NewsBlur JSON Feed - %s' % settings.NEWSBLUR_URL
        data['docs'] = None
        data['feed_url'] = json_feed.get('feed_url')

        rss = feedgenerator.Atom1Feed(**data)

        for item in json_feed.get('items', []):
            story_data = self.json_feed_story(item)
            rss.add_item(**story_data)

        return rss.writeString('utf-8')
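Example #4 relies on a json_feed_story() helper that is not shown. A hedged sketch of what a mapping from JSON Feed item fields to add_item() kwargs could look like; the field fallbacks are assumptions, not NewsBlur's actual logic:

import dateutil.parser

def json_feed_story(item):
    # Map JSON Feed 1.x item fields onto feedgenerator add_item() kwargs.
    pubdate = None
    if item.get('date_published'):
        pubdate = dateutil.parser.parse(item['date_published'])
    return {
        'title': item.get('title', '[Untitled]'),
        'link': item.get('url'),
        'description': item.get('content_html') or item.get('content_text', ''),
        'unique_id': item.get('id'),
        'pubdate': pubdate,
    }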
Example #5
def feed(context):
    feed = feedgenerator.Atom1Feed(
        u'Сказка: Новости',
        context.django_request.build_absolute_uri('/'),
        u'Новости мморпг «Сказка»',
        language=u'ru',
        feed_url=context.django_request.build_absolute_uri(url('news:feed')))

    news = logic.load_news_from_query(
        models.News.objects.order_by('-created_at')
        [:conf.settings.FEED_ITEMS_NUMBER])

    for news_item in news:

        if datetime.datetime.now() - news_item.created_at < datetime.timedelta(
                seconds=conf.settings.FEED_ITEMS_DELAY):
            continue

        feed.add_item(
            title=news_item.caption,
            link=context.django_request.build_absolute_uri(
                url('news:show', news_item.id)),
            description=news_item.html_content,
            pubdate=news_item.created_at,
            comments=url('forum:threads:show', news_item.forum_thread_id)
            if news_item.forum_thread_id else None,
            unique_id=str(news_item.id))

    return dext_views.Atom(feed)
Example #6
def _feed(request, activities, feed_title, feed_description):
    feed = feedgenerator.Atom1Feed(
        title=feed_title,
        link=settings.SITE_URL,
        description=feed_description,
        language='en'  # TODO: something better to do?
    )

    for activity in activities:
        item_data = activity.printable_data()
        item_data_text = activity.printable_data(html=False, link=False)
        item_metadata = activity.created  # TODO dateformat
        item_body = render_to_string(
            'feed/feed_item.txt',
            {'title': item_data, 'body': item_metadata},
            context_instance=RequestContext(request))
        feed.add_item(
            title=item_data_text,
            link=settings.SITE_URL,
            description=item_body,
            pubdate=activity.created,
            unique_id=get_tag_uri(settings.SITE_URL, activity.created),
            author_name=activity.user.username if activity.user else '-',
        )

    response = HttpResponse(mimetype=feed.mime_type)
    feed.write(response, 'utf-8')
    return response
Example #7
    def update_feed_data(self):
        feed = feedparser.parse(
            "https://forums.bunsenlabs.org/extern.php?action=feed&fid=12&type=atom"
        )
        refeed = feedgenerator.Atom1Feed(
            'BunsenLabs Linux News',
            'https://forums.bunsenlabs.org/viewforum.php?id=12', "")

        def mapper(e):
            opdata = self.retrieve_op_data(e['link'])
            return {
                "link": self.head(e['link'], '&'),
                "date": self.head(e['updated'], 'T'),
                "updated": self.head(opdata['updated'], 'T'),
                "op_summary": opdata['summary'],
                "title": " ".join(e['title'].split())
            }

        entries = list(map(mapper, feed.entries))
        # JSON API
        global PUBLIC
        PUBLIC = {"entries": entries, "ts": int(time.time())}
        # ATOM XML API
        for e in entries:
            refeed.add_item(e["title"],
                            e["link"],
                            e["op_summary"],
                            updateddate=datetime.datetime.strptime(
                                e["updated"], "%Y-%m-%dT%H:%M:%SZ"))
        global PUBLIC_ATOM
        PUBLIC_ATOM = refeed.writeString("utf-8")
Example #8
 def test_atom1_mime_type(self):
     """
     Test to make sure Atom MIME type has UTF8 Charset parameter set
     """
     atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
     self.assertEqual(atom_feed.mime_type,
                      "application/atom+xml; charset=utf-8")
Example #9
  def get(self):
    (forum, siteroot, tmpldir) = forum_siteroot_tmpldir_from_url(self.request.path_info)
    if not forum or forum.is_disabled:
      return self.error(HTTP_NOT_FOUND)

    feed = feedgenerator.Atom1Feed(
      title = forum.title or forum.url,
      link = my_hostname() + siteroot + "rssall",
      description = forum.tagline)
  
    posts = Post.gql("WHERE forum = :1 AND is_deleted = False ORDER BY created_on DESC", forum).fetch(25)
    for post in posts:
      topic = post.topic
      title = topic.subject
      link = my_hostname() + siteroot + "topic?id=" + str(topic.key().id())
      msg = post.message
      # TODO: a hack: using a full template to format message body.
      # There must be a way to do it using straight django APIs
      name = post.user_name
      if name:
        t = Template("<strong>{{ name }}</strong>: {{ msg|striptags|escape|urlize|linebreaksbr }}")
      else:
        t = Template("{{ msg|striptags|escape|urlize|linebreaksbr }}")
      c = Context({"msg": msg, "name" : name})
      description = t.render(c)
      pubdate = post.created_on
      feed.add_item(title=title, link=link, description=description, pubdate=pubdate)
    feedtxt = feed.writeString('utf-8')
    self.response.headers['Content-Type'] = 'text/xml'
    self.response.out.write(feedtxt)
Example #10
    def fetch(self, address=None):
        data = {}
        if not address:
            address = self.feed.feed_address
        self.address = address
        twitter_user = None

        if '/lists/' in address:
            list_id = self.extract_list_id()
            if not list_id:
                return

            tweets, list_info = self.fetch_list_timeline(list_id)
            if not tweets:
                return

            data['title'] = "%s on Twitter" % list_info.full_name
            data['link'] = "https://twitter.com%s" % list_info.uri
            data['description'] = "%s on Twitter" % list_info.full_name
        elif '/search' in address:
            search_query = self.extract_search_query()
            if not search_query:
                return

            tweets = self.fetch_search_query(search_query)
            if not tweets:
                return

            data['title'] = "\"%s\" on Twitter" % search_query
            data['link'] = "%s" % address
            data['description'] = "Searching \"%s\" on Twitter" % search_query
        else:
            username = self.extract_username()
            if not username:
                logging.debug(
                    u'   ***> [%-30s] ~FRTwitter fetch failed: %s: No active user API access'
                    % (self.feed.log_title[:30], self.address))
                return

            twitter_user = self.fetch_user(username)
            if not twitter_user:
                return
            tweets = self.user_timeline(twitter_user)

            data['title'] = "%s on Twitter" % username
            data['link'] = "https://twitter.com/%s" % username
            data['description'] = "%s on Twitter" % username

        data['lastBuildDate'] = datetime.datetime.utcnow()
        data['generator'] = 'NewsBlur Twitter API Decrapifier - %s' % settings.NEWSBLUR_URL
        data['docs'] = None
        data['feed_url'] = address
        rss = feedgenerator.Atom1Feed(**data)

        for tweet in tweets:
            story_data = self.tweet_story(tweet.__dict__)
            rss.add_item(**story_data)

        return rss.writeString('utf-8')
Example #11
def r_list_public_atom(request):
    qs = Event.objects.public_only().filter_by_period(
        from_date=datetime.now().date(), )
    tag = request.GET.get('tag')
    if tag:
        qs = qs.filter_by_tag(tag)

    qs = qs.prefetch_related('tags').prefetch_related('vk_announcement')

    fg = feedgenerator.Atom1Feed(
        title='Публичные мероприятия Кочерги',
        link=f'{settings.KOCHERGA_API_ROOT}/public_events_atom',  # should we add query params here?
        description='Публичные мероприятия Кочерги',
        author_name='Кочерга',
    )

    for event in reversed(qs):
        # fe.id(f'{settings.KOCHERGA_API_ROOT}/public_event/{event.uuid}')
        fg.add_item(
            title=event.title,
            link=event.public_link(),
            description=event.summary,
            pubdate=event.start,
        )

    return HttpResponse(fg.writeString('utf-8'))
Example #12
    def render_atom_feed(self, template_path, data):
        prefix = '%s://%s' % (self.request.scheme, self.request.host)

        feed = feedgenerator.Atom1Feed(
            title=_(u'HTML5Rocks - Posts & Tutorials'),
            link=prefix,
            description=_(
                u'A resource for developers looking to put HTML5 to use '
                'today, including information on specific features and '
                'when to use them in your apps.'),
            language=u'en')
        for tut in data:
            author_name = unicode(tut['author_id'])
            if 'second_author' in tut:
                author_name += ',' + tut['second_author']
            title = tut['title']
            if 'subtitle' in tut and tut['subtitle']:
                title += ': ' + tut['subtitle']
            feed.add_item(title=unicode(title),
                          link=prefix + tut['href'],
                          description=unicode(tut['description']),
                          pubdate=tut['pubdate'],
                          author_name=author_name,
                          categories=tut['categories'])
        self.response.headers.add_header('Content-Type',
                                         'application/atom+xml')
        self.response.out.write(feed.writeString('utf-8'))
Example #13
    def update_feed_data(self) -> None:
        json_entries = []
        entry_data = None

        feed = feedparser.parse(self._announcement_url)
        refeed = feedgenerator.Atom1Feed('BunsenLabs Linux News',
                                         self._info_forum_url,
                                         "",
                                         feed_guid=self.__feed_guid)

        with ThreadPoolExecutor(max_workers=4) as executor:
            for entry, entry_data in zip(
                    feed.entries,
                    executor.map(self.retrieve_op_data, feed.entries)):

                title = " ".join(entry['title'].split())
                link = self.head(entry['link'], '&')
                date = self.head(entry['updated'], 'T')
                updated = entry_data['updated']
                op_summary = entry_data['summary']
                fulltext = entry_data['fulltext']
                unique_id = str(uuid.uuid5(uuid.NAMESPACE_URL, link))

                self.log("News item {uuid} updated {updated} ({title})".format(
                    uuid=unique_id, title=title, updated=updated))

                refeed.add_item(title,
                                link,
                                fulltext,
                                updateddate=datetime.datetime.strptime(
                                    updated, "%Y-%m-%d"),
                                unique_id=unique_id)

                json_entries.append({
                    "link": link,
                    "date": date,
                    "updated": updated,
                    "op_summary": op_summary,
                    "title": title
                })

        json_entries = sorted(json_entries,
                              key=lambda e: e['updated'],
                              reverse=True)
        self.emit(
            payload={
                "endpoint": "/feed/news",
                "data": {
                    "entries": json_entries,
                    "ts": int(time.time())
                }
            })

        self.emit(payload={
            "endpoint": "/feed/news/atom",
            "data": refeed.writeString("utf-8")
        })
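The zip() in Example #13 is safe because ThreadPoolExecutor.map() yields results in the order of its input iterable, regardless of which worker finishes first, so each entry is paired with its own retrieve_op_data() result. A minimal demonstration of that ordering guarantee:

from concurrent.futures import ThreadPoolExecutor

def square(n):
    return n * n

with ThreadPoolExecutor(max_workers=4) as pool:
    # Results arrive in input order even though the calls run concurrently.
    assert list(pool.map(square, [1, 2, 3, 4])) == [1, 4, 9, 16]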
Example #14
    def fetch(self):
        page_name = self.extract_page_name()
        if not page_name:
            return

        facebook_user = self.facebook_user()
        if not facebook_user:
            return

        # If 'video', use video API to get embed:
        # f.get_object('tastyvegetarian', fields='posts')
        # f.get_object('1992797300790726', fields='embed_html')
        feed = self.fetch_page_feed(facebook_user, page_name,
                                    'name,about,posts,videos,photos')

        data = {}
        data['title'] = feed.get('name', "%s on Facebook" % page_name)
        data['link'] = feed.get('link', "https://facebook.com/%s" % page_name)
        data['description'] = feed.get('about', "%s on Facebook" % page_name)
        data['lastBuildDate'] = datetime.datetime.utcnow()
        data['generator'] = 'NewsBlur Facebook API Decrapifier - %s' % settings.NEWSBLUR_URL
        data['docs'] = None
        data['feed_url'] = self.feed.feed_address
        rss = feedgenerator.Atom1Feed(**data)
        merged_data = []

        posts = feed.get('posts', {}).get('data', None)
        if posts:
            for post in posts:
                story_data = self.page_posts_story(facebook_user, post)
                if not story_data:
                    continue
                merged_data.append(story_data)

        videos = feed.get('videos', {}).get('data', None)
        if videos:
            for video in videos:
                story_data = self.page_video_story(facebook_user, video)
                if not story_data:
                    continue
                for seen_data in merged_data:
                    if story_data['link'] == seen_data['link']:
                        # Video wins over posts (and attachments)
                        seen_data['description'] = story_data['description']
                        seen_data['title'] = story_data['title']
                        break

        for story_data in merged_data:
            rss.add_item(**story_data)

        return rss.writeString('utf-8')
Example #15
def generate_atom_feed(request, search):
    """Generates ATOM feed for first 100 results"""
    search_query = request.GET.get('q', None)

    if search_query:
        title = _(u'Firefox Input: {query}').format(query=search_query)
    else:
        title = _(u'Firefox Input')

    # Build the non-atom dashboard url and maintain all the
    # querystring stuff we have
    dashboard_url = request.build_absolute_uri()
    dashboard_url = dashboard_url.replace('format=atom', '')
    dashboard_url = dashboard_url.replace('&&', '&')
    if dashboard_url.endswith(('?', '&')):
        dashboard_url = dashboard_url[:-1]

    feed = feedgenerator.Atom1Feed(
        title=title,
        link=dashboard_url,
        description=_('Search Results From Firefox Input'),
        author_name=_('Firefox Input'),
    )
    for response in search[:100]:
        # TODO: Remove this after we pick up the fixes in the latest
        # elasticutils that causes results to come back as Python
        # datetimes rather than strings.
        created = datetime.strptime(response.created, '%Y-%m-%dT%H:%M:%S')
        categories = {
            'sentiment': _('Happy') if response.happy else _('Sad'),
            'platform': response.platform,
            'locale': response.locale
        }
        categories = (':'.join(item) for item in categories.items())

        link_url = reverse('response_view', args=(response.id,))
        link_url = request.build_absolute_uri(link_url)

        feed.add_item(
            title=_('Response id: {id}').format(id=response.id),
            description=response.description,
            link=link_url,
            pubdate=created,
            categories=categories
        )
    return HttpResponse(
        feed.writeString('utf-8'), mimetype='application/atom+xml')
Example #16
def rss(request, board_name):
    '''
    Provides an RSS file for the given board.

    @type  request: Django Request
    @param request: Request
    @type  board_name: string
    @param board_name: name of the board containing the articles to fetch
    '''

    from django.utils import feedgenerator

    server = warara_middleware.get_server()
    sess, r = warara.check_logged_in(request)

    feed = feedgenerator.Atom1Feed(
        title=u'ARA/%s' % board_name,
        link=u'/board/%s/rss/' % board_name,
        description=u'A RSS of all articles in %s board' % board_name)

    page_no = 1
    page_length = 20
    article_list = server.article_manager.article_list(sess, board_name, None,
                                                       page_no, page_length,
                                                       True).hit
    fake_author(article_list, False)

    for article in article_list:
        if article.heading:
            article_title = u'[%s] %s' % (article.heading, article.title)
        else:
            article_title = u'%s' % article.title
        feed.add_item(title=article_title,
                      link=u'/board/%s/%d/' % (board_name, article.id),
                      author_name=article.author_nickname,
                      pubdate=datetime.datetime.fromtimestamp(article.date),
                      description=u'author : %s date : %s' %
                      (article.author_nickname,
                       datetime.datetime.fromtimestamp(article.date)))

    return HttpResponse(feed.writeString('utf-8'),
                        mimetype=feedgenerator.Atom1Feed.mime_type)
Example #17
def atom(entries):
    '''Build and return an Atom feed.

    entries is a list of entries straight from the FriendFeed API.
    '''
    f = feedgenerator.Atom1Feed(
        title = 'FF To Go',
        link = 'http://www.fftogo.com',
        description = 'FF To Go',
        language = 'en',
    )
    for entry in entries:
        f.add_item(
            title = entry['title'],
            link = entry['link'],
            description = '<a href="http://www.fftogo.com/e/%s">View in fftogo</a>' % entry['id'],
            author_name = entry['user']['name'],
            pubdate = entry['updated'],
        )
    return HttpResponse(f.writeString('utf-8'))
Example #18
    def render_atom_feed(self, title, data):
        prefix = '%s://%s%s' % (self.request.scheme, self.request.host,
                                self.request.path.replace('.xml', ''))

        feed = feedgenerator.Atom1Feed(
            title=unicode('%s - %s' % (settings.APP_TITLE, title)),
            link=prefix,
            description=u'New features exposed to web developers',
            language=u'en')
        for f in data:
            pubdate = datetime.datetime.strptime(str(f['updated'][:19]),
                                                 '%Y-%m-%d  %H:%M:%S')
            feed.add_item(title=unicode(f['name']),
                          link='%s/%s' % (prefix, f.get('id')),
                          description=f.get('summary', ''),
                          pubdate=pubdate,
                          author_name=unicode(settings.APP_TITLE),
                          categories=[f['category']])
        self.response.headers.add_header('Content-Type',
                                         'application/atom+xml;charset=utf-8')
        self.response.out.write(feed.writeString('utf-8'))
Example #19
def atom_feed(page, **kwargs):
    """
    Simple Atom Syndication Format 1.0 feed.
    """
    title = u'unalog - ' + kwargs.get('title', u'')
    link = kwargs.get('link', u'http://unalog.com/')
    description = kwargs.get('description', u'unalog feed')
    language = kwargs.get('language', u'en')
    feed = feedgenerator.Atom1Feed(title=title,
                                   link=link,
                                   description=description,
                                   language=language)
    for entry in page.object_list:
        feed.add_item(
            title=entry.title,
            link=entry.url.value,
            id=reverse('entry', args=[entry.id]),
            description=entry.comment,
            pubdate=entry.date_created,
            categories=[entry_tag.tag.name for entry_tag in entry.tags.all()])
    return HttpResponse(feed.writeString('utf8'), mimetype='application/xml')
Example #20
    def gen_atom_feed(self, include_notes):
        url_path = "/atom.xml"
        if include_notes: url_path = "/atom-all.xml"
        feed = feedgenerator.Atom1Feed(
            title = "Krzysztof Kowalczyk blog",
            link = self.request.host_url + url_path,
            description = "Krzysztof Kowalczyk blog")

        query = Article.gql("WHERE is_public = True AND is_deleted = False ORDER BY published_on DESC")
        count = 0
        for a in query.fetch(200):
            if not include_notes and NOTE_TAG in a.tags: continue
            title = a.title
            link = self.request.host_url + "/" + a.permalink
            article_gen_html_body(a)
            description = a.html_body
            pubdate = a.published_on
            feed.add_item(title=title, link=link, description=description, pubdate=pubdate)
            count += 1
            if count >= 25: break
        feedtxt = feed.writeString('utf-8')
        return feedtxt
Example #21
def test_Atom1Feed(inp):
    feedgenerator.Atom1Feed(inp, "link", "description")
Example #22
    def get(self, p):

        HTTP_DATE_FMT = "%a, %d %b %Y %H:%M:%S GMT"

        if 'If-Modified-Since' in self.request.headers:
            try:

                last_seen = datetime.strptime(
                    self.request.headers['If-Modified-Since'], HTTP_DATE_FMT)
                ud = memcache.get('time_' + p)
                if ud and last_seen and ud <= last_seen:
                    logging.info('returning 304')
                    self.response.set_status(304)
                    return
            except Exception:
                pass  # ignore a malformed If-Modified-Since header

        #logging.info(self.request.headers)

        op = memcache.get(p)
        if op is not None:
            logging.info('delivering from cache')
            self.response.headers['Content-Type'] = 'application/atom+xml'
            self.response.out.write(op)
            return

        try:
            logging.info('re-requesting feed')
            url = 'https://plus.google.com/_/stream/getactivities/' + p + '/?sp=[1,2,"' + p + '",null,null,null,null,"social.google.com",[]]'
            result = urlfetch.fetch(url)
            if result.status_code == 200:
                regex = re.compile(',,', re.M)
                txt = result.content
                txt = txt[5:]
                txt = regex.sub(',null,', txt)
                txt = regex.sub(',null,', txt)
                txt = txt.replace('[,', '[null,')
                txt = txt.replace(',]', ',null]')
                obj = json.loads(txt)

                posts = obj[1][0]

                if not posts:
                    self.error(400)
                    self.response.out.write(
                        '<h1>400 - No Public Items Found</h1>')
                    return

                author = posts[0][3]
                updated = datetime.fromtimestamp(float(posts[0][5]) / 1000)

                feed = feedgenerator.Atom1Feed(
                    title="Google Plus User Feed - " + author,
                    link="https://plus.google.com/" + p,
                    description="Unofficial feed for Google Plus",
                    language="en",
                    author_name=author,
                    feed_url="http://plusfeeds.appspot.com/" + p)

                count = 0

                for post in posts:
                    #logging.info('post ' + post[21])

                    count = count + 1
                    if count > 10:
                        break

                    dt = datetime.fromtimestamp(float(post[5]) / 1000)
                    permalink = "https://plus.google.com/" + post[21]

                    desc = ''

                    if post[47]:
                        desc = post[47]
                    elif post[4]:
                        desc = post[4]

                    if post[44]:
                        desc = desc + ' <br/><br/><a href="https://plus.google.com/' + post[
                            44][1] + '">' + post[44][
                                0] + '</a> originally shared this post: '

                    if post[66]:

                        if post[66][0][1]:
                            desc = desc + (' <br/><br/><a href="' + post[66][0][1] +
                                           '">' + post[66][0][3] + '</a>')

                        if post[66][0][6]:
                            if post[66][0][6][0][1].find('image') > -1:
                                desc = desc + (' <p><img src="http:' +
                                               post[66][0][6][0][2] + '"/></p>')
                            else:
                                desc = desc + (' <a href="' + post[66][0][6][0][8] +
                                               '">' + post[66][0][6][0][8] + '</a>')

                    if desc == '':
                        desc = permalink

                    ptitle = desc
                    ptitle = htmldecode(ptitle)
                    ptitle = strip_tags(ptitle)[:75]

                    feed.add_item(title=ptitle,
                                  link=permalink,
                                  pubdate=dt,
                                  description=desc)

                output = feed.writeString('UTF-8')
                memcache.set(p, output, 10 * 60)
                memcache.set('time_' + p, updated)

                feeds = {}
                mlist = memcache.get('list')

                if mlist:
                    for k, v in mlist.iteritems():
                        feeds[k] = v

                feeds[p] = author
                memcache.set('list', feeds)

                self.response.headers['Last-Modified'] = updated.strftime(
                    HTTP_DATE_FMT)
                #self.response.headers['ETag'] = '"%s"' % (content.etag,)
                self.response.headers['Content-Type'] = 'application/atom+xml'
                self.response.out.write(output)

            else:
                self.error(404)
                self.response.out.write('<h1>404 Not Found</h1>')

        except Exception, err:
            self.error(500)
            self.response.out.write('<h1>500 Server Error</h1><p>' + str(err) +
                                    '</p>')
Example #23
    def fetch_youtube(self, address):
        username = None
        channel_id = None
        list_id = None
        
        if 'gdata.youtube.com' in address:
            try:
                username_groups = re.search('gdata.youtube.com/feeds/\w+/users/(\w+)/', address)
                if not username_groups:
                    return
                username = username_groups.group(1)
            except IndexError:
                return
        elif 'youtube.com/feeds/videos.xml?user=' in address:
            try:
                username = urlparse.parse_qs(urlparse.urlparse(address).query)['user'][0]
            except IndexError:
                return
        elif 'youtube.com/feeds/videos.xml?channel_id=' in address:
            try:
                channel_id = urlparse.parse_qs(urlparse.urlparse(address).query)['channel_id'][0]
            except IndexError:
                return            
        elif 'youtube.com/playlist' in address:
            try:
                list_id = urlparse.parse_qs(urlparse.urlparse(address).query)['list'][0]
            except IndexError:
                return            
        
        if channel_id:
            video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?channel_id=%s" % channel_id)
            channel_json = requests.get("https://www.googleapis.com/youtube/v3/channels?part=snippet&id=%s&key=%s" %
                                       (channel_id, settings.YOUTUBE_API_KEY))
            channel = json.decode(channel_json.content)
            try:
                username = channel['items'][0]['snippet']['title']
                description = channel['items'][0]['snippet']['description']
            except (IndexError, KeyError):
                return
        elif list_id:
            playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlists?part=snippet&id=%s&key=%s" %
                                       (list_id, settings.YOUTUBE_API_KEY))
            playlist = json.decode(playlist_json.content)
            try:
                username = playlist['items'][0]['snippet']['title']
                description = playlist['items'][0]['snippet']['description']
            except (IndexError, KeyError):
                return
            channel_url = "https://www.youtube.com/playlist?list=%s" % list_id
        elif username:
            video_ids_xml = requests.get("https://www.youtube.com/feeds/videos.xml?user=%s" % username)
            description = "YouTube videos uploaded by %s" % username
        else:
            return
                    
        if list_id:
            playlist_json = requests.get("https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&playlistId=%s&key=%s" %
                                       (list_id, settings.YOUTUBE_API_KEY))
            playlist = json.decode(playlist_json.content)
            try:
                video_ids = [video['snippet']['resourceId']['videoId'] for video in playlist['items']]
            except (IndexError, KeyError):
                return
        else:    
            if video_ids_xml.status_code != 200:
                return
            video_ids_soup = BeautifulSoup(video_ids_xml.content)
            channel_url = video_ids_soup.find('author').find('uri').getText()
            video_ids = []
            for video_id in video_ids_soup.findAll('yt:videoid'):
                video_ids.append(video_id.getText())
        
        videos_json = requests.get("https://www.googleapis.com/youtube/v3/videos?part=contentDetails%%2Csnippet&id=%s&key=%s" %
             (','.join(video_ids), settings.YOUTUBE_API_KEY))
        videos = json.decode(videos_json.content)

        data = {}
        data['title'] = ("%s's YouTube Videos" % username if 'Uploads' not in username else username)
        data['link'] = channel_url
        data['description'] = description
        data['lastBuildDate'] = datetime.datetime.utcnow()
        data['generator'] = 'NewsBlur YouTube API v3 Decrapifier - %s' % settings.NEWSBLUR_URL
        data['docs'] = None
        data['feed_url'] = address
        rss = feedgenerator.Atom1Feed(**data)

        for video in videos['items']:
            thumbnail = video['snippet']['thumbnails'].get('maxres')
            if not thumbnail:
                thumbnail = video['snippet']['thumbnails'].get('high')
            if not thumbnail:
                thumbnail = video['snippet']['thumbnails'].get('medium')
            duration_sec = isodate.parse_duration(video['contentDetails']['duration']).seconds
            if duration_sec >= 3600:
                hours = (duration_sec / 3600)
                minutes = (duration_sec - (hours*3600)) / 60
                seconds = duration_sec - (hours*3600) - (minutes*60)
                duration = "%s:%s:%s" % (hours, '{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
            else:
                minutes = duration_sec / 60
                seconds = duration_sec - (minutes*60)
                duration = "%s:%s" % ('{0:02d}'.format(minutes), '{0:02d}'.format(seconds))
            content = """<div class="NB-youtube-player"><iframe allowfullscreen="true" src="%s?iv_load_policy=3"></iframe></div>
                         <div class="NB-youtube-stats"><small>
                             <b>From:</b> <a href="%s">%s</a><br />
                             <b>Duration:</b> %s<br />
                         </small></div><hr>
                         <div class="NB-youtube-description">%s</div>
                         <img src="%s" style="display:none" />""" % (
                ("https://www.youtube.com/embed/" + video['id']),
                channel_url, username,
                duration,
                linkify(linebreaks(video['snippet']['description'])),
                thumbnail['url'] if thumbnail else "",
            )

            link = "http://www.youtube.com/watch?v=%s" % video['id']
            story_data = {
                'title': video['snippet']['title'],
                'link': link,
                'description': content,
                'author_name': username,
                'categories': [],
                'unique_id': "tag:youtube.com,2008:video:%s" % video['id'],
                'pubdate': dateutil.parser.parse(video['snippet']['publishedAt']),
            }
            rss.add_item(**story_data)
        
        return rss.writeString('utf-8')
Example #24
 def test_deterministic_attribute_order(self):
     feed = feedgenerator.Atom1Feed('title', '/link/', 'desc')
     feed_content = feed.writeString('utf-8')
     self.assertIn('href="/link/" rel="alternate"', feed_content)
Example #25
 def test_atom_add_item(self):
     # Not providing any optional arguments to Atom1Feed.add_item()
     feed = feedgenerator.Atom1Feed('title', '/link/', 'descr')
     feed.add_item('item_title', 'item_link', 'item_description')
     feed.writeString('utf-8')
Example #26
 def test_atom_add_item(self):
     # Not providing any optional arguments to Atom1Feed.add_item()
     feed = feedgenerator.Atom1Feed("title", "/link/", "descr")
     feed.add_item("item_title", "item_link", "item_description")
     feed.writeString("utf-8")
Example #27
 def test_deterministic_attribute_order(self):
     feed = feedgenerator.Atom1Feed("title", "/link/", "desc")
     feed_content = feed.writeString("utf-8")
     self.assertIn('href="/link/" rel="alternate"', feed_content)
Example #28
            logging.debug(u'   ***> [%-30s] ~FRTwitter fetch failed, disconnecting twitter: %s: %s' % 
                          (self.feed.title[:30], address, e))
            social_services.disconnect_twitter()
            return
        
        tweets = twitter_user.timeline()
        
        data = {}
        data['title'] = "%s on Twitter" % username
        data['link'] = "https://twitter.com/%s" % username
        data['description'] = "%s on Twitter" % username
        data['lastBuildDate'] = datetime.datetime.utcnow()
        data['generator'] = 'NewsBlur Twitter API Decrapifier - %s' % settings.NEWSBLUR_URL
        data['docs'] = None
        data['feed_url'] = address
        rss = feedgenerator.Atom1Feed(**data)

        for tweet in tweets:
            categories = []
            entities = ""

            for media in tweet.entities.get('media', []):
                if 'media_url_https' not in media: continue
                if media['type'] == 'photo':
                    entities += "<img src=\"%s\"> " % media['media_url_https']
                    if 'photo' not in categories:
                        categories.append('photo')

            content = """<div class="NB-twitter-rss">
                             <div class="NB-twitter-rss-tweet">%s</div><hr />
                             <div class="NB-twitter-rss-entities">%s</div>