def _on_api_response(self, response):
    """Handle the asynchronous Google+ API response and render the feed."""
    if response is None:
        # The request never completed; tell the client to (re)authenticate.
        logging.error("API request for %s failed." % self.gplus_user_id)
        self.write("Unable to fetch content for this Google+ ID; it may "
                   "not be authenticated. See http://%s for more "
                   "information." % self.request.host)
        self.set_status(401)
        return self.finish()

    if response.error:
        if response.code == 403:
            # Quota exhausted on the API side; surface it as a 503.
            logging.error("API Request 403: %r" % json.loads(response.body))
            self.set_status(503)
            self.write("Unable to fulfill request at this time - Google+ "
                       "API rate limit exceeded.")
            return self.finish()
        else:
            logging.error("AsyncHTTPRequest error: %r, %r"
                          % (response.error, response))
            return self.send_error(500)
    else:
        data = json.loads(response.body)
        headers = {'Content-Type': 'application/atom+xml'}
        params = {
            'userid': self.gplus_page_id or self.gplus_user_id,
            'baseurl': 'http://%s' % self.request.host,
            'requesturi': 'http://%s%s' % (self.request.host,
                                           self.request.uri.split('?', 1)[0]),
        }

        if 'items' not in data or not data['items']:
            # No posts; serve an empty feed stamped with the current time.
            params['lastupdate'] = dateutils.to_atom_format(
                datetime.datetime.today())
            return self._respond(headers,
                                 empty_feed_template.format(**params))

        posts = data['items']
        lastupdate = max(dateutils.from_iso_format(p['updated'])
                         for p in posts)
        params['author'] = xhtml_escape(posts[0]['actor']['displayName'])
        params['lastupdate'] = dateutils.to_atom_format(lastupdate)
        headers['Last-Modified'] = dateutils.to_http_format(lastupdate)
        params['entrycontent'] = u''.join(
            entry_template.format(**get_post_params(p)) for p in posts)

        body = feed_template.format(**params)
        # Cache the rendered feed so repeat requests skip the API round-trip.
        Cache.set(self.cache_key, {'headers': headers, 'body': body},
                  time=Config.getint('cache', 'stream-expire'))
        return self._respond(headers, body)
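
# A minimal, hypothetical sketch of the ``dateutils`` helpers the handler
# above assumes. The real module lives elsewhere in the project and is not
# shown in this excerpt, so the timestamp formats here (RFC 3339 in from the
# Plus API, Atom dates and HTTP-dates out) are assumptions inferred from the
# call sites, not the project's actual implementation.
import calendar
from email.utils import formatdate


class _dateutils_sketch(object):
    """Illustrative stand-in only; not used by the code above."""

    @staticmethod
    def from_iso_format(s):
        # Assumed API timestamp shape, e.g. '2012-01-15T20:13:47.000Z'.
        return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%fZ')

    @staticmethod
    def to_atom_format(dt):
        # Atom (RFC 3339) timestamp, e.g. '2012-01-15T20:13:47Z'.
        return dt.strftime('%Y-%m-%dT%H:%M:%SZ')

    @staticmethod
    def to_http_format(dt):
        # HTTP-date (RFC 1123) for the Last-Modified header.
        return formatdate(calendar.timegm(dt.utctimetuple()), usegmt=True)
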
def get_post_params(post):
    """Map one Google+ activity to the template parameters for a feed entry."""
    post_updated = dateutils.from_iso_format(post['updated'])
    post_published = dateutils.from_iso_format(post['published'])
    post_id = post['id']
    permalink = post['url']
    item = post['object']

    # content collects the rendered entry body; content_for_title collects
    # only the pieces worth deriving the entry title from.
    content_for_title = []
    if post['verb'] == 'post':
        content = [item['content']]
        content_for_title.extend(content)
    elif post['verb'] == 'share':
        content = [post.get('annotation')] if post.get('annotation') else []
        content_for_title.extend(content)
        if 'actor' in item:
            content.append('<br/><br/>')
            if 'url' in item['actor'] and 'displayName' in item['actor']:
                content.append('<a href="%s">%s</a>'
                               % (item['actor']['url'],
                                  item['actor']['displayName']))
                content.append(' originally shared this post: ')
            elif 'displayName' in item['actor']:
                content.append(item['actor']['displayName'])
                content.append(' originally shared this post: ')
            content_for_title.append(
                'Shared from %s: '
                % item['actor'].get('displayName', 'elsewhere'))
        content.append('<br/><blockquote>')
        content.append(item['content'])
        content.append('</blockquote>')
        content_for_title.append(item['content'])
    elif post['verb'] == 'checkin':
        content = [item['content']]
        place = post.get('placeName', '')
        if place:
            if item['content']:
                # Add some spacing if there's actually a comment
                content.append('<br/><br/>')
            content.append('Checked in at %s' % place)
        content_for_title.extend(content)
    else:
        content = []

    if 'attachments' in item:  # attached content
        for attach in item['attachments']:
            if content:
                content.append('<br/><br/>')
                attach_title = False
            else:
                # Nothing else in the post: let the attachment drive the title.
                attach_title = True
            if attach.get('objectType') == 'article':
                # Attached link
                content.append('<a href="%s">%s</a>'
                               % (attach['url'],
                                  attach.get('displayName', 'attached link')))
                # Possible attached photo
                if 'image' in attach:
                    content.append('<br/><img src="%s" alt="%s" />'
                                   % (attach['image']['url'],
                                      attach['image'].get('displayName',
                                                          'attached image')))
            elif attach.get('objectType') == 'photo':
                # Attached image (G+ doesn't always supply alt text...)
                content.append('<img src="%s" alt="%s" />'
                               % (attach['image']['url'],
                                  attach['image'].get('displayName',
                                                      'attached image')))
            elif attach.get('objectType') in ('photo-album', 'album'):
                # Attached photo album link
                content.append('Album: <a href="%s">%s</a>'
                               % (attach['url'],
                                  attach.get('displayName', 'attached album')))
            elif attach.get('objectType') == 'video':
                # Attached video
                content.append('Video: <a href="%s">%s</a>'
                               % (attach['url'],
                                  attach.get('displayName', 'attached video')))
            else:
                # Unrecognized attachment type
                content.append('[unsupported post attachment of type "%s"]'
                               % attach.get('objectType'))
                logging.info('Unrecognized attachment: %r', attach)
            if attach_title:
                content_for_title.extend(content)

    # If no actual parseable content was found, just link to the post
    post_content = u''.join(content).strip() or permalink

    # Generate the post title out of just text [max: 100 characters],
    # using only the part before the first line break.
    post_title_content = re.split(r'<br\s*/?>', ''.join(content_for_title))[0]
    post_title = u' '.join(
        x.string for x in soup(post_title_content).findAll(text=True))
    post_title = space_compress_regex.sub(' ', post_title).strip()
    if post_title == permalink or not post_title:
        post_title = u"A public G+ post"
    elif len(post_title) > 100:
        candidate_title = post_title[:97]
        if '&' in candidate_title[-5:]:
            # Don't risk cutting off HTML entities
            candidate_title = candidate_title.rsplit('&', 1)[0]
        if ' ' in candidate_title[-5:]:
            # Reasonably avoid cutting off words
            candidate_title = candidate_title.rsplit(' ', 1)[0]
        post_title = u"%s..." % candidate_title

    return {
        'title': post_title,
        'permalink': xhtml_escape(permalink),
        'postatomdate': dateutils.to_atom_format(post_updated),
        'postatompubdate': dateutils.to_atom_format(post_published),
        'postdate': post_published.strftime('%Y-%m-%d'),
        'id': xhtml_escape(post_id),
        'summary': xhtml_escape(post_content),
    }
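
# Illustrative usage only: a hand-constructed activity in the shape
# get_post_params() reads from the Plus API. Every field value below is
# invented for illustration, and a real API response carries many more
# fields. Running this still requires the module-level helpers this excerpt
# assumes (soup, space_compress_regex, dateutils, xhtml_escape).
if __name__ == '__main__':
    sample_post = {
        'verb': 'post',
        'id': 'z13sampleactivityid',
        'url': 'https://plus.google.com/123/posts/abc',
        'updated': '2012-01-15T20:13:47.000Z',
        'published': '2012-01-15T20:13:47.000Z',
        'actor': {'displayName': 'Example User'},
        'object': {'content': 'Hello, <b>world</b>!'},
    }
    params = get_post_params(sample_post)
    # Expect xhtml-escaped strings ready for entry_template.format(**params):
    # a tag-stripped, whitespace-compressed title and e.g.
    # params['postdate'] == '2012-01-15'.
    print(params['title'])
    print(params['postdate'])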