from datetime import datetime
from itertools import islice
# open_url and load_json are helpers from the surrounding application;
# url_quote is Werkzeug's URL quoting helper.

def load_zine_reddit():
    """Load the zine reddit."""
    reddit_url = 'http://www.reddit.com'
    reddit_zine_url = reddit_url + '/r/zine'

    # fetch the subreddit's JSON listing
    response = open_url(reddit_zine_url + '.json')
    try:
        data = load_json(response.data)
    finally:
        response.close()

    result = []
    for item in islice(data['data']['children'], 20):
        d = item['data']
        # relative permalinks need the reddit host prepended
        if not d['url'].startswith("http"):
            d['url'] = reddit_url + d['url']
        result.append({
            'author':       d['author'],
            'created':      datetime.utcfromtimestamp(d['created']),
            'score':        d['score'],
            'title':        d['title'],
            'comments':     d['num_comments'],
            'url':          d['url'],
            'domain':       d['domain'],
            'author_url':   reddit_url + '/user/%s/' % url_quote(d['author']),
            'comment_url':  '%s/comments/%s' % (reddit_zine_url, d['id'])
        })
    return result

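
# A quick usage sketch for the loader above; the output format is
# purely illustrative:
if __name__ == '__main__':
    for link in load_zine_reddit():
        print '%s (%s points) -- %s' % (link['title'].encode('utf-8'),
                                        link['score'], link['url'])
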

from werkzeug.exceptions import NotFound
from werkzeug.routing import RequestRedirect
from xmlrpclib import Fault

def handle_pingback_request(source_uri, target_uri):
    """This method is exported via XMLRPC as `pingback.ping` by the
    pingback API.
    """
    app = get_application()

    # next we check if the source URL does indeed exist
    try:
        response = open_url(source_uri)
    except NetException:
        raise Fault(16, 'The source URL does not exist.')

    # we only accept pingbacks for links below our blog URL
    blog_url = app.cfg['blog_url']
    if not blog_url.endswith('/'):
        blog_url += '/'
    if not target_uri.startswith(blog_url):
        raise Fault(32, 'The specified target URL does not exist.')
    path_info = target_uri[len(blog_url):]

    # resolve the path below the blog URL against the URL map,
    # following internal redirects until it matches or gives up
    handler = endpoint = values = None
    while 1:
        try:
            endpoint, values = app.url_adapter.match(path_info)
        except RequestRedirect, e:
            path_info = e.new_url[len(blog_url):]
        except NotFound, e:
            break
        else:
            # a successful match ends the loop
            break

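
# A minimal sketch of how such a handler would be exposed over XMLRPC.
# The dispatcher below is a stdlib stand-in for illustration, not the
# application's actual XMLRPC service:
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher

dispatcher = SimpleXMLRPCDispatcher(allow_none=False, encoding='utf-8')
# clients discover the endpoint via the X-Pingback header and then
# call the dotted method name defined by the pingback specification
dispatcher.register_function(handle_pingback_request, 'pingback.ping')
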

def send_request(apikey, key_root, data, endpoint):
    """Send a request to the akismet server and return the response."""
    # key-specific calls are made against `<apikey>.<AKISMET_URL_BASE>`,
    # everything else against the bare API host
    url = 'http://%s%s/%s/%s' % (key_root and apikey + '.' or '',
                                 AKISMET_URL_BASE, AKISMET_VERSION,
                                 endpoint)
    try:
        response = open_url(url, data=url_encode(data))
    except Exception:
        # treat any failure to reach the server as "no response"
        return
    try:
        return response.data.strip()
    finally:
        response.close()

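
# Usage sketch: Akismet's documented `verify-key` endpoint receives the
# API key plus the blog URL and answers with the literal string
# 'valid'.  The wrapper name and `blog_url` argument are hypothetical:
def verify_akismet_key(apikey, blog_url):
    """Return True if akismet accepts the given API key."""
    response = send_request(apikey, False,
                            {'key': apikey, 'blog': blog_url},
                            'verify-key')
    return response == 'valid'
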

def configure(self, request):
    form = WordPressImportForm()

    if request.method == 'POST' and form.validate(request.form):
        dump = request.files.get('dump')
        if form.data['download_url']:
            try:
                dump = open_url(form.data['download_url']).stream
            except Exception, e:
                error = _(u'Error downloading from URL: %s') % e
        elif not dump:
            return redirect_to('import/wordpress')

        try:
            blog = parse_feed(dump)
        except Exception, e:
            log.exception(_(u'Error parsing uploaded file'))
            flash(_(u'Error parsing uploaded file: %s') % e, 'error')


def configure(self, request):
    form = FeedImportForm()

    if request.method == 'POST' and form.validate(request.form):
        feed = request.files.get('feed')
        if form.data['download_url']:
            try:
                feed = open_url(form.data['download_url']).stream
            except Exception, e:
                log.exception(_('Error downloading feed'))
                flash(_(u'Error downloading from URL: %s') % e, 'error')
        if not feed:
            return redirect_to('import/feed')

        try:
            blog = parse_feed(feed)
        except Exception, e:
            log.exception(_(u'Error parsing uploaded file'))
            flash(_(u'Error parsing feed: %s') % e, 'error')


def configure(self, request):
    form = WordPressImportForm()

    if request.method == 'POST' and form.validate(request.form):
        dump = request.files.get('dump')
        if form.data['download_url']:
            try:
                dump = open_url(form.data['download_url']).stream
            except Exception, e:
                log.exception(_('Error downloading feed'))
                flash(_(u'Error downloading from URL: %s') % e, 'error')
        if not dump:
            return redirect_to('import/wordpress')

        try:
            blog = parse_feed(dump)
        except Exception, e:
            log.exception(_(u'Error parsing uploaded file'))
            flash(_(u'Error parsing uploaded file: %s') % e, 'error')

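
# The `configure` methods above all repeat the same download-or-upload
# logic.  A minimal sketch of a shared helper, assuming the same
# open_url / flash / log / _ helpers; the helper name and `field`
# parameter are hypothetical:
def get_import_stream(request, field, download_url):
    """Return a file-like object from a download URL or an upload."""
    if download_url:
        try:
            # prefer a freshly downloaded copy over the uploaded file
            return open_url(download_url).stream
        except Exception, e:
            log.exception(_(u'Error downloading feed'))
            flash(_(u'Error downloading from URL: %s') % e, 'error')
            return None
    return request.files.get(field)
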

from xmlrpclib import Fault, ServerProxy

def pingback(source_uri, target_uri):
    """Try to notify the server behind `target_uri` that `source_uri`
    points to `target_uri`.  If that fails a `PingbackError` is raised.
    """
    try:
        response = open_url(target_uri)
    except Exception:
        raise PingbackError(32)

    # the pingback endpoint is advertised either in an X-Pingback
    # response header or in a <link rel="pingback"> element in the body
    try:
        pingback_uri = response.headers['X-Pingback']
    except KeyError:
        match = _pingback_re.search(response.data)
        if match is None:
            raise PingbackError(33)
        pingback_uri = unescape(match.group(1))

    rpc = ServerProxy(pingback_uri)
    try:
        return rpc.pingback.ping(source_uri, target_uri)
    except Fault, e:
        raise PingbackError(e.faultCode)

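
# Usage sketch -- ping every external URL referenced by a freshly
# published post; `post` and its attributes are illustrative, not part
# of the API above:
for link in post.outgoing_links:
    try:
        pingback(post.url, link)
    except PingbackError, e:
        log.warning(u'pingback to %s failed: %s' % (link, e))
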

import re

def get_excerpt(response, url_hint, body_limit=1024 * 512):
    """Get an excerpt from the given `response`.  `url_hint` is the URL
    which will be used as anchor for the excerpt.  The return value is a
    tuple in the form ``(title, body)``.  If one of the two items could
    not be calculated it will be `None`.
    """
    if isinstance(response, basestring):
        response = open_url(response)

    contents = response.data[:body_limit]
    title_match = _title_re.search(contents)
    title = title_match and strip_tags(title_match.group(1)) or None

    # look for an anchor whose href quotes `url_hint`
    # (case-insensitive, dot matches newlines)
    link_re = re.compile(r'(?is)<a[^>]+?"\s*%s\s*"[^>]*>(.*?)</a>' %
                         re.escape(url_hint))
    for chunk in _chunk_re.split(contents):
        match = link_re.search(chunk)
        if not match:
            continue
        before = chunk[:match.start()]
        after = chunk[match.end():]
        # a NUL byte joins the stripped text around the link so one
        # regex pass can grab up to 120 characters of context per side
        raw_body = '%s\0%s' % (strip_tags(before).replace('\0', ''),
                               strip_tags(after).replace('\0', ''))
        body_match = re.compile(r'(?:^|\b)(.{0,120})\0(.{0,120})(?:\b|$)') \
            .search(raw_body)
        if body_match:
            break
    else:
        return title, None

    before, after = body_match.groups()
    link_text = strip_tags(match.group(1))
    if len(link_text) > 60:
        link_text = link_text[:60] + u' …'

    bits = before.split()
    bits.append(link_text)
    bits.extend(after.split())
    return title, u'[…] %s […]' % u' '.join(bits)

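
# Usage sketch: inside a pingback handler the excerpt is what gets
# stored alongside the comment.  Fault 17 is the pingback spec's
# "source does not link to target" error; `source_uri` and
# `target_uri` are as in handle_pingback_request above:
title, excerpt = get_excerpt(source_uri, target_uri)
if excerpt is None:
    raise Fault(17, 'The source URI does not contain a link to '
                    'the target URI.')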