def get(self):
    """Render topic.html with Guardian articles matching ?search=.

    Queries the Guardian content API, then builds one result dict per
    article. Entity/category extraction is only performed for the first
    hit (it is the expensive step); later hits get None for both fields.
    """
    search = self.request.get('search', '')
    gurl = ('http://content.guardianapis.com/search?q=%s&format=json'
            % urllib.quote(search))
    payload = json.loads(geturl(gurl))
    try:
        results = payload['response']['results']
    except KeyError:
        # API error / unexpected shape: render an empty result list.
        results = []
    context = dict(search=search)
    fullresults = []
    # enumerate replaces the original hand-rolled `count` counter.
    for index, result in enumerate(results):
        url = result['webUrl']
        # Only the first article is run through entity extraction.
        entities = getentities.get_entities(url) if index == 0 else {}
        fullresults.append(dict(url=url.encode('utf8'),
                                entities=entities.get('entities'),
                                categories=entities.get('categories'),
                                title=result['webTitle'],
                                author='Guardian',
                                ))
    context['results'] = fullresults
    self.render("topic.html", context)
def get(self): url=self.request.get('url') if not url.startswith('http'): url = 'http://' + url try: entities = getentities.get_entities(url) except getentities.DownloadError, e: entities = {'error': 'Download Error: %s. Try reloading' % str(e)} self.response.set_status(400)
def get(self):
    """Render url.html with entities/categories extracted from ?url=.

    When no url is supplied, renders the template with an empty context
    instead of fetching. (The original tested `url is not None`, which
    was always true — the default is '' and 'http://' had already been
    prepended — so an empty query fetched the bogus url 'http://'.)
    """
    url = self.request.get('url', '')
    context = {}
    if url:
        # Default to an http:// scheme when the caller omitted one.
        if not url.startswith('http'):
            url = 'http://' + url
        entities = getentities.get_entities(url)
        context.update(dict(url=url.encode('utf8'),
                            entities=entities.get('entities'),
                            categories=entities.get('categories'),
                            ))
    self.render("url.html", context)
def get(self): url = self.request.get('url', '') if not url.startswith('http'): url = 'http://' + url context = {} if url is not None: try: entities = getentities.get_entities(url) except getentities.DownloadError, e: context['error'] = "Couldn't download external resource: %s" % str(e) entities = dict(entities=[], categories=[]) context.update(dict(url=url.encode('utf8'), entities=entities.get('entities'), categories=entities.get('categories'), ))
def get(self):
    """JSON API: write the entity-extraction result for ?url= as
    application/json."""
    url = self.request.get('url')
    # Consistency fix: every sibling handler defaults to an http://
    # scheme when the caller omits one; this one previously did not,
    # so scheme-less urls were passed straight to get_entities.
    if not url.startswith('http'):
        url = 'http://' + url
    entities = getentities.get_entities(url)
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(json.dumps(entities))