def markdownify(url_list, **options):
    articles = []
    images = []
    paragraph_links = options['paragraph_links']
    wrap_text = options['wrap_text']
    preamble = options['preamble']
    for url in url_list:
        req = urllib2.Request(url, None, {'Referer': url_list[0]})
        html = urllib2.urlopen(req).read()
        document = Document(html, url=url)
        readable_title = document.short_title()
        summary = document.summary()
        summary_doc = build_doc(summary)
        # image srcs are collected here but never returned by this function
        images.extend([a.get('src') for a in summary_doc.findall('.//img')])
        articles.append(summary)
    markdown_articles = []
    for article, url in zip(articles, url_list):
        h = html2text.HTML2Text(baseurl=url)
        h.inline_links = False
        h.links_each_paragraph = 1 if paragraph_links else 0
        h.body_width = 78 if wrap_text else 0
        markdown_articles.append(h.handle(article))
    combined_article = u"\n\n----\n\n".join(markdown_articles)
    if preamble:
        combined_article = (u"Title: %s \nOriginal URL: %s\n\n"
                            % (readable_title, url_list[0])) + combined_article
    return combined_article.encode("utf-8")
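# A minimal usage sketch for markdownify() above; the URL is a placeholder,
# and all three option keys must be supplied because the function reads them
# unconditionally (Python 2, matching the snippet's urllib2 usage).
md = markdownify(
    ['http://example.com/article'],
    paragraph_links=True,
    wrap_text=False,
    preamble=True,
)
print md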
def strip_chapter(self, html):
    """Strips the chapter and gets the relevant HTML using Readability.

    :param html: str
    :return: (title, content) tuple
    """
    doc = Document(html)
    if len(doc.summary()) <= 20:
        content = str(BeautifulSoup(html, 'html.parser')
                      .find_all('div', class_=self.main_content_div)[0])
        content = '<html><head><meta charset="utf-8"></head>' + content + '</html>'
        return doc.short_title(), content
    return (doc.short_title(),
            str(doc.summary()).replace('<html>',
                                       '<html><head><meta charset="utf-8"></head>'))
def strip_chapter(self, html):
    """Strips the chapter and gets the relevant HTML using Readability.

    :param html: str
    :return: (title, content) tuple
    """
    doc = Document(html)
    if len(doc.summary()) <= 20:
        print 'This page has errors, returning entry-content div raw HTML.'
        content = str(BeautifulSoup(html, 'html.parser')
                      .find_all('div', class_=self.main_content_div)[0])
        content = '<html><head><meta charset="utf-8"></head>' + content + '</html>'
        return doc.short_title(), content
    return (doc.short_title(),
            str(doc.summary()).replace('<html>',
                                       '<html><head><meta charset="utf-8"></head>'))
def get_data(url):
    error_num = 0
    while True:
        if error_num >= 10:
            cprint("Finished because error_num reached 10 times", "red")
            return 0, 0
        try:
            req = requests.get(url)
            if int(req.status_code) == 503:
                cprint("Google detected the abnormal network traffic", "red")
                time.sleep(60 * 60)
            elif int(req.status_code) != 200:
                cprint("Now Get StatusCode{}: Error_num{}".format(req.status_code, error_num), "red")
                return 0, 0
            else:
                html = req.text
                break
        # assumes: from requests.exceptions import ConnectionError
        # (the builtin ConnectionError would not catch requests' errors)
        except ConnectionError:
            cprint("Now Get ConnectionError: Error_num{}".format(error_num), "red")
            error_num += 1
            time.sleep(5)
    try:
        document = Document(html)
        content_html = document.summary()
        content_text = lxml.html.fromstring(content_html).text_content().strip()
        short_title = document.short_title()
        return short_title, content_text
    except Exception:
        return 0, 0
def get_summary(url):
    html = urllib.request.urlopen(url).read()
    doc = Document(html)
    # summary()/short_title() parse the document lazily in readability-lxml,
    # so the explicit doc.parse([...]) step from older ports is not needed.
    readable_article = doc.summary()
    readable_title = doc.short_title()
    return readable_article, readable_title
class Article:
    def __init__(self, url):
        print('Saving page: {}'.format(url))
        res = requests.get(url)
        self.url = url
        self.article = Document(res.content)
        self._add_title()
        self._save_images()

    def _add_title(self):
        self.root = etree.fromstring(self.article.summary())
        body = self.root.find('body')
        title = self.article.title()
        ascii_title = unidecode(title) if type(title) == unicode else title
        title_header = etree.HTML('<h2>{}</h2>'.format(ascii_title))
        body.insert(0, title_header)

    def _save_images(self):
        tmppath = tempfile.mkdtemp()
        images = self.root.xpath('//img')
        for img in images:
            imgsrc = img.get('src')
            # handle scheme-agnostic URLs
            if 'http' not in imgsrc and '//' in imgsrc:
                imgsrc = 'http:{}'.format(imgsrc)
            # handle relative file paths
            elif 'http' not in imgsrc:
                parsed = urlparse(self.url)
                imgsrc = '{}://{}{}'.format(parsed.scheme, parsed.netloc, imgsrc)
            filename = os.path.basename(imgsrc)
            dest = os.path.join(tmppath, filename)
            try:
                res = requests.get(imgsrc)
            except Exception as e:
                print('Could not fetch image ({}) from "{}"'.format(str(e), imgsrc))
                return
            if res.status_code == 404:
                print('Could not fetch image (HTTP 404), attempted fetch: "{}", '
                      'source URL: {}'.format(imgsrc, img.get('src')))
                continue
            with open(dest, 'wb') as f:
                f.write(res.content)
            img.set('src', dest)

    @property
    def title(self):
        return self.article.title()

    @property
    def html(self):
        return etree.tostring(self.root)
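# A short usage sketch for the Article class above, with a placeholder URL;
# article.html is bytes because etree.tostring() returns bytes by default.
article = Article('http://example.com/post')
with open('saved_article.html', 'wb') as f:
    f.write(article.html)
print(article.title)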
def find_full_text(html_source):
    doc = Doc(html_source)
    content = doc.summary()
    stripped = strip_tags(content)
    stripped = to_plain_text(stripped)
    return stripped
async def enrich(self, result):
    if not self.soup:
        return result
    result.set('title', self.soup.title.string, 0, 'textlength')
    if result.has('content'):
        return result
    parts = []
    for txt in self.soup.find_all("noscript"):
        if txt.string is not None:
            parts.append(txt.string)
    html = " ".join(parts).strip()
    if not html:
        html = self.soup.all_text()
    try:
        doc = Document(html, url=self.url)
        content = doc.summary(html_partial=True)
        result.set('content', sanitize_html(content))
    # pylint: disable=bare-except
    except:
        pass
    return result
def process(doc, url):
    html_body = Document(doc)
    summary = html_body.summary()
    title = html_body.short_title()
    images = []
    for img in html_body.reverse_tags(html_body.html, 'img'):
        try:
            fp = tempfile.NamedTemporaryFile(dir=settings.TEMP_DIR)
            img_src = urljoin(url, img.get('src'))
            if re.search(r'http[s]?://', img_src):
                r = requests.get(img_src, stream=True)
                write_file(r, fp)
            else:
                # assume an inline base64 data URI
                image = base64.b64decode(img_src.split(',')[1])
                fp.write(image)
            images.append(fp)
        except Exception:
            logger.error(
                'extractor.formats.html Image Collector Error!!',
                exc_info=True,
                extra={'data': {'url': url}},
            )
    html = '<h1>' + title + '</h1>' + summary
    regex = re.compile('\n*', flags=re.IGNORECASE)
    html = '<p>{}</p>'.format(regex.sub('', html))
    soup = BeautifulSoup(html, 'lxml')
    text = _get_plain_text(soup)
    return text, images, 1
def main():
    novels = {
        'cbi': 'https://boxnovel.com/novel/castle-of-black-iron/chapter-',
        'sgg': 'https://boxnovel.com/novel/super-gene/chapter-',
        'sas': 'https://boxnovel.com/novel/strongest-abandoned-son/chapter-',
        'atg': 'https://www.wuxiaworld.com/novel/against-the-gods/atg-chapter-',
    }
    total = []
    # args: novel key, first chapter, optional last chapter
    if len(sys.argv) < 4:
        inicio = int(sys.argv[2])
        fim = int(sys.argv[2]) + 1
    else:
        inicio = int(sys.argv[2])
        fim = int(sys.argv[3]) + 1
    url = novels[sys.argv[1]]
    for i in range(inicio, fim):
        response = getPage(url + str(i))
        doc = Document(response.text)
        fileName = re.sub(r'[^a-zA-Z0-9]+', ' ', doc.title())
        total.append(doc.summary())
        print(i)
    f = open(fileName + str(fim - 1) + '.html', 'w')
    for chapter in total:
        f.write(chapter)
    f.close()
def _parse_article(self, response):
    feed_entry = response.meta["feed_entry"]
    il = FeedEntryItemLoader(parent=response.meta["il"])
    try:
        response.text
    except AttributeError:
        # Response is not text (e.g. PDF, ...).
        il.add_value("title", feed_entry.get("title"))
        il.add_value("content_html", feed_entry.get("summary"))
        return il.load_item()
    doc = Document(response.text, url=response.url)
    il.add_value("title", doc.short_title() or feed_entry.get("title"))
    summary = feed_entry.get("summary")
    try:
        content = doc.summary(html_partial=True)
        if summary and len(summary) > len(content):
            # Something probably went wrong if the extracted content is
            # shorter than the summary.
            raise Unparseable
    except Unparseable:
        content = summary
    il.add_value("content_html", content)
    return il.load_item()
def reada(url, cache=True):
    if cache:
        cached = memcache.get(key=url)
        if cached is not None:
            return cached
    # file = urllib.urlopen(url)
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    file = opener.open(url)
    enc = 'utf-8'
    text = ''
    try:
        # 1. web html -> readability
        raw = Document(file.read(), url=url)
        html = raw.summary().encode(enc, 'replace')
        title = raw.short_title()
        # 2. readability -> markdown
        data = html.decode(enc)
        h = html2text.HTML2Text(baseurl=url)
        h.ignore_images = False
        h.body_width = 100000
        text = h.handle(data)
    finally:
        file.close()
    d = {'url': url, 'title': title, 'content': text}
    if cache:
        memcache.add(key=url, value=d, time=600)
    return d
def extract_article(url):
    r = requests.get(url)
    # if the url exists, continue
    if r.status_code == 200:
        # extract and parse response url
        url = parse_url(r.url)
        # extract html
        html = r.content.decode('utf-8', errors='ignore')
        # run boilerpipe
        BP = Extractor(html=html)
        # run readability
        Rdb = Document(html)
        html = Rdb.summary()
        # return article data
        return {
            'extracted_title': Rdb.short_title().strip(),
            'extracted_content': strip_tags(BP.getText()),
        }
    # otherwise return an empty dict
    else:
        return {}
def process(doc, params):
    url = params['url']
    html_body = Document(doc)
    summary = html_body.summary()
    title = html_body.short_title()
    images = []
    for img in html_body.reverse_tags(html_body.html, 'img'):
        try:
            fp = tempfile.NamedTemporaryFile(dir='/tmp/')
            img_src = urljoin(url, img.get('src'))
            img_name = None
            if re.search(r'http[s]?://', img_src):
                r = requests.get(img_src, stream=True)
                img_name = get_filename_from_url(img_src)
                write_file(r, fp)
            else:
                # inline base64 data URI: "data:<mime>;base64,<content>"
                img_meta, content = img_src.split(',')
                image = base64.b64decode(content)
                img_name = get_filename_from_base64(img_meta)
                fp.write(image)
            images.append((img_name, fp))
        except Exception:
            logger.error(
                'extractor.formats.html Image Collector Error!!',
                exc_info=True,
                extra={'data': {'url': url}},
            )
    html = '<h1>' + title + '</h1>' + summary
    html = '<p>{}</p>'.format(html)
    text = html2text.html2text(html)
    return text, images, 1, None
def extract_article(url):
    r = requests.get(url)
    # if the url exists, continue
    if r.status_code == 200:
        # extract and parse response url
        url = parse_url(r.url)
        # extract html
        html = r.content.decode('utf-8', errors='ignore')
        # run boilerpipe
        # boilerpipe_extractor = Extractor(html=html)
        # run readability
        readability_extractor = Document(html)
        html = readability_extractor.summary()
        # return article data
        return {
            'title': readability_extractor.short_title(),
            'html': html,
            'content': strip_tags(html).encode('utf-8', errors='ignore'),
            'url': url,
        }
    # otherwise return an empty dict
    else:
        return {}
class Gist:

    keyword_pattern = re.compile(r'^[^\d]+$')
    stop_words = set(get_stop_words('en'))

    def __init__(self, html):
        self.html = html
        self.document = Document(html)

    @property
    def title(self):
        return self.document.short_title()

    @cached_property
    def text(self):
        text = self.document.summary()
        text = re.sub('<br[^>]+>', '\n', text)
        text = re.sub('</?p[^>]+>', '\n\n', text)
        text = re.sub('<[^>]+>', '', text)
        # blank out whitespace-only lines (MULTILINE so ^/$ match per line)
        text = re.sub('^[ \t]+$', '', text, flags=re.MULTILINE)
        text = re.sub('\n{3,}', '\n\n', text, flags=re.MULTILINE)
        return text

    @staticmethod
    def _common_prefix(one, two):
        parallelity = [x == y for x, y in zip(one, two)] + [False]
        return parallelity.index(False)

    @classmethod
    def _find_representative(cls, stem, text):
        tokens = text.split()
        prefixes = {token: cls._common_prefix(token, stem) for token in tokens}
        best = lambda token: (-token[1], len(token[0]))
        return sorted(prefixes.items(), key=best)[0][0]

    @classmethod
    def _is_good_keyword(cls, word):
        return (word not in cls.stop_words) and cls.keyword_pattern.match(word)

    @classmethod
    def find_keywords(cls, text):
        whoosh_backend = SearchForm().searchqueryset.query.backend
        if not whoosh_backend.setup_complete:
            whoosh_backend.setup()
        with whoosh_backend.index.searcher() as searcher:
            keywords = searcher.key_terms_from_text(
                'text', text, numterms=10, normalize=False)
        keywords = list(zip(*keywords))[0] if keywords else []
        keywords = [cls._find_representative(keyword, text) for keyword in keywords]
        keywords = [keyword for keyword in keywords if cls._is_good_keyword(keyword)]
        # no duplicate keywords in the list
        keywords = list(set(keywords))
        # no punctuation in suggested keywords
        keywords = [''.join(c for c in s if c not in string.punctuation)
                    for s in keywords]
        return keywords

    @property
    def keywords(self):
        return self.find_keywords(self.text)
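# A quick sketch of how Gist might be exercised; the HTML is a synthetic
# placeholder, and the keywords property is skipped because it needs the
# Whoosh/haystack search backend that the class assumes.
html = '<html><body><p>%s</p></body></html>' % ('Readable example text. ' * 60)
gist = Gist(html)
print(gist.title)
print(gist.text[:200])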
def run(index):
    print "Index %d" % index
    dirname = "data/%04d" % index
    # url of english article
    url = open(dirname + "/url_en.txt").read()
    # download html
    html = urllib.urlopen(url).read().decode('latin-1')
    # apply readability
    document = Document(html)
    article = document.summary()
    article = nltk.clean_html(article)
    # replace latin characters
    article = re.sub(u' ', u'\n', article)
    article = re.sub(u'\x92', u'`', article)
    article = re.sub(u'\x96', u'-', article)
    # article_en.txt
    output = codecs.open(dirname + "/article_en.txt", 'w',
                         encoding='ascii', errors='ignore')
    output.write(article)
    output.close()
    # title.txt
    output = codecs.open(dirname + "/title.txt", 'w',
                         encoding='ascii', errors='ignore')
    output.write(document.title())
    output.close()
def process(doc):
    html_body = Document(doc)
    summary = html_body.summary()
    title = html_body.short_title()
    text = text_maker.handle(summary)
    return title, text
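# text_maker is undefined in the snippet above; a plausible module-level
# setup (an assumption, not the original code) would be:
import html2text

text_maker = html2text.HTML2Text()
text_maker.body_width = 0  # disable hard wrapping of the Markdown output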
def main():
    html = urllib.urlopen("http://habrahabr.ru/post/150756/").read()
    doc = Document(html)
    short_title = doc.short_title()
    readable_article = doc.summary()
    f = open("C:\\users\\mykola\\documents\\%s.html" % short_title, "wb")
    f.write(readable_article.encode("utf-8"))
    f.close()
def _getResponseText(self, response):
    '''(response) -> Text

    Returns text within the body of an HttpResponse object.
    '''
    readability = Document(response.body)
    content = readability.title() + readability.summary()
    return content
def extract(self, html):
    # https://github.com/buriy/python-readability/blob/master/readability/readability.py
    doc = Document(html)
    self.__title = doc.title()
    self.__html = doc.summary()
    self.__md = html2text.html2text(self.__html)
    self.__text = self.__format_to_text(self.__html)
    return self.__text
def download_via_url(url):
    response = requests.get(url)
    doc = Document(response.text)
    title = doc.title()
    summary = doc.summary()
    soup = BeautifulSoup(summary, "html.parser")
    return title, soup.text
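# A usage sketch for download_via_url(); the URL is a placeholder, and the
# function's own imports (requests, BeautifulSoup, Document) are assumed.
title, text = download_via_url('https://example.com/story')
print(title)
print(text[:200])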
def get_article_from_item(self, item):
    url = item['link']
    logging.debug(url)
    author = 'n/a'
    if item.has_key('author'):
        author = item.author
    html = urllib.urlopen(url).read()
    doc = Document(html)
    return Article(doc.title(), doc.short_title(), author, doc.summary())
def crawl_url(url):
    html = requests.get(url)
    doc = Document(html.content)
    content = doc.summary().encode('utf-8')
    title = doc.title().encode('utf-8')
    return {'content': content, 'title': title}
def process_html(html):
    doc = Document(html)
    return {
        'content': doc.content(),
        'clean_html': doc.get_clean_html(),
        'short_title': doc.short_title(),
        'summary': html_to_text(doc.summary()),
        'title': doc.title(),
    }
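# process_html() relies on an html_to_text() helper the snippet does not
# define; a minimal stand-in (an assumption, not the original) could be:
from bs4 import BeautifulSoup

def html_to_text(html):
    return BeautifulSoup(html, 'html.parser').get_text(' ').strip()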
def news():
    search = request.args.get('q')
    if request.args.get('count'):
        count = request.args.get('count')
    else:
        count = 10
    if request.args.get('offset'):
        offset = request.args.get('offset')
    else:
        offset = 0
    if search:
        headers = {'Ocp-Apim-Subscription-Key': 'd94125558b884a309dd71f9e1aa8b9fb'}
        params = urllib.parse.urlencode({
            'q': search,
            'count': count,
            'offset': offset,
            'mkt': 'en-id',
            'safesearch': 'Moderate',
        })
        try:
            conn_url = http.client.HTTPSConnection('api.cognitive.microsoft.com')
            conn_url.request("GET", "/bing/v7.0/news/search?%s" % params, "{body}", headers)
            response = conn_url.getresponse()
            data = response.read().decode('utf-8')
            data_array = json.loads(data)
            conn_url.close()
        except Exception as e:
            print("[Errno {0}] {1}".format(e.errno, e.strerror))
        print(data_array)
        i = 0
        for result in data_array['value']:
            try:
                response = requests.get(result['url'], verify=False, allow_redirects=False)
            except requests.exceptions.ConnectionError:
                print(result['url'], "Connection refused")
                response = requests.get("https://pens.ac.id", verify=False)
            print(result['url'])
            doc = Document(response.content)
            raw = BeautifulSoup(doc.summary(html_partial=True), 'html.parser').get_text()
            result['sentiment'] = int(getSentiment(raw))
            print("SENTIMENT : ", result['sentiment'])
            result['status'] = analyze(raw)
            result['id_rank'] = i
            if result['datePublished']:
                result['datePublished'] = parser.parse(result['datePublished'])
                result['datePublished'] = result['datePublished'].strftime(
                    'Diterbitkan pada %d %b %Y pukul %I:%M WIB')
                print(result['datePublished'])
            i += 1
        return render_template("news.html", data=data_array)
    else:
        return render_template("news.html")
def readability_extractor(self, html):
    try:
        doc = Document(html)
        content = doc.summary()
        if content:
            return content
        else:
            return self.html2text_extractor(html)
    except Exception:
        return self.html2text_extractor(html)
def extract_article(self):
    """Returns only readable content.

    Returns:
        data - {
            'title': 'Title of the article',
            'content': 'HTML body of the article'
        }
    """
    doc = Document(self._html)
    return {'title': doc.title(), 'content': doc.summary()}
def extract_by_readability(html):
    document = Document(html)

    def strip_html(html):
        return re.sub(r'<[^<]+?>', '', html)

    return {
        'title': ensure_unicode(document.short_title()),
        'body': strip_html(ensure_unicode(document.summary())),
    }
def body_via_readability(page_html, source_url):
    """Readability is good at extracting the article body and title."""
    obj = Document(page_html)
    body = obj.summary()
    if not body:
        return None
    return html.prepare(body, source_url)
def extract_data(self, patchurl):
    try:
        f = requests.get(patchurl)
        html = f.content
        doc = Document(html)
        title = doc.short_title()
        summary = doc.summary()
        return smart_str(title), smart_str(summary)
    except Exception:
        return None, None
def get_content_from_link(url):
    response = requests.get(url, verify=False)
    print(url)
    doc = Document(response.content)
    raw = BeautifulSoup(doc.summary(html_partial=True), 'html.parser').get_text()
    data = raw.replace('\n', ' ').replace('\r', '').replace('\"', '\'')
    data = re.sub(' +', ' ', data)
    return [[url, data]]
def decode_doc(doc, url):
    cs = re.compile(b'^<(meta|META).*charset=("|\')?([^ "\']*)')
    pkey = re.compile(b'^<(meta|META).*keywords.*content=("|\')?([^ "\']*)')
    codec = None
    keywords = None
    # sniff the charset and keywords from the <meta> tags
    for l in doc:
        if l.startswith(b'<meta') or l.startswith(b'<META'):
            if codec is None and b'charset' in l:
                m = cs.match(l)
                codec = m.group(3).decode()
            if keywords is None and b'keywords' in l:
                m = pkey.match(l)
                if m:
                    keywords = m.group(3)
    # decode each line; undecodable lines are dropped
    sdoc = []
    for l in doc:
        try:
            l = l.decode(codec)
        except Exception:
            l = ''
        sdoc.append(l)
    try:
        if keywords:
            keywords = keywords.decode(codec)
        else:
            keywords = ''
        keywords = re.split(r'[ ,;\|]', keywords)
    except Exception:
        pass
    doc = '\n'.join(sdoc)
    try:
        doc = Document(doc)
        title = doc.short_title()
        content = doc.summary()
    except Exception:
        return
    data = {"url": url, 'keywords': keywords, 'title': title, 'content': content}
    return data
def try_readability():
    html = urllib.request.urlopen(ARTICLE).read()
    doc = Document(html)
    con = BeautifulSoup(doc.summary()).get_text()
    tit = doc.short_title()
    print("===READABILITY===")
    print("=CONTENT=")
    print(con)
    print("=TITLE=")
    print(tit)
def parse_news_content(self, response):
    # follow links to the full article first
    for link in self.full_article_link_extractor.extract_links(response):
        request = response.request.replace(url=link.url)
        yield request
    item = self._create_item(response)
    if item is not None:
        doc = Document(response.body)
        item['title'] = doc.short_title()
        item['content'] = html2text.html2text(doc.summary())
        yield item
def import_html(results, content):
    doc = Document(content)
    converter = HTML2Text()
    converter.body_width = 0
    body = doc.summary()
    text = BeautifulSoup(body).get_text(" ")
    results.investigation.update(
        name=doc.short_title(),
        import_md=converter.handle(body),
        import_text=text)
def extract_url_content(self, url=None):
    if not url:
        url = self.url
    url_parse = urlparse(url)
    headers = {}
    if url_parse.netloc != "t.co":
        user_agent = ("Mozilla/5.0 (X11; Linux x86_64; rv:9.0.1) "
                      "Gecko/20100101 Firefox/9.0.1 Iceweasel/9.0.1")
        headers['User-Agent'] = user_agent
    content = requests.get(url, headers=headers)
    self.content_type = content.headers.get('content-type')
    self.status_code = content.status_code
    self.content = content.text
    self.url = self.clean_url(self.url)
    self.url = self.url_morph(content.url)
    self.image = self.find_taller_image(self.content)
    if self.image:
        self.logger.info("found image : %s" % self.image)
    self.url_parse = urlparse(self.url)
    if url_parse.netloc in oembed.keys():
        print "found oembed"
        mod = oembed[url_parse.netloc]
        self.content = mod.get_widget(url)
        self.summary = self.content
        self.title = os.path.basename(url_parse.path)
        self.content_type = "collectr/parsed"
        self.tags = [mod.get_tag()]
        self.tagstring = mod.get_tag()
        return
    if self.status_code >= 400:
        raise UrlExtractException("Can't extract content for %s (http<%d>)"
                                  % (url, content.status_code))
    elif "image" in self.content_type:
        print "log: content type : image"
        self.summary = """<img src="%s" />""" % self.url
        self.title = self.url
    elif "html" in self.content_type:
        doc = Document(self.content)
        self.summary = doc.summary()
        try:
            self.title = doc.short_title()
        except AttributeError:
            self.title = u"No title"
    else:
        self.summary = None
        self.title = os.path.basename(url_parse.path)
def search():
    search = request.args.get('q')
    if request.args.get('count'):
        count = request.args.get('count')
    else:
        count = 10
    if request.args.get('offset'):
        offset = request.args.get('offset')
    else:
        offset = 0
    if search:
        headers = {'Ocp-Apim-Subscription-Key': 'cfbabad517954c1f97ac66623a4fa7ee'}
        params = urllib.parse.urlencode({
            'q': search,
            'count': count,
            'offset': offset,
            'mkt': 'id-ms',
            'safesearch': 'Moderate',
        })
        try:
            conn_url = http.client.HTTPSConnection('api.cognitive.microsoft.com')
            conn_url.request("GET", "/bing/v7.0/search?%s" % params, "{body}", headers)
            response = conn_url.getresponse()
            data = response.read().decode('utf-8')
            data_array = json.loads(data)
            conn_url.close()
        except Exception as e:
            print("[Errno {0}] {1}".format(e.errno, e.strerror))
        print(data_array)
        i = 0
        for result in data_array['webPages']['value']:
            try:
                response = requests.get(result['url'], verify=False, stream=True)
            except requests.exceptions.ConnectionError:
                print(result['url'], "Connection refused")
                response = requests.get("https://pens.ac.id", verify=False)
            print(result['url'])
            doc = Document(response.content)
            raw = BeautifulSoup(doc.summary(html_partial=True), 'html.parser').get_text()
            result['sentiment'] = int(getSentiment(raw))
            print("SENTIMENT : ", result['sentiment'])
            result['status'] = analyze(raw)
            result['id_rank'] = i
            i += 1
        return render_template("index.html", data=data_array)
    else:
        return render_template("index.html")
def tell_url(un, url):
    buff = urllib2.urlopen(url)
    doc = Document(buff.read())
    html_buff = doc.summary()
    text_buff = extract_text(html_buff)
    class_name = un.tell_buff(text_buff)
    if class_name:
        class_name_human = un.get_class_name_human(class_name)
    else:
        class_name_human = None
    return class_name_human
def handle(self, path):
    r = requests.get(path)
    doc = Document(r.text)
    content = doc.summary(html_partial=True)
    content = self.prettify(doc, content)
    # fall back to the raw page when extraction yields too little
    if len(content) < 1000:
        content = r.text
    content = content.encode('utf8')
    return content
def get_article(d):
    url = d['url']
    if table.find_one(url=url):
        return
    print "fetching stuff for %s" % url
    d['html'] = requests.get(url).content
    try:
        doc = Document(d['html'])
        d['summary'] = html.fromstring(doc.summary()).xpath('string()')
        d['content'] = html.fromstring(doc.content()).xpath('string()')
        d['title'] = doc.title()
    except Exception, e:
        print e
def parse(filename):
    html = open(filename, encoding="latin").read()
    doc = Document(html)
    summary = doc.summary()
    # NOTE: re.M must be passed via flags=; as a positional argument it
    # would be interpreted as the count parameter of re.sub().
    summary = re.sub('(<map.*?</map>)', '', summary, flags=re.M)
    summary = re.sub(r"<img.*?usemap=.*?>", '', summary, flags=re.M)
    summary = re.sub(r'<a href="index.html"><img.*?/></a>', '', summary, flags=re.M)
    if 'href="index.html"' in summary:
        raise Exception("FAIIILEEED")
    print("<small>" + doc.short_title() + "</small>")
    print("<p>" + summary + "<p>")
    print("<p class='breakhere'></p>")
def handle(self, url, content):
    # Fix for issue27
    # content = re.sub('href="(.*?)"', '', content)
    doc = Document(content)
    try:
        hp = HParser(doc.summary())
        text = doc.title() + '\n' + hp.tag_list[0].rtext().replace('==+NL+==', '\n')
        text = '\n'.join(list(map(lambda l: l.strip(), text.split('\n'))))
        text = re.sub('\n{3,}', '\n\n', text).strip()
        return text
    except:
        self.logger.exception('Failed to parse the summary from readability!')
        raise
def extract(html):
    try:
        doc = Document(html)
        article = doc.summary()
        title = doc.short_title()
        return {
            'title': title,
            'article': html_to_text(article),
            'full_text': html_to_text(html),
        }
    except Exception:
        logging.exception('extract html')
        return {}
def scrape(URL):
    """Return the text of the article found at URL.

    Some whitespace changes will usually occur.
    """
    html = urllib.request.urlopen(URL).read()
    doc = Document(html)
    # summary()/short_title() parse the document lazily in readability-lxml,
    # so the explicit doc.parse([...]) step from older ports is not needed.
    readable_article = doc.summary()
    soup = BeautifulSoup(readable_article, 'html.parser')
    text = soup.get_text()
    return text
def read_command(api, args):
    from readability.readability import Document
    import html2text

    h = html2text.HTML2Text()
    h.inline_links = False
    h.ignore_images = True
    h.ignore_emphasis = True
    res = requests.get(args.url)
    if res.ok:
        article = Document(res.content)
        print article.short_title()
        print h.handle(article.summary())
    else:
        print res.headers['status']
def parse_html(url):
    response = request(url)
    if not response:
        return response
    document = Document(response.content)
    doc = {
        'titulo': document.short_title(),
        'texto': document.summary(),
        'site': urlparse(url).netloc,
        'url': get_object_or_404(Url, url=url),
        'imagem': get_image(response.content, urlparse(url).netloc),
    }
    return doc
def extractMainArticle(html):
    p = Document(html)
    readable_article = p.summary()
    readable_title = p.short_title()
    soup = BeautifulSoup(readable_article)
    text_nodes = soup.findAll(text=True)
    text = ''.join(text_nodes)
    wtext = {"title": readable_title, "text": text}
    return wtext
def make_readable(url):
    try:
        html = urllib2.urlopen(url).read()
    except urllib2.URLError:
        return None
    document = Document(html)
    document_dict = {
        'title': document.title(),
        'summary': document.summary(),
        'content': document.content(),
        'short_title': document.short_title(),
    }
    return document_dict
def _parse_article(self, response):
    feed_entry = response.meta["feed_entry"]
    il = FeedEntryItemLoader(parent=response.meta["il"])
    doc = Document(response.text, url=response.url)
    il.add_value("title", doc.short_title() or feed_entry.get("title"))
    summary = feed_entry.get("summary")
    try:
        content = doc.summary(html_partial=True)
        if summary and len(summary) > len(content):
            # Something probably went wrong if the extracted content is
            # shorter than the summary.
            raise Unparseable
    except Unparseable:
        content = summary
    il.add_value("content_html", content)
    return il.load_item()