def get_body(self): element_body = self.get_element_body() try_drop_tree(self.parser, element_body, "li.button-social") try_drop_tree(self.parser, element_body, "div.sharecount") clean_relativ_urls(element_body, "http://presseurop.eu") return self.parser.tostring(element_body)
def get_body(self): element_body = self.get_element_body() remove_from_selector_list(self.parser, element_body, ["p.auteur", "h4"]) try_remove_from_selector_list(self.parser, element_body, ["p.tag", "div.alire", self.element_title_selector, "h4"]) try_drop_tree(self.parser, element_body, "script") clean_relativ_urls(element_body, "http://ecrans.fr") return self.parser.tostring(element_body)
def get_body(self): element_body = self.get_element_body() remove_from_selector_list(self.parser, element_body, ["p.auteur", "h4"]) try_remove_from_selector_list( self.parser, element_body, ["p.tag", "div.alire", self.element_title_selector, "h4"]) try_drop_tree(self.parser, element_body, "script") clean_relativ_urls(element_body, "http://ecrans.fr") return self.parser.tostring(element_body)
def get_content(self): """ Get the message content. This page has a date, but it is less precise than the main list page, so we only use it for the message content. """ try: content = self.parser.select(self.document.getroot(), "div.txtMessage div.contenu", 1) except BrokenPageError: # This happens with some old messages (2007) content = self.parser.select(self.document.getroot(), "div.txtMessage", 1) content = make_links_absolute(content, self.url) try_drop_tree(self.parser, content, "script") return self.parser.tostring(content)
def get_content(self): """ Get the message content. This page has a date, but it is less precise than the main list page, so we only use it for the message content. """ try: content = self.parser.select(self.document.getroot(), 'div.txtMessage div.contenu', 1) except BrokenPageError: # This happens with some old messages (2007) content = self.parser.select(self.document.getroot(), 'div.txtMessage', 1) content = make_links_absolute(content, self.url) try_drop_tree(self.parser, content, 'script') return self.parser.tostring(content)
def get_body(self): element_body = self.get_element_body() remove_from_selector_list(self.parser, element_body, [self.element_title_selector]) drop_comments(element_body) try_drop_tree(self.parser, element_body, "script") try_drop_tree(self.parser, element_body, "liste") try_remove_from_selector_list(self.parser, element_body, [ "div#article-comments", "div.infos", "div.photo", "div.art_bandeau_bottom", "div.view", "span.auteur_long", "#toolsbar", 'link' ]) for image in self.parser.select(element_body, 'img'): if image.attrib['src'].endswith('coeur-.gif'): image.drop_tree() for div in self.parser.select(element_body, 'div'): if div.text == ' Player Figaro BFM ': obj = div.getnext() a = obj.getnext() if obj.tag == 'object': obj.drop_tree() if a.tag == 'a' and 'BFM' in a.text: a.drop_tree() div.drop_tree() # This part of the article seems manually generated. for crappy_title in self.parser.select(element_body, 'p strong'): if crappy_title.text == 'LIRE AUSSI :' or crappy_title.text == 'LIRE AUSSI:': # Remove if it has only links for related in crappy_title.getparent().itersiblings(tag='p'): if len(related) == len(list( related.iterchildren(tag='a'))): related.drop_tree() else: break crappy_title.drop_tree() txts = element_body.find_class("texte") if len(txts) > 0: txts[0].drop_tag() element_body.tag = "div" return self.parser.tostring(element_body)
def get_body(self): div = self.document.getroot().find('.//div[@class="sectbody"]') try_drop_tree(self.parser, div, "div.anchor") clean_relativ_urls(div, "http://taz.de") return self.parser.tostring(div)
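# The methods above rely on a handful of DOM-cleanup helpers (try_drop_tree,
# remove_from_selector_list, try_remove_from_selector_list, clean_relativ_urls,
# make_links_absolute) that are not defined in this section. What follows is a
# minimal, hypothetical sketch of how they could look, with signatures inferred
# from the call sites, assuming lxml.html elements and a parser whose
# select(root, selector, count) method returns the matching elements and raises
# when a required match is missing; the real implementations may differ.

def try_drop_tree(parser, base_element, selector):
    # Drop every subtree matching the CSS selector; do nothing if there is no match.
    for element in parser.select(base_element, selector):
        element.drop_tree()

def remove_from_selector_list(parser, base_element, selector_list):
    # Remove exactly one element per selector; select(..., 1) is assumed to raise
    # if the element is missing, so absence is treated as an error.
    for selector in selector_list:
        parser.select(base_element, selector, 1).drop_tree()

def try_remove_from_selector_list(parser, base_element, selector_list):
    # Same as above, but selectors that match nothing are silently skipped.
    for selector in selector_list:
        for element in parser.select(base_element, selector):
            element.drop_tree()

def clean_relativ_urls(base_element, domain):
    # Prefix relative href/src targets with the site domain.
    for element, attribute, url, _pos in base_element.iterlinks():
        if attribute in ('href', 'src') and url.startswith('/'):
            element.set(attribute, domain + url)

def make_links_absolute(element, base_url):
    # Thin wrapper around lxml.html's make_links_absolute(), returning the element
    # so it can be used in an assignment as in get_content() above.
    element.make_links_absolute(base_url)
    return element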