def __init__(self, _id, content=None, **kwargs):
    """Prepare a parser for the page identified by *_id*.

    If the page is already stored in the database, all work is skipped.
    Otherwise the content is either taken from *content* or downloaded,
    and an lxml selector is built from it.

    NOTE(review): on the two early-return paths (page already in DB,
    empty download) ``self.content`` / ``self.selector`` — and in the
    first case also ``self.valid`` — are left unset; callers presumably
    check ``self.valid`` first. Confirm the caller contract.
    """
    self._id = str(_id)
    self.paper = Paper()
    # Already present in the database — nothing to fetch or parse.
    if col_paper.find_one({"url_id": self._id}):
        return
    if content is not None:
        # Caller supplied the page body directly.
        self.content = content
        self.valid = True
    else:
        self.content = Downloader(host + self._id)()
        self.valid = bool(self.content)
        if not self.valid:
            logger.error("当前网页为空,无法进行解析\turl_id:" + self._id)
            return
    self.selector = etree.HTML(self.content)
    self.paper.url_id = self._id
def get_paper(self, paper_url=None, paper_id=None):
    """
    Load paper details for the paper given by detail page URL or
    numeric ID.

    Fetches the AllRis ``vo020.asp`` detail page, parses the data table
    (subject, author, status, related papers, consultation list),
    collects the main document and auxiliary attachments, and stores
    the resulting ``Paper`` via ``self.db.save_paper``.  Retries up to
    3 times because AllRis sometimes returns truncated markup on the
    first request.
    """
    paper_url = ('%svo020.asp?VOLFDNR=%s'
                 % (self.config['scraper']['base_url'], paper_id))
    logging.info("Getting paper %d from %s", paper_id, paper_url)

    # Stupid re-try concept because AllRis sometimes misses
    # start < at tags at first request.
    try_counter = 0
    while True:
        try:
            response = self.get_url(paper_url)
            if not response:
                return
            if "noauth" in response.url:
                # Bugfix: message used to read "seems to private".
                logging.warn("Paper %s in %s seems to be private",
                             paper_id, paper_url)
                return
            text = response.text
            doc = html.fromstring(text)
            data = {}

            # Check the Beratungsfolge (consultation sequence) table —
            # let's hope we always have this table.
            table = self.table_css(doc)[0]
            self.consultation_list_start = False
            # Bugfix: both lists are read after the loop and used to be
            # unbound when the page contained no consultation list.
            consultations = []
            date_list = []
            for line in table:
                # Bugfix: reset per row; used to be unbound on the
                # first row (NameError) or sticky from the previous row
                # when an unexpected tag was encountered.
                headline = None
                if line.tag == 'tr':
                    headline = line[0].text
                elif line.tag == 'td':
                    headline = line.text
                else:
                    logging.error("ERROR: Serious error in data table. "
                                  "Unable to parse.")
                if headline:
                    headline = headline.split(":")[0].lower()
                    if headline[-1] == ":":
                        headline = headline[:-1]
                    if headline == "betreff":
                        value = line[1].text_content().strip()
                        # There is some html comment with a script tag
                        # in front of the text which we remove.
                        value = value.split("-->")[1]
                        # remove all multiple spaces from the string
                        data[headline] = " ".join(value.split())
                    elif headline in [
                        'verfasser', u'federführend', 'drucksache-art'
                    ]:
                        data[headline] = line[1].text.strip()
                    elif headline in ['status']:
                        data[headline] = line[1].text.strip()
                        # related papers
                        if len(line) > 2:
                            if len(line[3]):
                                # Gets originalId. is there something
                                # else at this position? (will break)
                                # Bugfix: use a dedicated name — this
                                # used to clobber the paper_id argument
                                # which is needed again further down.
                                related_id = line[3][0][0][1][0].get(
                                    'href').split('=')[1].split('&')[0]
                                data['relatedPaper'] = [
                                    Paper(originalId=related_id)
                                ]
                    elif headline == "beratungsfolge":
                        # The actual list will be in the next row
                        # inside a table, so we only set a marker.
                        self.consultation_list_start = True
                    elif self.consultation_list_start:
                        # The consultation list can take different
                        # forms (complex example:
                        # ratsinfo.aachen.de/bi/vo020.asp?VOLFDNR=10822):
                        # a headline row naming the committee and the
                        # type of consultation (3 or 2 cells thanks to
                        # colspan), followed by 0-n detail rows
                        # (7 cells) with date, transcript and decision.
                        # Every meeting becomes a separate entry; they
                        # can be grouped together again later.
                        elem = line[0][0]
                        # The first line is pixel images, so skip it.
                        i = 0
                        for elem_line in elem:
                            if i == 0:
                                i += 1
                                continue
                            new_consultation = Consultation()
                            new_consultation.status = \
                                elem_line[0].attrib['title'].lower()
                            if len(elem_line) == 3:
                                # Headline row: "color/status", name of
                                # committee / link to TOP, role.
                                new_consultation.role = \
                                    elem_line[2].text.strip()
                            elif len(elem_line) == 2:
                                # For some obscure reason the action
                                # cell is sometimes missing; nothing
                                # usable to extract here.
                                pass
                            elif len(elem_line) == 7:
                                try:
                                    # Detail row.  The date can be text
                                    # or a link with that text (the
                                    # link itself is ignored).
                                    if len(elem_line[1]) == 1:
                                        date_text = elem_line[1][0].text
                                    else:
                                        date_text = elem_line[1].text
                                    date_list.append(
                                        datetime.datetime.strptime(
                                            date_text.strip(),
                                            "%d.%m.%Y"))
                                    if len(elem_line[2]):
                                        # Form with silfdnr and
                                        # toplfdnr but only in link
                                        # (action=
                                        # "to010.asp?topSelected=57023")
                                        form = elem_line[2][0]
                                        meeting_id = form[0].attrib[
                                            'value']
                                        new_consultation.meeting = [
                                            Meeting(
                                                originalId=meeting_id)
                                        ]
                                    else:
                                        # No link to the TOP.  Should
                                        # not be possible but happens.
                                        # (TODO: Bugreport?)
                                        logging.warn(
                                            "AgendaItem in consultation "
                                            "list on the web page does not "
                                            "contain a link to the actual "
                                            "meeting at paper %s",
                                            paper_url)
                                    toplfdnr = None
                                    if len(elem_line[6]) > 0:
                                        form = elem_line[6][0]
                                        toplfdnr = form[0].attrib[
                                            'value']
                                    if toplfdnr:
                                        new_consultation.originalId = \
                                            "%s-%s" % (toplfdnr,
                                                       paper_id)
                                        # actually the id of the
                                        # transcript
                                        new_consultation.agendaItem = \
                                            AgendaItem(
                                                originalId=toplfdnr)
                                        # e.g. "ungeändert beschlossen"
                                        new_consultation.agendaItem.result \
                                            = elem_line[4].text.strip()
                                        consultations.append(
                                            new_consultation)
                                    else:
                                        logging.error(
                                            "missing agendaItem ID in "
                                            "consultation list at %s",
                                            paper_url)
                                except (IndexError, KeyError):
                                    # Bugfix: the same error used to be
                                    # logged twice in a row.
                                    logging.error(
                                        "Serious error in consultation "
                                        "list. Unable to parse.")
                                    return []
                            i += 1
                        # Theory: the consultations are not needed at
                        # all here because they are scraped at the
                        # meeting.
                        # set the marker to False again as we have
                        # read the list
                        self.consultation_list_start = False
            # We simply ignore the rest of the table (there might not
            # be much more actually).

            # The actual text comes after the table in a div but it's
            # not valid XML or HTML, thus using a regex.
            data['docs'] = self.body_re.findall(response.text)
            # The earliest consultation date (if any) is used as the
            # publication date.
            first_date = min(date_list) if date_list else False
            paper = Paper(originalId=paper_id)
            paper.originalUrl = paper_url
            paper.name = data['betreff']
            paper.description = data['docs']
            if 'drucksache-art' in data:
                paper.paperType = data['drucksache-art']
            if first_date:
                paper.publishedDate = first_date.strftime("%d.%m.%Y")
            paper.auxiliaryFile = []
            # get the attachments step 1 (Drucksache = main document)
            file_1 = self.attachment_1_css(doc)
            if len(file_1):
                if file_1[0].value:
                    href = ('%sdo027.asp'
                            % self.config['scraper']['base_url'])
                    original_id = file_1[0].value
                    name = 'Drucksache'
                    main_file = File(originalId=original_id, name=name)
                    main_file = self.get_file(main_file, href, True)
                    paper.mainFile = main_file
            # get the attachments step 2 (additional attachments)
            files = self.attachments_css(doc)
            if len(files) > 0:
                if len(files[0]) > 1:
                    if files[0][1][0].text.strip() == "Anlagen:":
                        for tr in files[0][2:]:
                            link = tr[0][0]
                            href = ("%s%s" % (
                                self.config['scraper']['base_url'],
                                link.attrib["href"]))
                            name = link.text
                            path_tokens = link.attrib["href"].split('/')
                            original_id = "%d-%d" % (
                                int(path_tokens[4]),
                                int(path_tokens[6]))
                            aux_file = File(originalId=original_id,
                                            name=name)
                            aux_file = self.get_file(aux_file, href)
                            paper.auxiliaryFile.append(aux_file)
            # Bugfix: removed leftover debug "print paper.auxiliaryFile".
            if not len(paper.auxiliaryFile):
                del paper.auxiliaryFile
            self.db.save_paper(paper)
            return
        except (KeyError, IndexError):
            if try_counter < 3:
                logging.info("Try again: Getting paper %d from %s",
                             paper_id, paper_url)
                try_counter += 1
            else:
                logging.error("Failed getting paper %d from %s",
                              paper_id, paper_url)
                return
def get_meeting(self, meeting_url=None, meeting_id=None):
    """
    Load meeting details (e.g. agendaitems) for the given detail page
    URL or numeric ID.

    Fetches the AllRis ``to010.asp`` XML export for the meeting, reads
    the "special" and "head" sections, then fetches one ``to020.asp``
    detail page per agenda item (including its consultation/paper
    link) and stores the assembled ``Meeting`` via
    ``self.db.save_meeting``.
    """
    meeting_url = ("%sto010.asp?selfaction=ws&template=xyz&SILFDNR=%s"
                   % (self.config['scraper']['base_url'], meeting_id))
    logging.info("Getting meeting %d from %s", meeting_id, meeting_url)
    r = self.get_url(meeting_url)
    if not r:
        return
    # If r.history has an item we have a problem: a 302 redirect means
    # the meeting is non-public, anything else is unexpected.
    if len(r.history):
        if r.history[0].status_code == 302:
            # Bugfix: the second placeholder used to receive
            # meeting_id instead of meeting_url.
            logging.info("Meeting %d from %s seems to be private",
                         meeting_id, meeting_url)
        else:
            logging.error(
                "Strange redirect %d from %s with status code %s",
                meeting_id, meeting_url, r.history[0].status_code)
        return
    h = HTMLParser.HTMLParser()
    # Force everything into ASCII with XML character references so the
    # recovering parser below copes with the odd encoding.
    xml = str(r.text.encode('ascii', 'xmlcharrefreplace'))
    parser = etree.XMLParser(recover=True)
    root = etree.fromstring(xml, parser=parser)

    meeting = Meeting(originalId=meeting_id)
    # special area
    special = {}
    for item in root[0].iterchildren():
        special[item.tag] = item.text
    # Where do we get the date from? Only via the overview?
    #if 'sisb' in special:
    #if 'sise' in special:
    if 'saname' in special:
        meeting.type = special['saname']
    # head area
    head = {}
    for item in root[1].iterchildren():
        if item.text:
            head[item.tag] = h.unescape(item.text)
        else:
            # Bugfix: used to be ``head[item.text] = ''`` which keyed
            # the dict on the (empty/None) text instead of the tag.
            head[item.tag] = ''
    if 'sitext' in head:
        meeting.name = head['sitext']
    if 'raname' in head:
        meeting.room = head['raname']
    if 'raort' in head:
        meeting.address = head['raort']
    agendaitems = []
    for item in root[2].iterchildren():
        elem = {}
        for e in item.iterchildren():
            elem[e.tag] = e.text
        # Section number like "3.1.2", dropping trailing zero levels.
        section = [elem['tofnum'], elem['tofunum'], elem['tofuunum']]
        section = [x for x in section if x != "0"]
        elem['section'] = ".".join(section)
        agendaitem = AgendaItem()
        agendaitem.originalId = int(elem['tolfdnr'])
        agendaitem.public = (elem['toostLang'] == u'öffentlich')
        # get agenda detail page
        # TODO: Own Queue
        time.sleep(self.config['scraper']['wait_time'])
        agendaitem_url = (
            '%sto020.asp?selfaction=ws&template=xyz&TOLFDNR=%s'
            % (self.config['scraper']['base_url'],
               agendaitem.originalId))
        logging.info("Getting agendaitem %d from %s",
                     agendaitem.originalId, agendaitem_url)
        agendaitem_r = self.get_url(agendaitem_url)
        if not agendaitem_r:
            return
        if len(agendaitem_r.history):
            # Bugfix: used to log the meeting's id/url instead of the
            # agenda item's.
            logging.info("Agenda item %d from %s seems to be private",
                         agendaitem.originalId, agendaitem_url)
        else:
            agendaitem_xml = agendaitem_r.text.encode(
                'ascii', 'xmlcharrefreplace')
            # Bugfix for the old "mixup of agendaitem_parser / parser"
            # TODO: the freshly created parser is now actually used.
            agendaitem_parser = etree.XMLParser(recover=True)
            agendaitem_root = etree.fromstring(
                agendaitem_xml, parser=agendaitem_parser)
            add_agenda_item = {}
            for add_item in agendaitem_root[0].iterchildren():
                if add_item.tag == "rtfWP" and len(add_item) > 0:
                    try:
                        agendaitem.resolution_text = h.unescape(
                            etree.tostring(add_item[0][1][0]))
                    # Bugfix: narrowed from a bare ``except:`` which
                    # also swallowed KeyboardInterrupt/SystemExit.
                    except Exception:
                        logging.warn(
                            "Unable to parse resolution text at "
                            "%s", agendaitem_url)
                else:
                    if add_item.text:
                        add_agenda_item[add_item.tag] = h.unescape(
                            add_item.text)
            if 'toptext' in add_agenda_item:
                agendaitem.name = add_agenda_item['toptext']
            # there are papers with id = 0. we don't need them.
            if int(elem['volfdnr']):
                consult_id = (unicode(agendaitem.originalId)
                              + unicode(int(elem['volfdnr'])))
                consultation = Consultation(originalId=consult_id)
                paper_id = int(elem['volfdnr'])
                if 'voname' in add_agenda_item:
                    consultation.paper = Paper(
                        originalId=paper_id,
                        name=add_agenda_item['voname'])
                else:
                    consultation.paper = Paper(originalId=paper_id)
                agendaitem.consultation = [consultation]
                if 'vobetr' in add_agenda_item:
                    if add_agenda_item['vobetr'] != agendaitem.name:
                        logging.warn(
                            "different values for name: %s and %s",
                            agendaitem.name, add_agenda_item['vobetr'])
                # Queue the referenced paper for scraping, if a paper
                # queue is attached to this scraper.
                if hasattr(self, 'paper_queue'):
                    self.paper_queue.add(int(elem['volfdnr']))
            if 'totyp' in add_agenda_item:
                agendaitem.result = add_agenda_item['totyp']
        agendaitems.append(agendaitem)
    meeting.agendaItem = agendaitems
    oid = self.db.save_meeting(meeting)
    logging.info("Meeting %d stored with _id %s", meeting_id, oid)