def extractSeriesReleases(self, seriesPageUrl, soup):
    """
    Extract per-chapter release messages from a parsed Qidian series page.

    Args:
        seriesPageUrl: URL of the series page, used as the base when
            rebasing each chapter's relative link.
        soup: BeautifulSoup document of the series page. Chapter rows are
            ``<a class="chapter-link">`` elements carrying
            ``data-preprocessor-*`` attributes.

    Returns:
        A list of serialized release messages (one per valid chapter),
        or an empty list when fewer than 3 chapters were extracted.
    """
    chapter_divs = soup.find_all("a", class_='chapter-link')

    # Hoisted out of the loop: one Calendar instance serves every row.
    calendar = parsedatetime.Calendar()

    retval = []
    for linka in chapter_divs:
        state   = linka['data-preprocessor-state']
        index   = linka['data-preprocessor-index']
        title   = linka['data-preprocessor-title']
        reldate = linka['data-preprocessor-reldate']

        # status < 1 means parsedatetime could not interpret the date
        # string at all — skip the row rather than emit a bogus timestamp.
        itemDate, status = calendar.parse(reldate)
        if status < 1:
            continue
        reldate = time.mktime(itemDate)

        # Trailing "/" matches the canonical URL form used elsewhere.
        relurl = common.util.urlFuncs.rebaseUrl(linka['href'] + "/", seriesPageUrl)

        raw_item = {
            'srcname'   : "Qidian",
            'published' : float(reldate),
            'linkUrl'   : relurl,
        }

        # state '0' -> new/updated release; '2' -> release was deleted.
        if state == '0':
            raw_msg = msgpackers.buildReleaseMessageWithType(raw_item, title, None, index, None, tl_type='translated', prefixMatch=True)
            retval.append(msgpackers.serialize_message(raw_msg))
        elif state == "2":
            raw_msg = msgpackers.buildReleaseDeleteMessageWithType(raw_item, title, None, index, None, tl_type='translated', prefixMatch=True)
            retval.append(msgpackers.serialize_message(raw_msg))
        else:
            # Was a bare print(); route through the logger like the rest
            # of this method.
            self.log.warning("Unknown state: %s", state)

    # Do not add series without 3 chapters.
    if len(retval) < 3:
        self.log.info("Less than three chapters!")
        return []

    return retval