import json

import coderpig

# weibo_url, containerid, max_page_count and save_path are module-level
# constants defined earlier in the article.


# Fetch the image URLs from the page
def fetch_pic():
    browser = coderpig.init_browser()
    for i in range(1, max_page_count + 1):
        url = weibo_url + containerid + "&page=" + str(i)
        browser.get(url)
        print("Parsing ====== page %d ====== " % i)
        html_text = browser.page_source
        soup = coderpig.get_bs(html_text)
        # Chrome renders a raw JSON response inside a <pre> tag
        data_json = soup.find('pre').get_text()
        data_dict = json.loads(data_json)
        cards = data_dict['data']['cards']
        for card in cards:
            if 'mblog' in card:
                mblog = card['mblog']
                if 'pics' in mblog:
                    pics = mblog['pics']
                    for pic in pics:
                        if 'large' in pic:
                            pic_url = pic['large']['url']
                            coderpig.download_pic(pic_url, save_path)
    browser.close()


if __name__ == '__main__':
    coderpig.init_https()
    coderpig.is_dir_existed(save_path)
    fetch_pic()
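None of the coderpig helpers that fetch_pic calls appear in this excerpt. Below is a minimal sketch of what they might look like, assuming Selenium drives a headless Chrome and requests/BeautifulSoup handle downloading and parsing; the function names come from the calls above, but every body here is a guess, not the article's actual implementation.

# Hypothetical sketch of the coderpig helper module used above.
import os
import ssl
import requests
from bs4 import BeautifulSoup
from selenium import webdriver


def init_https():
    # Assumed: disable HTTPS certificate verification for the crawl session
    ssl._create_default_https_context = ssl._create_unverified_context


def init_browser():
    # Assumed: start a headless Chrome instance via Selenium
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    return webdriver.Chrome(options=options)


def get_bs(html_text):
    # Parse an HTML string into a BeautifulSoup tree
    return BeautifulSoup(html_text, 'html.parser')


def is_dir_existed(path):
    # Create the directory if it does not exist yet
    os.makedirs(path, exist_ok=True)


def download_pic(pic_url, save_dir):
    # Assumed: save the image under its original file name
    resp = requests.get(pic_url, timeout=10)
    file_name = pic_url.split('/')[-1]
    with open(os.path.join(save_dir, file_name), 'wb') as f:
        f.write(resp.content)

Driving a real browser instead of issuing a plain HTTP request is presumably a way to pick up the cookies and headers Weibo expects, which is also why fetch_pic reads browser.page_source rather than a raw response body.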
import coderpig

# base_url, pic_save_path, taotu_url and the moye_pattern regex are
# module-level definitions from earlier in the article.


# Build the list of photo-set URLs. (The opening of this function is cut off
# in the excerpt; the signature follows from the call in __main__, and the
# <li> selector below is an assumption.)
def catch_pic_diagrams_url(url):
    url_list = []
    resp = coderpig.get_resp(url).decode('utf-8')
    soup = coderpig.get_bs(resp)
    lis = soup.findAll('li')
    for li in lis:
        # BeautifulSoup returns the class attribute as a list
        if li.get('class') != ['longword']:
            url_list.append(base_url + li.find('a')['href'])
    return url_list


# Fetch the images in one photo set
def catch_pic_diagrams(url):
    resp = coderpig.get_resp(url).decode('utf-8')
    soup = coderpig.get_bs(resp)
    # Strip the trailing site suffix from the <title> to name the folder
    dir_name = soup.find('title').get_text()[:-5]
    save_path = pic_save_path + dir_name + '/'
    coderpig.is_dir_existed(save_path)
    # Read the total page count from the 末页 ("last page") link
    page_count = int(moye_pattern.match(soup.find('a', text='末页')['href']).group(1))
    for page in range(1, page_count + 1):
        page_resp = coderpig.get_resp(url.replace('.html', '_' + str(page) + '.html')).decode('utf-8')
        page_soup = coderpig.get_bs(page_resp)
        # Grab the images on this page
        imgs = page_soup.find('p', attrs={'align': 'center'}).findAll('img')
        for img in imgs:
            coderpig.download_pic(img['src'], save_path)


if __name__ == '__main__':
    coderpig.init_https()
    url_list = catch_pic_diagrams_url(taotu_url)
    for url in url_list:
        print('====== Crawling ======: ' + url)
        catch_pic_diagrams(url)
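This second script additionally relies on coderpig.get_resp and a moye_pattern regex, neither of which is shown in the excerpt. A hypothetical version of both, assuming the 末页 ("last page") link's href ends in an '_<page>.html' suffix:

# Hypothetical definitions of get_resp and moye_pattern; the real article
# defines them elsewhere, so both are assumptions.
import re
from urllib import request

# Assumed pattern: capture the trailing page number in the 末页 href
moye_pattern = re.compile(r'.*_(\d+)\.html')


def get_resp(url):
    # Assumed: fetch a URL and return the raw response bytes
    return request.urlopen(url).read()

With this pattern, moye_pattern.match('/tupian/abc_12.html').group(1) yields '12', which catch_pic_diagrams casts to int and uses as the page count for the set.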