def __init__(self):
    """Pop the next target board (ita) URL from the download-list file.

    Reads the URL list at ``rp_config.root_path + rp_config.dl_ita_urls``;
    if the list is empty, regenerates it via ``bbsmenu`` and re-reads it.
    The first URL becomes the current target (``self.target_ita_url``),
    the board name is extracted from its last path component into
    ``self.target_ita``, and the remaining URLs are written back so the
    next run consumes the next board.

    Attributes set: ita_urls_file, ita_urls, target_ita_url, target_ita.
    Raises IndexError if the list is still empty after regeneration
    (behavior unchanged from the original).
    """
    # Hoist the repeated path expression; same file is read and rewritten.
    list_path = rp_config.root_path + rp_config.dl_ita_urls

    self.ita_urls_file = open(list_path, 'r')
    self.ita_urls = self.ita_urls_file.readlines()
    self.ita_urls_file.close()  # fix: handle was previously leaked

    if not self.ita_urls:
        # List exhausted: rebuild it from the BBS menu, then reload.
        import bbsmenu
        menu = bbsmenu.bbsmenu()
        menu.create_dl_ita_list()
        self.ita_urls_file = open(list_path, 'r')
        self.ita_urls = self.ita_urls_file.readlines()
        self.ita_urls_file.close()  # fix: handle was previously leaked

    # First URL in the file is the board to crawl next.
    self.target_ita_url = self.ita_urls[0].rstrip()
    # Board name is the trailing path component, e.g. '.../news/' -> 'news'.
    ita_pat = re.compile(r'/([^/]+)/$')
    self.target_ita = ita_pat.findall(self.target_ita_url)[0]
    print(self.target_ita)  # single-arg form is valid Python 2 and 3

    # Persist the list minus the URL we just consumed.
    del self.ita_urls[0]
    self.ita_urls_file = open(list_path, 'w')
    self.ita_urls_file.writelines(self.ita_urls)
    self.ita_urls_file.close()
else: print('保存に失敗') #print img_result else: print('ダウンロード失敗') if dl_dat_flag: res_nos = "".join( map( (lambda x: str(x) + ','), v['res_nos']) ) if img_ids: s_mysql.save_res(no = k, res_nos= res_nos, res_count = v['res_count'], th_id = th_id, img_ids=img_ids, time = v['res_date']) if dl_dat_flag: s_dat.save_dat_file() os.utime(rp_config.renew, None) else: print '-画像なし' s_dat.save_num_file() time.sleep(15) #print line if __name__ == '__main__': crawl = crawler() try: crawl.crawl_subject() crawl.crawl_dat() except: import bbsmenu menu = bbsmenu.bbsmenu() menu.create_bbs_dict() menu.create_dl_ita_list() else: crawl.crawl_subject() crawl.crawl_dat()