def daum_main(rss, title, episode_start, episode_end, output_dir, merge, png):
    # Collect episode ids from the RSS feed and put them in oldest-first order.
    idlist = parsing_rss(rss)
    idlist.reverse()
    if len(idlist) < 1:
        print '[ERROR] Not found episode in RSS'
        sys.exit(1)

    episode = 0
    cookie = None
    for id in idlist:
        if cookie is None:
            cookie = get_cookie(id)

        episode += 1
        if episode_start > episode:
            continue
        if episode_end < episode:
            break

        info = get_imginfo(id, cookie)

        # Use the title from the feed unless one was given, stripping
        # characters that are not allowed in Windows file names.
        if title is None:
            title = info['title'].encode('euc-kr')
            title = title.translate(None, '\\/:*?"<>|').strip()

        if not os.path.isdir(output_dir + title):
            os.makedirs(output_dir + title)

        # get episode title
        episode_title = info['episodeTitle'].encode('euc-kr')
        episode_title = episode_title.translate(None, '\\/:*?"<>|').strip()

        # download every page image of this episode with wget
        img_list = []
        img_output = "%s%s\\%s_%03d_%s.jpg" % \
            (output_dir, title, title, episode, episode_title)
        sequence = 0
        for img in info['images']:
            sequence += 1
            output_name = "%s%s\\%s_%03d_%03d.jpg" % \
                (output_dir, title, title, episode, sequence)
            wget_cmd = 'wget -O "' + output_name.decode('euc-kr') + '" ' + img['url']
            result = os.system(wget_cmd.encode('euc-kr'))
            if result != 0:
                print '[ERROR] Failed download'
                sys.exit(1)
            img_list.append(output_name)

        # merge the page images into a single file per episode
        if merge:
            if img_output[-4:] == '.jpg':
                img_output = img_output[:-4]
            if merge_image.merge_image(img_output, img_list, png):
                # delete the individual page images once the merge succeeded
                for img in img_list:
                    os.remove(img)
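

# Illustration only (not part of the original file): the translate()/strip()
# calls above implement a small sanitization step that removes characters which
# are invalid in Windows file names. Written out as a helper, the intent is:
def _safe_filename(name, encoding='euc-kr'):
    """Encode a unicode title and drop characters Windows forbids in file names."""
    return name.encode(encoding).translate(None, '\\/:*?"<>|').strip()
# e.g. _safe_filename(u'My: Webtoon?') -> 'My Webtoon'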


# Earlier revision of naver_main (plain-HTTP image hosts, curl/wget called
# inline); the updated revision further below redefines it.
def naver_main(title_id, title, episode_start, episode_end, output_dir, merge, png, best):
    if title_id == '':
        usage()
    if title is None:
        title = ''
    if episode_start > episode_end:
        print "[ERROR] Incorrect episode range"
        sys.exit(1)

    if not os.path.isdir(output_dir + title):
        cmd = 'md "%s"' % (output_dir + title)  # Windows shell: create the output directory
        os.system(cmd)

    find_string = ['http://imgcomic.naver.com/webtoon/' + title_id + '/',
                   'http://imgcomic.naver.net/webtoon/' + title_id + '/']
    find_string_for_best = ['http://imgcomic.naver.net/nas/user_contents_data/challenge_comic', '']

    retry_episode = 0
    page_url = ''
    webtoon_type = WEEKLY_WEBTOON
    if best:
        webtoon_type = CHALLENGE_BEST

    for episode in range(episode_start, episode_end + 1):
        if os.path.isfile('.\\output.output'):
            os.system('del .\\output.output')  # Windows shell: drop the previous page dump

        if webtoon_type == WEEKLY_WEBTOON:
            print 'Try to start WEEKLY webtoon download..'
            page_url = '"http://comic.naver.com/webtoon/detail.nhn?titleId=%s&no=%d"' % (title_id, episode)
        elif webtoon_type == CHALLENGE_BEST:
            print 'Try to start BEST challenge webtoon download..'
            page_url = '"http://comic.naver.com/bestChallenge/detail.nhn?titleId=%s&no=%d"' % (title_id, episode)
        else:
            break

        # fetch the episode page with curl and wait up to 3 seconds for it to finish
        curl_cmd = 'curl -o .\\output.output ' + page_url
        print 'curl cmd: ' + curl_cmd
        curl = subprocess.Popen(curl_cmd, shell=True)
        for i in range(0, 30):
            time.sleep(0.1)
            if curl.poll() is not None:
                break

        # tolerate up to 5 consecutive failed episodes before assuming we are past the last one
        if not os.path.isfile('.\\output.output'):
            if retry_episode < 5:
                retry_episode += 1
                continue
            print '[INFO] Finish!'
            break
        retry_episode = 0

        # scan the fetched HTML for image URLs on the known comic image hosts
        output_file = open('.\\output.output', 'r')
        img_list = []
        seq = 0
        s_idx = -1
        for line in output_file.readlines():
            line = line.strip()
            if webtoon_type == WEEKLY_WEBTOON:
                s_idx = line.find(find_string[0])
                if s_idx == -1:
                    s_idx = line.find(find_string[1])
            else:  # webtoon_type == CHALLENGE_BEST
                s_idx = line.find(find_string_for_best[0])
            if s_idx != -1:
                seq += 1
                e_idx = line[s_idx:].find('"')
                url = line[s_idx:s_idx + e_idx]
                if url[-4:].lower() == ".jpg" or url[-4:].lower() == ".png":
                    output_name = "%s%s/%s_%03d_%03d.jpg" % \
                        (output_dir, title, title, episode, seq)
                    # the image host expects a Referer pointing back to the episode page
                    if webtoon_type == WEEKLY_WEBTOON:
                        referer = 'http://comic.naver.com/webtoon/detail.nhn?titleId=%s&no=%d' % (title_id, episode)
                    else:  # webtoon_type == CHALLENGE_BEST
                        referer = 'http://comic.naver.com/bestChallenge/detail.nhn?titleId=%s&no=%d' % (title_id, episode)
                    wget_cmd = 'wget -O ' + output_name + ' --header="Referer: ' + referer + '" ' + url
                    print wget_cmd
                    result = os.system(wget_cmd)
                    if result != 0:
                        print '[ERROR] Failed download'
                    img_list.append(output_name)
        output_file.close()

        # merge image files
        if merge:
            img_output = "%s%s/%s_%03d" % (output_dir, title, title, episode)
            if merge_image.merge_image(img_output, img_list, png):
                # delete the individual page images once the merge succeeded
                for img in img_list:
                    os.remove(img)
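

# Illustration only (not part of the original file): how the line scan above
# extracts an image URL. The HTML line and title id in the example are made up;
# the logic mirrors the find()/slice code in naver_main.
def _extract_img_url(line, prefix):
    """Return the first URL in `line` that starts with `prefix`, or None."""
    s_idx = line.find(prefix)
    if s_idx == -1:
        return None
    e_idx = line[s_idx:].find('"')  # the src attribute ends at the closing quote
    return line[s_idx:s_idx + e_idx]
# e.g. _extract_img_url('<img src="http://imgcomic.naver.net/webtoon/123456/7/img_001.jpg" alt="">',
#                       'http://imgcomic.naver.net/webtoon/123456/')
#      -> 'http://imgcomic.naver.net/webtoon/123456/7/img_001.jpg'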


# Updated revision of naver_main: HTTPS image hosts, .gif support, and
# downloads delegated to the _down() helper.
def naver_main(title_id, title, episode_start, episode_end, output_dir, merge, png, best):
    if title_id == '':
        usage()
    if title is None:
        title = ''
    if episode_start > episode_end:
        print "[ERROR] Incorrect episode range"
        sys.exit(1)

    if not os.path.isdir(output_dir + title):
        cmd = 'md "%s"' % (output_dir + title)  # Windows shell: create the output directory
        os.system(cmd)

    # image hosts used by regular (weekly) webtoons and by best-challenge webtoons
    find_strings = ['https://imgcomic.naver.com/webtoon/' + title_id + '/',
                    'https://imgcomic.naver.net/webtoon/' + title_id + '/',
                    'https://image-comic.pstatic.net/webtoon/' + title_id + '/']
    find_string_for_best = ['https://imgcomic.naver.net/nas/user_contents_data/challenge_comic', '']

    retry_episode = 0
    referer = ''
    webtoon_type = WEEKLY_WEBTOON
    if best:
        webtoon_type = CHALLENGE_BEST

    for episode in range(episode_start, episode_end + 1):
        if os.path.isfile('.\\output.output'):
            os.system('del .\\output.output')  # Windows shell: drop the previous page dump

        if webtoon_type == WEEKLY_WEBTOON:
            print 'Try to start WEEKLY webtoon download..'
            page_url = 'https://comic.naver.com/webtoon/detail.nhn?titleId=%s&no=%d' % (title_id, episode)
        else:  # webtoon_type == CHALLENGE_BEST
            print 'Try to start BEST challenge webtoon download..'
            page_url = 'https://comic.naver.com/bestChallenge/detail.nhn?titleId=%s&no=%d' % (title_id, episode)
        referer = page_url  # the episode page also serves as the Referer for its images

        # fetch the episode page; tolerate up to 5 consecutive failures, then assume we are done
        if _down('./output.output', referer, page_url, True) != 0:
            if retry_episode < 5:
                retry_episode += 1
                continue
            print '[INFO] Finish!'
            break
        retry_episode = 0

        # scan the fetched HTML for image URLs on the known comic image hosts
        output_file = open('.\\output.output', 'r')
        img_list = []
        seq = 0
        s_idx = -1
        for line in output_file.readlines():
            line = line.strip()
            if webtoon_type == WEEKLY_WEBTOON:
                for find_string in find_strings:
                    s_idx = line.find(find_string)
                    if s_idx != -1:
                        break
            else:  # webtoon_type == CHALLENGE_BEST
                s_idx = line.find(find_string_for_best[0])
            if s_idx != -1:
                seq += 1
                e_idx = line[s_idx:].find('"')
                url = line[s_idx:s_idx + e_idx]
                if url[-4:].lower() in (".jpg", ".png", ".gif"):
                    output_name = "%s%s/%s_%03d_%03d.jpg" % \
                        (output_dir, title, title, episode, seq)
                    result = _down(output_name, referer, url)
                    if result != 0:
                        print '[ERROR] Failed download'
                    img_list.append(output_name)
        output_file.close()

        # merge image files
        if merge:
            img_output = "%s%s/%s_%03d" % (output_dir, title, title, episode)
            if merge_image.merge_image(img_output, img_list, png):
                # delete the individual page images once the merge succeeded
                for img in img_list:
                    os.remove(img)
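

# _down() is used above but defined elsewhere in this file. The sketch below
# (named _down_sketch so it cannot clash with the real helper) shows the
# contract _down appears to have: save `url` to `output_name`, send the given
# Referer header, and return 0 on success. The fourth argument is assumed to
# switch to curl for HTML pages, mirroring the curl/wget split in the older
# revision of naver_main above; the real implementation may differ.
def _down_sketch(output_name, referer, url, use_curl=False):
    if use_curl:
        cmd = 'curl -o "%s" -e "%s" "%s"' % (output_name, referer, url)
    else:
        cmd = 'wget -O "%s" --header="Referer: %s" "%s"' % (output_name, referer, url)
    if os.system(cmd) == 0 and os.path.isfile(output_name):
        return 0
    return 1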