def cmd_parser():
    """Parse command-line options, apply config side effects (cookie, language, proxy), and return the parsed args."""
    load_config()

    parser = OptionParser('\n nhentai --search [keyword] --download'
                          '\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
                          '\n nhentai --file [filename]'
                          '\n\nEnvironment Variable:\n'
                          ' NHENTAI nhentai mirror url')

    # operation options
    parser.add_option('--download', '-D', dest='is_download', action='store_true',
                      help='download doujinshi (for search results)')
    parser.add_option('--show', '-S', dest='is_show', action='store_true',
                      help='just show the doujinshi information')

    # doujinshi options
    parser.add_option('--id', type='string', dest='id', action='store',
                      help='doujinshi ids set, e.g. 1,2,3')
    parser.add_option('--search', '-s', type='string', dest='keyword', action='store',
                      help='search doujinshi by keyword')
    parser.add_option('--favorites', '-F', action='store_true', dest='favorites',
                      help='list or download your favorites')

    # page options
    parser.add_option('--page-all', dest='page_all', action='store_true', default=False,
                      help='all search results')
    parser.add_option('--page', '--page-range', type='string', dest='page', action='store', default='',
                      help='page number of search results, e.g. 1,2-5,14')
    parser.add_option('--sorting', dest='sorting', action='store', default='recent',
                      help='sorting of doujinshi (recent / popular / popular-[today|week])',
                      choices=['recent', 'popular', 'popular-today', 'popular-week'])

    # download options
    parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', default='./',
                      help='output dir')
    parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
                      help='thread count for downloading doujinshi')
    parser.add_option('--timeout', '-T', type='int', dest='timeout', action='store', default=30,
                      help='timeout for downloading doujinshi')
    parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0,
                      help='slow down between downloading every doujinshi')
    parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
                      help='store a proxy, for example: --proxy \'http://127.0.0.1:1080\'')
    parser.add_option('--file', '-f', type='string', dest='file', action='store',
                      help='read gallery IDs from file')
    parser.add_option('--format', type='string', dest='name_format', action='store',
                      help='format the saved folder name', default='[%i][%a][%t]')

    # generate options
    parser.add_option('--html', dest='html_viewer', action='store_true',
                      help='generate an HTML viewer in the current directory')
    parser.add_option('--no-html', dest='is_nohtml', action='store_true',
                      help='don\'t generate HTML after downloading')
    parser.add_option('--gen-main', dest='main_viewer', action='store_true',
                      help='generate a main viewer containing all the doujinshi in the folder')
    parser.add_option('--cbz', '-C', dest='is_cbz', action='store_true',
                      help='generate Comic Book CBZ file')
    parser.add_option('--pdf', '-P', dest='is_pdf', action='store_true',
                      help='generate PDF file')
    parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
                      help='remove downloaded doujinshi dir when generating CBZ or PDF file')

    # nhentai options
    parser.add_option('--cookie', type='str', dest='cookie', action='store',
                      help='set cookie of nhentai to bypass Google reCAPTCHA')
    parser.add_option('--language', type='str', dest='language', action='store',
                      help='set default language to parse doujinshis')
    parser.add_option('--clean-language', dest='clean_language', action='store_true', default=False,
                      help='set DEFAULT as the language to parse doujinshis')
    parser.add_option('--save-download-history', dest='is_save_download_history', action='store_true',
                      default=False,
                      help='save downloaded doujinshis; they will be skipped if you re-download them')
    parser.add_option('--clean-download-history', action='store_true', default=False, dest='clean_download_history',
                      help='clean download history')

    # Python 2 compatibility shim; on Python 3 the NameError/TypeError is expected and ignored.
    try:
        sys.argv = [unicode(i.decode(sys.stdin.encoding)) for i in sys.argv]
        print()
    except (NameError, TypeError):
        pass
    except UnicodeDecodeError:
        exit(0)

    args, _ = parser.parse_args(sys.argv[1:])

    if args.html_viewer:
        generate_html()
        exit(0)

    if args.main_viewer and not args.id and not args.keyword and not args.favorites:
        generate_main_html()
        exit(0)

    if args.clean_download_history:
        with DB() as db:
            db.clean_all()

        logger.info('Download history cleaned.')
        exit(0)

    # --- set config ---
    if args.cookie is not None:
        constant.CONFIG['cookie'] = args.cookie
        logger.info('Cookie saved.')
        write_config()
        exit(0)

    if args.language is not None:
        constant.CONFIG['language'] = args.language
        logger.info('Default language now set to \'{0}\''.format(args.language))
        write_config()
        exit(0)
        # TODO: search without language

    if args.proxy:
        proxy_url = urlparse(args.proxy)
        if proxy_url.scheme not in ('http', 'https'):
            logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
            exit(0)
        else:
            constant.CONFIG['proxy'] = {
                'http': args.proxy,
                'https': args.proxy,
            }
            logger.info('Proxy now set to \'{0}\'.'.format(args.proxy))
            write_config()
            exit(0)
    # --- end set config ---

    if args.favorites:
        if not constant.CONFIG['cookie']:
            logger.warning('Cookie has not been set, please use `nhentai --cookie \'COOKIE\'` to set it.')
            exit(1)

    if args.id:
        _ = [i.strip() for i in args.id.split(',')]
        args.id = set(int(i) for i in _ if i.isdigit())

    if args.file:
        with open(args.file, 'r') as f:
            _ = [i.strip() for i in f.readlines()]
            args.id = set(int(i) for i in _ if i.isdigit())

    if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.favorites:
        logger.critical('Doujinshi id(s) are required for downloading')
        parser.print_help()
        exit(1)

    if not args.keyword and not args.id and not args.favorites:
        parser.print_help()
        exit(1)

    if args.threads <= 0:
        args.threads = 1
    elif args.threads > 15:
        logger.critical('Maximum number of used threads is 15')
        exit(1)

    return args
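
# Illustrative sketch, not part of the original module: the '--page' option above accepts
# values like '1,2-5,14', and main() hands them to a helper named paging() defined elsewhere
# in the package. Assuming paging() simply expands comma-separated numbers and dash ranges
# into a sorted list of page numbers, a minimal equivalent would look like this:
def _paging_sketch(page_string):
    """Expand a '1,2-5,14'-style string into a sorted list of page numbers (hypothetical helper)."""
    page_list = []
    for part in page_string.split(','):
        part = part.strip()
        if not part:
            continue
        if '-' in part:
            start, end = part.split('-', 1)
            page_list.extend(range(int(start), int(end) + 1))
        else:
            page_list.append(int(part))
    return sorted(set(page_list))

# _paging_sketch('1,2-5,14')  ->  [1, 2, 3, 4, 5, 14]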
def main():
    banner()

    if sys.version_info < (3, 0, 0):
        logger.error('nhentai now only supports Python 3.x')
        exit(1)

    options = cmd_parser()
    logger.info('Using mirror: {0}'.format(BASE_URL))

    # CONFIG['proxy'] will be changed after cmd_parser()
    if constant.CONFIG['proxy']['http']:
        logger.info('Using proxy: {0}'.format(constant.CONFIG['proxy']['http']))

    if not constant.CONFIG['template']:
        constant.CONFIG['template'] = 'default'

    logger.info('Using viewer template "{}"'.format(constant.CONFIG['template']))

    # check your cookie
    check_cookie()

    doujinshis = []
    doujinshi_ids = []
    doujinshi_list = []

    page_list = paging(options.page)

    if options.favorites:
        if not options.is_download:
            logger.warning('You did not specify the --download option')

        doujinshis = favorites_parser(page=page_list)

    elif options.keyword:
        if constant.CONFIG['language']:
            logger.info('Using default language: {0}'.format(constant.CONFIG['language']))
            options.keyword += ' language:{}'.format(constant.CONFIG['language'])

        doujinshis = search_parser(options.keyword, sorting=options.sorting, page=page_list,
                                   is_page_all=options.page_all)

    elif not doujinshi_ids:
        doujinshi_ids = options.id

    print_doujinshi(doujinshis)
    if options.is_download and doujinshis:
        doujinshi_ids = [i['id'] for i in doujinshis]

        if options.is_save_download_history:
            with DB() as db:
                data = map(int, db.get_all())

            doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data))

    if doujinshi_ids:
        for i, id_ in enumerate(doujinshi_ids):
            if options.delay:
                time.sleep(options.delay)

            doujinshi_info = doujinshi_parser(id_)

            if doujinshi_info:
                doujinshi_list.append(Doujinshi(name_format=options.name_format, **doujinshi_info))

            if (i + 1) % 10 == 0:
                logger.info('Progress: %d / %d' % (i + 1, len(doujinshi_ids)))

    if not options.is_show:
        downloader = Downloader(path=options.output_dir, size=options.threads,
                                timeout=options.timeout, delay=options.delay)

        for doujinshi in doujinshi_list:
            if not options.dryrun:
                doujinshi.downloader = downloader
                doujinshi.download()

            if options.generate_metadata:
                table = doujinshi.table
                generate_metadata_file(options.output_dir, table, doujinshi)

            if options.is_save_download_history:
                with DB() as db:
                    db.add_one(doujinshi.id)

            if not options.is_nohtml and not options.is_cbz and not options.is_pdf:
                generate_html(options.output_dir, doujinshi, template=constant.CONFIG['template'])
            elif options.is_cbz:
                generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir)
            elif options.is_pdf:
                generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir)

        if options.main_viewer:
            generate_main_html(options.output_dir)

        if not platform.system() == 'Windows':
            logger.log(15, '🍻 All done.')
        else:
            logger.log(15, 'All done.')

    else:
        [doujinshi.show() for doujinshi in doujinshi_list]
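
# Illustrative sketch with hypothetical names, not part of the original module: the
# --save-download-history filter in main() above is a plain set difference between the
# requested gallery IDs and the IDs already recorded in the download-history DB.
def _filter_downloaded_sketch(requested_ids, downloaded_ids):
    """Return requested IDs that are not in the download history (hypothetical helper)."""
    # Both inputs may mix str and int, so normalize to int before comparing.
    return sorted(set(map(int, requested_ids)) - set(map(int, downloaded_ids)))

# _filter_downloaded_sketch(['177013', 177014], [177013])  ->  [177014]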
# Earlier variant of main(), paired with the cmd_parser() below: file-based cookie/proxy
# configuration and category lookups (tag/artist/character/parody/group/language) by index.
def main():
    banner()
    options = cmd_parser()
    logger.info('Using mirror: {0}'.format(BASE_URL))

    from nhentai.constant import PROXY
    # constant.PROXY will be changed after cmd_parser()
    if PROXY != {}:
        logger.info('Using proxy: {0}'.format(PROXY))

    # check your cookie
    check_cookie()

    index = 0
    index_value = None
    doujinshis = []
    doujinshi_ids = []
    doujinshi_list = []

    if options.favorites:
        if not options.is_download:
            logger.warning('You did not specify the --download option')

        doujinshis = favorites_parser(options.page_range)

    elif options.tag:
        doujinshis = tag_parser(options.tag, sorting=options.sorting, max_page=options.max_page)

    elif options.artist:
        index = 1
        index_value = options.artist

    elif options.character:
        index = 2
        index_value = options.character

    elif options.parody:
        index = 3
        index_value = options.parody

    elif options.group:
        index = 4
        index_value = options.group

    elif options.language:
        index = 5
        index_value = options.language

    elif options.keyword:
        doujinshis = search_parser(options.keyword, sorting=options.sorting, page=options.page)

    elif not doujinshi_ids:
        doujinshi_ids = options.id

    if index:
        doujinshis = tag_parser(index_value, max_page=options.max_page, index=index)

    print_doujinshi(doujinshis)
    if options.is_download and doujinshis:
        doujinshi_ids = [i['id'] for i in doujinshis]

        if options.is_save_download_history:
            with DB() as db:
                data = set(db.get_all())

            doujinshi_ids = list(set(doujinshi_ids) - data)

    if doujinshi_ids:
        for i, id_ in enumerate(doujinshi_ids):
            if options.delay:
                time.sleep(options.delay)

            doujinshi_info = doujinshi_parser(id_)

            if doujinshi_info:
                doujinshi_list.append(Doujinshi(name_format=options.name_format, **doujinshi_info))

            if (i + 1) % 10 == 0:
                logger.info('Progress: %d / %d' % (i + 1, len(doujinshi_ids)))

    if not options.is_show:
        downloader = Downloader(path=options.output_dir, size=options.threads,
                                timeout=options.timeout, delay=options.delay)

        for doujinshi in doujinshi_list:
            doujinshi.downloader = downloader
            doujinshi.download()

            if options.is_save_download_history:
                with DB() as db:
                    db.add_one(doujinshi.id)

            if not options.is_nohtml and not options.is_cbz:
                generate_html(options.output_dir, doujinshi)
            elif options.is_cbz:
                generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir, options.write_comic_info)

        if options.main_viewer:
            generate_main_html(options.output_dir)

        if not platform.system() == 'Windows':
            logger.log(15, '🍻 All done.')
        else:
            logger.log(15, 'All done.')

    else:
        [doujinshi.show() for doujinshi in doujinshi_list]
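
# Illustrative sketch with hypothetical names, not part of the original module: the
# artist/character/parody/group/language branches in the main() variant above only pick a
# numeric index plus a query value for tag_parser(); the same dispatch can be written as a
# lookup table over the parsed options.
_TAG_INDEX_SKETCH = {
    'artist': 1,
    'character': 2,
    'parody': 3,
    'group': 4,
    'language': 5,
}

def _pick_tag_query_sketch(options):
    """Return (index, value) for the first category option the user supplied, else (0, None)."""
    for name, index in _TAG_INDEX_SKETCH.items():
        value = getattr(options, name, None)
        if value:
            return index, value
    return 0, None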
# Earlier variant of cmd_parser(): stores the cookie and proxy in files under NHENTAI_HOME
# instead of the unified config used by the variant at the top of this module.
def cmd_parser():
    parser = OptionParser('\n nhentai --search [keyword] --download'
                          '\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
                          '\n nhentai --file [filename]'
                          '\n\nEnvironment Variable:\n'
                          ' NHENTAI nhentai mirror url')

    # operation options
    parser.add_option('--download', '-D', dest='is_download', action='store_true',
                      help='download doujinshi (for search results)')
    parser.add_option('--show', '-S', dest='is_show', action='store_true',
                      help='just show the doujinshi information')

    # doujinshi options
    parser.add_option('--id', type='string', dest='id', action='store',
                      help='doujinshi ids set, e.g. 1,2,3')
    parser.add_option('--search', '-s', type='string', dest='keyword', action='store',
                      help='search doujinshi by keyword')
    parser.add_option('--tag', type='string', dest='tag', action='store',
                      help='download doujinshi by tag')
    parser.add_option('--artist', type='string', dest='artist', action='store',
                      help='download doujinshi by artist')
    parser.add_option('--character', type='string', dest='character', action='store',
                      help='download doujinshi by character')
    parser.add_option('--parody', type='string', dest='parody', action='store',
                      help='download doujinshi by parody')
    parser.add_option('--group', type='string', dest='group', action='store',
                      help='download doujinshi by group')
    parser.add_option('--language', type='string', dest='language', action='store',
                      help='download doujinshi by language')
    parser.add_option('--favorites', '-F', action='store_true', dest='favorites',
                      help='list or download your favorites')

    # page options
    parser.add_option('--page', type='int', dest='page', action='store', default=1,
                      help='page number of search results')
    parser.add_option('--max-page', type='int', dest='max_page', action='store', default=1,
                      help='the max page when recursively downloading tagged doujinshi')
    parser.add_option('--page-range', type='string', dest='page_range', action='store',
                      help='page range of favorites, e.g. 1,2-5,14')
    parser.add_option('--sorting', dest='sorting', action='store', default='date',
                      help='sorting of doujinshi (date / popular)',
                      choices=['date', 'popular'])

    # download options
    parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', default='',
                      help='output dir')
    parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
                      help='thread count for downloading doujinshi')
    parser.add_option('--timeout', '-T', type='int', dest='timeout', action='store', default=30,
                      help='timeout for downloading doujinshi')
    parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0,
                      help='slow down between downloading every doujinshi')
    parser.add_option('--proxy', '-p', type='string', dest='proxy', action='store', default='',
                      help='store a proxy, for example: -p \'http://127.0.0.1:1080\'')
    parser.add_option('--file', '-f', type='string', dest='file', action='store',
                      help='read gallery IDs from file')
    parser.add_option('--format', type='string', dest='name_format', action='store',
                      help='format the saved folder name', default='[%i][%a][%t]')

    # generate options
    parser.add_option('--html', dest='html_viewer', action='store_true',
                      help='generate an HTML viewer in the current directory')
    parser.add_option('--no-html', dest='is_nohtml', action='store_true',
                      help='don\'t generate HTML after downloading')
    parser.add_option('--gen-main', dest='main_viewer', action='store_true',
                      help='generate a main viewer containing all the doujinshi in the folder')
    parser.add_option('--cbz', '-C', dest='is_cbz', action='store_true',
                      help='generate Comic Book CBZ file')
    parser.add_option('--comic-info', dest='write_comic_info', action='store_true',
                      help='when generating Comic Book CBZ file, also write ComicInfo.xml')
    parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
                      help='remove downloaded doujinshi dir when generating CBZ file')

    # nhentai options
    parser.add_option('--cookie', type='str', dest='cookie', action='store',
                      help='set cookie of nhentai to bypass Google reCAPTCHA')
    parser.add_option('--save-download-history', dest='is_save_download_history', action='store_true',
                      default=False,
                      help='save downloaded doujinshis; they will be skipped if you re-download them')
    parser.add_option('--clean-download-history', action='store_true', default=False, dest='clean_download_history',
                      help='clean download history')

    # Python 2 compatibility shim; on Python 3 the NameError/TypeError is expected and ignored.
    try:
        sys.argv = [unicode(i.decode(sys.stdin.encoding)) for i in sys.argv]
        print()
    except (NameError, TypeError):
        pass
    except UnicodeDecodeError:
        exit(0)

    args, _ = parser.parse_args(sys.argv[1:])

    if args.html_viewer:
        generate_html()
        exit(0)

    if args.main_viewer and not args.id and not args.keyword and \
            not args.tag and not args.artist and not args.character and \
            not args.parody and not args.group and not args.language and not args.favorites:
        generate_main_html()
        exit(0)

    if args.clean_download_history:
        with DB() as db:
            db.clean_all()

        logger.info('Download history cleaned.')
        exit(0)

    if os.path.exists(constant.NHENTAI_COOKIE):
        with open(constant.NHENTAI_COOKIE, 'r') as f:
            constant.COOKIE = f.read()

    if args.cookie:
        try:
            if not os.path.exists(constant.NHENTAI_HOME):
                os.mkdir(constant.NHENTAI_HOME)

            with open(constant.NHENTAI_COOKIE, 'w') as f:
                f.write(args.cookie)
        except Exception as e:
            logger.error('Cannot create NHENTAI_HOME: {}'.format(str(e)))
            exit(1)

        logger.info('Cookie saved.')
        exit(0)

    if os.path.exists(constant.NHENTAI_PROXY):
        with open(constant.NHENTAI_PROXY, 'r') as f:
            link = f.read()
            constant.PROXY = {'http': link, 'https': link}

    if args.proxy:
        try:
            if not os.path.exists(constant.NHENTAI_HOME):
                os.mkdir(constant.NHENTAI_HOME)

            proxy_url = urlparse(args.proxy)
            if proxy_url.scheme not in ('http', 'https'):
                logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
            else:
                with open(constant.NHENTAI_PROXY, 'w') as f:
                    f.write(args.proxy)

        except Exception as e:
            logger.error('Cannot create NHENTAI_HOME: {}'.format(str(e)))
            exit(1)

        logger.info('Proxy \'{0}\' saved.'.format(args.proxy))
        exit(0)

    if args.favorites:
        if not constant.COOKIE:
            logger.warning('Cookie has not been set, please use `nhentai --cookie \'COOKIE\'` to set it.')
            exit(1)

    if args.id:
        _ = [i.strip() for i in args.id.split(',')]
        args.id = set(int(i) for i in _ if i.isdigit())

    if args.file:
        with open(args.file, 'r') as f:
            _ = [i.strip() for i in f.readlines()]
            args.id = set(int(i) for i in _ if i.isdigit())

    if (args.is_download or args.is_show) and not args.id and not args.keyword and \
            not args.tag and not args.artist and not args.character and \
            not args.parody and not args.group and not args.language and not args.favorites:
        logger.critical('Doujinshi id(s) are required for downloading')
        parser.print_help()
        exit(1)

    if not args.keyword and not args.id and not args.tag and not args.artist and \
            not args.character and not args.parody and not args.group and not args.language and not args.favorites:
        parser.print_help()
        exit(1)

    if args.threads <= 0:
        args.threads = 1
    elif args.threads > 15:
        logger.critical('Maximum number of used threads is 15')
        exit(1)

    return args
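
# Illustrative sketch, not part of the original module: both cmd_parser() variants read
# '--file' as one gallery ID per line, strip whitespace, and keep only purely numeric
# entries, so a plain text file like the following is enough (the non-numeric line is
# simply skipped):
#
#     177013
#     228922
#     not-a-number
#
# The equivalent parsing step, isolated into a hypothetical helper:
def _read_ids_sketch(path):
    """Read a set of integer gallery IDs from a text file, one per line (hypothetical helper)."""
    with open(path, 'r') as f:
        return set(int(line.strip()) for line in f if line.strip().isdigit())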