def read(self):
    """Resolve a NicoNico video and queue it for download.

    Logs in with the credentials from the UI (when the NicoNico box is
    checked), fetches the video for the selected format, then records the
    stream URL, thumbnail and title on this task.

    Raises:
        errors.Invalid: when the login attempt fails.
    """
    ui_setting = self.ui_setting
    if self.cw.format:
        self._format = self.cw.format
    if self._format == 'mp3':
        # Audio-only download; switch the widget into music mode.
        self.cw.setMusic(True)
    if ui_setting.nicoBox.isChecked():
        username = compatstr(ui_setting.nico_id.text())
        password = compatstr(ui_setting.nico_pw.text())
    else:
        username = ''
        password = ''
    try:
        session = login(username, password)
    except Exception:  # exception value unused; any failure means bad login
        # Drop any half-established session before reporting the failure.
        logout()
        raise errors.Invalid(u'Failed to login: {}'.format(self.url), fail=True)
    self.session = session
    try:
        video = get_video(session, self.url, self._format, self.cw)
    except Exception:
        # Don't keep a session around if the video lookup blew up.
        logout()
        raise
    self.urls.append(video.url)
    self.setIcon(video.thumb)
    self.enableSegment()
    self.title = video.title
def read(self):
    """Log in to NicoNico and queue the video identified by ``self.id_``."""
    ui_setting = self.ui_setting
    if ui_setting.nicoBox.isChecked():
        username = compatstr(ui_setting.nico_id.text())
        password = compatstr(ui_setting.nico_pw.text())
    else:
        username = password = ''
    try:
        session = login(username, password)
    except Exception:
        logout()
        return self.Invalid(u'Failed to login: {}'.format(self.url), fail=True)
    self.session = session
    try:
        video = get_video(session, self.id_, cw=self.cw)
    except Exception:
        logout()
        raise
    # Register stream URL, preferred filename, thumbnail and title.
    self.urls.append(video.url)
    self.filenames[video.url] = video.fileName
    self.setIcon(video.thumb)
    self.enableSegment()
    self.title = video.title
def tags_matched(tags_illust, cw=None):
    """Return True if the illust's tags pass the user's include/exclude filters.

    Include tags come from the UI tag list; a leading ``-`` marks an exclude
    tag. The parsed sets are cached on the widget to skip re-reading the UI.
    """
    print_ = get_print(cw)
    cache = getattr(cw, '__pixiv_tag_cache', None)
    if cache is not None:
        tags = cache['tags']
        tags_ex = cache['tags_ex']
    else:
        if utils.ui_setting and utils.ui_setting.groupBox_tag.isChecked():
            raw_tags = [
                compatstr(utils.ui_setting.tagList.item(i).text())
                for i in range(utils.ui_setting.tagList.count())
            ]
        else:
            raw_tags = []
        tags = set()
        tags_ex = set()
        for raw in raw_tags:
            raw = pretty_tag(raw)
            if raw.startswith('-'):
                tags_ex.add(raw[1:].strip())
            else:
                tags.add(raw)
    print_('tags: [{}]'.format(', '.join(tags)))
    print_('tags_ex: [{}]'.format(', '.join(tags_ex)))
    if cw:
        cw.__pixiv_tag_cache = {'tags': tags, 'tags_ex': tags_ex}
    tags_illust = set(pretty_tag(t) for t in tags_illust)
    # Matched when at least one include tag hits (or none given) and no
    # exclude tag appears on the illust.
    return (not tags or tags & tags_illust) and tags_ex.isdisjoint(tags_illust)
def read(self):
    """Fetch the video in the UI-selected format and queue it."""
    fmt = compatstr(self.ui_setting.youtubeFormat.currentText()).lower().strip()
    video = get_video(self.url, fmt)
    self.urls.append(video.url)
    self.setIcon(video.thumb)
    self.title = video.title
def read(self):
    """Fetch video info, queue the stream URL, and set thumbnail/title.

    The thumbnail is downloaded into an in-memory buffer rather than a
    temporary file.
    """
    # NOTE: removed unused local `cw = self.customWidget` from the original.
    format = compatstr(
        self.ui_setting.youtubeFormat.currentText()).lower().strip()
    info, video = get_video(self.url, format)
    title = info['title']
    self.urls.append(video.url)
    thumb = BytesIO()
    downloader.download(info['thumbnail'], buffer=thumb)
    self.setIcon(thumb)
    self.title = title
def init(self):
    """Pick video/audio mode from the widget's (or UI combo's) format."""
    chosen = self.cw.format
    if not chosen:
        # No format pinned on the widget yet: take the first word of the
        # type combo (e.g. "mp4 ...") and remember it.
        combo = self.ui_setting.youtubeCombo_type
        chosen = compatstr(combo.currentText()).lower().split()[0]
        self.cw.format = chosen
    if chosen in ('mp4', 'mkv', '3gp'):
        self.yt_type = 'video'
    else:
        self.yt_type = 'audio'
        self.cw.setMusic(True)
def read(self):
    """Queue a single video using the format selected in the UI."""
    fmt = compatstr(
        self.ui_setting.youtubeFormat.currentText()).lower().strip()
    video = Video(self.url, fmt)
    self.urls.append(video.url)
    self.setIcon(video.thumb)
    self.customWidget.enableSegment()
    self.title = video.title
def read(self):
    """Queue downloads for a single video, a user's channel, or a photo gallery.

    Channel URLs either become a multi-file playlist (when "playlist to files"
    is enabled) or are split: the first video is downloaded here and the rest
    are re-queued through the exec queue.
    """
    cw = self.customWidget
    ui_setting = self.ui_setting
    format = compatstr(
        ui_setting.youtubeFormat.currentText()).lower().strip()
    cw.enableSegment(1024 * 1024 // 2)
    thumb = BytesIO()
    if '/users/' in self.url:
        info = read_channel(self.url, cw)
        urls = info['urls']
        p2f = get_p2f(cw)
        if p2f:
            # Playlist-to-files: queue every channel video on this task.
            self.single = False
            self.title = clean_title(info['title'])
            videos = [Video(url, format) for url in info['urls']]
            self.urls = [video.url for video in videos]
            video = videos[0]
            video.url()  # resolve lazily-evaluated URL so video.info is populated
            downloader.download(video.info['thumbnail'], buffer=thumb)
            self.setIcon(thumb)
            return
        else:
            # Take the first video for this task; push the rest back to the
            # download queue as new tasks.
            cw.gal_num = self.url = urls.pop(0)
            if urls and cw.alive:
                s = u', '.join(urls)
                self.exec_queue.put((s, 'downButton(customWidget)'))
    elif '/photos/gallery/' in self.url:
        info = read_gallery(self.url, cw)
        for img in info['imgs']:
            self.urls.append(img.url)
        self.single = False
        self.title = clean_title(info['title'])
        self.url = info['url']
        cw.disableSegment()
        return
    else:
        urls = []
    # Single-video path (also reached from the non-p2f channel branch).
    video = Video(self.url, format)
    video.url()  # resolve before reading video.info
    self.urls.append(video.url)
    downloader.download(video.info['thumbnail'], buffer=thumb)
    self.setIcon(thumb)
    self.title = video.title
def init(self):
    """Normalize shorthand YouTube ids and pick the download mode."""
    cw = self.customWidget
    if 'youtube_' in self.url:
        # Expand 'youtube_<id>' shorthand into a full watch URL.
        vid = self.url.replace('youtube_', '')
        self.url = u'https://www.youtube.com/watch?v={}'.format(vid)
    chosen = cw.format
    if not chosen:
        chosen = compatstr(
            self.ui_setting.youtubeCombo_type.currentText()).lower().split()[0]
        cw.format = chosen
    if chosen in ('mp4', 'mkv', '3gp'):
        self.yt_type = 'video'
    else:
        self.yt_type = 'audio'
        cw.setMusic(True)
def tags_matched(tags_illust, tags_add, cw=None):
    """Return True if the illust's tags pass the include/exclude filters.

    ``tags_add`` are extra include tags (e.g. from the URL). The parsed UI
    tag sets are cached via the widget's extra storage; log lines are only
    printed on the first (cache-filling) call.
    """
    print_ = get_print(cw)
    cache = cw.get_extra('pixiv_tag_cache') if cw else None
    init = cache is None
    if init:
        if utils.ui_setting and utils.ui_setting.groupBox_tag.isChecked():
            raw_tags = [
                compatstr(utils.ui_setting.tagList.item(i).text())
                for i in range(utils.ui_setting.tagList.count())
            ]
        else:
            raw_tags = []
        tags = set()
        tags_ex = set()
        for raw in raw_tags:
            raw = pretty_tag(raw)
            if raw.startswith('-'):
                tags_ex.add(raw[1:].strip())
            else:
                tags.add(raw)
        if cw:
            # Store as lists; sets are rebuilt on cache hits above.
            cw.set_extra('pixiv_tag_cache',
                         {'tags': list(tags), 'tags_ex': list(tags_ex)})
        print_('tags: [{}]'.format(', '.join(tags)))
        print_('tags_ex: [{}]'.format(', '.join(tags_ex)))
    else:
        tags = set(cache['tags'])
        tags_ex = set(cache['tags_ex'])
    if tags_add:
        tags.update(pretty_tag(tag) for tag in tags_add)
        if init:
            print_('tags_add: {}'.format(tags_add))
    tags_illust = set(pretty_tag(tag) for tag in tags_illust)
    return (not tags or tags & tags_illust) and tags_ex.isdisjoint(tags_illust)
def read(self):
    """Queue a single TikTok video, or a whole channel as a playlist."""
    fmt = compatstr(
        self.ui_setting.youtubeFormat.currentText()).lower().strip()
    if re.search(PATTERN_VID, self.url) is not None:
        # Direct video URL.
        video = Video(self.url, self.session, fmt)
        video.url()
    else:
        # Channel URL: collect every item and hand off to the playlist flow.
        info = read_channel(self.url, self.session, self.cw)
        vids = []
        for item in info['items']:
            url_video = 'https://www.tiktok.com/@{}/video/{}'.format(
                info['uid'], item['id'])
            vids.append(Video(url_video, self.session, fmt))
        name = '{} (tiktok_{})'.format(info['nickname'], info['uid'])
        video = self.process_playlist(name, vids)
    self.urls.append(video.url)
    self.title = clean_title(video.title)
def read(self):
    """Collect all stream parts for a page and build the formatted title."""
    ui_setting = self.ui_setting
    format = compatstr(
        ui_setting.youtubeFormat.currentText()).lower().strip()
    page = get_page(self.url)
    videos, info = get_videos(self.url, self.customWidget)
    if not videos:
        raise Exception('No videos')
    for video in videos:
        self.urls.append(video.url)
    thumb = BytesIO()
    downloader.download(info['url_thumb'], buffer=thumb)
    self.setIcon(thumb)
    title = info['title']
    if page is not None:
        # Multi-page videos get a page suffix so parts don't collide.
        title += (u'_p{}').format(page)
    # Apply the user's filename template: first protect the literal keywords
    # 'title'/'id' by renaming them, then substitute the real values (this
    # avoids 'id' inside the title being replaced).
    format = format.replace('title', '###title').replace('id', '###id')
    title = format.replace('###title', title).replace('###id', (u'{}').format(self.id))
    # Spread roughly 8 download threads across the parts.
    n = int(math.ceil(8.0 / len(videos)))
    self.customWidget.print_(('n_threads: {}').format(n))
    self.customWidget.enableSegment(n_threads=n)
    self.title = title
def read(self):
    """Resolve a pixiv task (user / bookmark / search / single illust).

    Parses the task id into an API query, fetches the matching images via the
    app API (with retries for single illusts), and fills ``self.imgs`` /
    ``self.urls`` / ``self.title``. Returns ``self.Invalid(...)`` on a
    PixivError instead of raising.
    """
    type = self.pixiv_type
    cw = self.cw
    print_ = cw.print_
    ui_setting = self.ui_setting
    if type == 'following':
        raise NotImplementedError('following')
    # Ugoira conversion target (None = keep as-is) and filename format.
    self._format = [None, 'gif', 'webp', 'png'][ui_setting.ugoira_convert.currentIndex()]
    self._format_name = compatstr(ui_setting.pixivFormat.currentText())
    types = self.get_types()
    if types:
        s = ', '.join(sorted(types))
    else:
        s = 'all'
        types = None  # None means "no type filter" downstream
    print_((u'Type: {}').format(s))
    print_((u'info: {}').format(self.info))
    api = self.api
    # Strip the task-id prefixes to recover the raw query (user id / word).
    query = self.id.replace('_bmk', '').replace('_illust', '').replace('pixiv_', '').replace('search_', '')
    if type != 'search':
        query = int(query)
    print('pixiv_query:', query)
    try:
        if type in ('user', 'bookmark', 'search'):
            max_pid = get_max_range(cw)
            if ui_setting.groupBox_tag.isChecked():
                tags = [
                    compatstr(ui_setting.tagList.item(i).text())
                    for i in range(ui_setting.tagList.count())
                ]
            else:
                tags = []
            if type == 'search':
                query = query.replace('+', ' ')
                name = query
            else:
                # NOTE: shadows the builtin `id`; kept for byte-compatibility.
                id = self.id.replace('_bmk', '').replace('pixiv_', '').replace('search_', '')
                print('name', id)
                name = get_name(id, self.api, cw=cw)
                self.artist = name
            title = u'{} ({})'.format(name, self.id)
            print_(title)
            dir = os.path.join(get_outdir('pixiv'), clean_title(title))
            imgs = get_imgs(query, type=type, api=api, n=max_pid, tags=tags, types=types, format=self._format, format_name=self._format_name, dir=dir, cw=cw, title=title, info=self.info)
        elif type == 'illust':
            # Retry the detail call; PixivError carries a refreshed api object.
            for try_ in range(N_TRY):
                try:
                    detail = api.illust_detail(query, req_auth=True)
                    error = detail.get('error')
                    if error:
                        raise PixivError(error)
                    break
                except PixivError as e:
                    api = e.api
                    print_(e)
                    if try_ < N_TRY - 1:
                        print_('retry...')
                        sleep(SLEEP, cw)
                    else:
                        raise
            illust = detail.illust
            name = illust.title
            title = (u'{} ({})').format(name, self.id)
            dir = os.path.join(get_outdir('pixiv'), clean_title(title))
            imgs = get_imgs_from_illust(illust, api=api, format=self._format, dir=dir, cw=cw, format_name=self._format_name)
    except PixivError as e:
        msg = (u'PixivError: {}').format(e.message)
        return self.Invalid(msg)
    # Snapshot per-image metadata (ugoira frames need extra info for ffmpeg).
    self.imgs = []
    for img in imgs:
        d = {'type': img.type, 'url': img.url()}
        if img.type == 'ugoira':
            d['filename'] = img.filename
            d['frames'] = img.ugoira_data.frames
        self.imgs.append(d)
    for img in imgs:
        self.urls.append(img.url)
    self.title = clean_title(title)  # 1390
def get_videos(url, cw=None, depth=0):
    """Resolve bilibili stream parts for *url*.

    Walks the known renditions from best to worst, skips resolutions above
    the user's cap, and returns ``(videos, info)`` where *info* carries the
    cleaned title and thumbnail URL. ``depth`` guards a single retry when the
    first part looks suspiciously small (presumably a bad playurl response —
    TODO confirm).
    """
    print_ = get_print(cw)
    # Resolution cap from the UI (falls back to 720p when headless).
    if utils.ui_setting:
        res_text = compatstr(utils.ui_setting.youtubeCombo_res.currentText())
        res = {
            '720p': 720,
            '1080p': 1080,
            '2K': 1440,
            '4K': 2160,
            '8K': 4320
        }[res_text]
    else:
        res = 720
    mobj = re.match(_VALID_URL, url)
    video_id = mobj.group('id')
    anime_id = mobj.group('anime_id')
    print(video_id, anime_id)
    print_ = get_print(cw)
    html = downloader.read_html(url, methods={'requests'})
    soup = Soup(html)
    title = soup.find('h1').attrs['title'].strip()
    url_thumb = soup.find('meta', {'property': 'og:image'}).attrs['content']
    p = get_page(url)
    if p is None:
        p = 1
    print('page:', p)
    # Each page of a multi-part video has its own cid.
    if p > 1:
        pages = get_pages(html)
        cid = pages[(p - 1)]['cid']
    else:
        cid = re.findall('\\bcid(?:["\\\']:|=)(\\d+)', html)[0]
    print_('cid: {}'.format(cid))
    headers = {'Referer': url}
    entries = []
    RENDITIONS = [
        'qn={}&quality={}&type='.format(qlt, qlt) for qlt in RESOLS.keys()
    ]  # + ['quality=2&type=mp4']
    for num, rendition in enumerate(RENDITIONS, start=1):
        print('####', num, rendition)
        # Signed playurl API request (md5 over payload + app key secret).
        payload = 'appkey=%s&cid=%s&otype=json&%s' % (_APP_KEY, cid, rendition)
        sign = hashlib.md5(
            (payload + _BILIBILI_KEY).encode('utf-8')).hexdigest()
        url_json = 'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (
            payload, sign)
        s_json = downloader.read_html(url_json)
        print(s_json[:1000])
        video_info = json.loads(s_json)
        if not video_info:
            continue
        if 'durl' not in video_info:
            print('#### error', num)
            # Try the next rendition; only surface the API message on the last.
            if num < len(RENDITIONS):
                continue
            msg = video_info.get('message')
            if msg:
                raise Exception(msg)
        quality = video_info['quality']
        resolution = get_resolution(quality)
        s = (u'resolution: {}').format(resolution)
        print_(s)  # 2184
        if int(re.find('([0-9]+)p', resolution)) > res:
            print_('skip resolution')
            continue
        for idx, durl in enumerate(video_info['durl']):  # 1343
            if idx == 0:
                # A tiny first part usually means a bogus response; retry once.
                size = downloader.get_size(durl['url'], referer=url)
                if size < 1024 * 1024 and depth == 0:
                    print_('size is too small')
                    return get_videos(url, cw, depth + 1)
            formats = [{
                'url': durl['url'],
                'filesize': int_or_none(durl['size'])
            }]
            for backup_url in durl.get('backup_url', []):
                formats.append({
                    'url': backup_url,
                    'preference': -2 if 'hd.mp4' in backup_url else -3
                })
            for a_format in formats:
                a_format.setdefault('http_headers', {}).update({'Referer': url})
            entries.append({
                'id': '%s_part%s' % (video_id, idx),
                'duration': float_or_none(durl.get('length'), 1000),
                'formats': formats
            })
        # First acceptable rendition wins.
        break
    videos = []
    for entry in entries:
        url_video = entry['formats'][0]['url']
        video = Video(url_video, url, video_id, len(videos))
        videos.append(video)
    info = {'title': clean_title(title), 'url_thumb': url_thumb}
    return (videos, info)
def get_info(url, cw=None, depth=0):
    """Collect pixiv images for *url* via the web (AJAX) API.

    Dispatches on the URL shape: single illust, user bookmarks, tag search,
    following feed, or a user's illusts. Returns an ``info`` dict whose
    ``'imgs'`` entry holds at most ``max_pid`` Image objects.
    """
    print_ = get_print(cw)
    api = PixivAPI()
    info = {}
    imgs = []
    # Ugoira conversion target and filename format from the UI (headless-safe).
    if utils.ui_setting:
        ugoira_ext = [None, '.gif', '.webp', '.png'][utils.ui_setting.ugoira_convert.currentIndex()]
    else:
        ugoira_ext = None
    if utils.ui_setting:
        format_ = compatstr(utils.ui_setting.pixivFormat.currentText())
    else:
        format_ = 'id_ppage'
    max_pid = get_max_range(cw)
    if api.illust_id(url):  # Single post
        id_ = api.illust_id(url)
        data = api.illust(id_)
        # '******' in the payload marks a logged-out response.
        login = '******' not in data
        if FORCE_LOGIN and not login:  #
            raise errors.LoginRequired()
        if data['xRestrict'] and not login:
            raise errors.LoginRequired('R-18')
        info['artist'] = data['userName']
        info['artist_id'] = data['userId']
        info['raw_title'] = data['illustTitle']
        info['title'] = '{} (pixiv_illust_{})'.format(info['raw_title'], id_)
        info['create_date'] = parse_time(data['createDate'])
        tags_illust = set(tag['tag'] for tag in data['tags']['tags'])
        if tags_matched(tags_illust, cw):
            if data['illustType'] == 2:  # ugoira
                data = api.ugoira_meta(id_)
                ugoira = {
                    'ext': ugoira_ext,
                    'delay': [frame['delay'] for frame in data['frames']],
                }
                img = Image(data['originalSrc'], url, id_, 0, format_, info, cw, ugoira=ugoira)
                imgs.append(img)
            else:
                data = api.pages(id_)
                for img in data:
                    img = Image(img['urls']['original'], url, id_, len(imgs), format_, info, cw)
                    imgs.append(img)
        else:
            print('tags mismatched')
    elif '/bookmarks/' in url or 'bookmark.php' in url:  # User bookmarks
        id_ = api.user_id(url)
        if id_ is None:  #
            id_ = my_id()
        process_user(id_, info, api)
        info['title'] = '{} (pixiv_bmk_{})'.format(info['artist'], info['artist_id'])
        # Page through bookmarks, de-duplicating ids, until a page adds nothing.
        ids = []
        ids_set = set()
        offset = 0
        while len(ids) < max_pid:
            data = api.bookmarks(id_, offset)
            c = 0
            for id in [work['id'] for work in data['works']]:
                if id in ids_set:
                    continue
                ids_set.add(id)
                ids.append(id)
                c += 1
            if not c:
                break
            offset += LIMIT
            if depth == 0:
                check_alive(cw)
        process_ids(ids[:max_pid], info, imgs, cw, depth)
    elif '/tags/' in url or 'search.php' in url:  # Search
        q = unquote(
            re.find(r'/tags/([^/]+)', url)
            or re.find('[?&]word=([^&]*)', url, err='no tags'))
        info['title'] = '{} (pixiv_search_{})'.format(q, q.replace(' ', '+'))
        qs = query_url(url)
        order = qs.get('order', ['date_d'])[0]
        mode = qs.get('mode', ['all'])[0]
        ids = []
        ids_set = set()
        p = 1
        while len(ids) < max_pid:
            data = api.search(q, order, mode, p=p)
            c = 0
            for id in [
                    illust['id'] for illust in data['illustManga']['data']
                    if 'id' in illust
            ]:
                if id in ids_set:
                    continue
                ids_set.add(id)
                ids.append(id)
                c += 1
            if not c:
                break
            p += 1
        process_ids(ids[:max_pid], info, imgs, cw, depth)
    elif 'bookmark_new_illust.php' in url or 'bookmark_new_illust_r18.php' in url:  # Newest works: Following
        r18 = 'bookmark_new_illust_r18.php' in url
        id_ = my_id()
        process_user(id_, info, api)
        info['title'] = '{} (pixiv_following_{}{})'.format(
            info['artist'], 'r18_' if r18 else '', info['artist_id'])
        ids = []
        ids_set = set()
        p = 1
        while len(ids) < max_pid:
            c = 0
            for id in api.following(p, r18=r18):
                if id in ids_set:
                    continue
                ids_set.add(id)
                ids.append(id)
                c += 1
            if not c:
                break
            p += 1
        process_ids(ids[:max_pid], info, imgs, cw, depth)
    elif api.user_id(url):  # User illusts
        id_ = api.user_id(url)
        process_user(id_, info, api)
        data = api.profile(id_)
        info['title'] = '{} (pixiv_{})'.format(info['artist'], info['artist_id'])
        ids = []
        for illusts in [data['illusts'], data['manga']]:
            if not illusts:
                continue
            ids += list(illusts.keys())
        # Newest first (ids are numeric strings).
        ids = sorted(ids, key=int, reverse=True)
        process_ids(ids[:max_pid], info, imgs, cw, depth)
    else:
        raise NotImplementedError()
    info['imgs'] = imgs[:max_pid]
    return info
def read(self):
    """Resolve a YouTube URL (or playlist) according to the UI settings.

    For playlists: either queue everything as one multi-file task (p2f) or
    keep the first video and re-queue the rest via the exec queue.
    """
    ui_setting = self.ui_setting
    cw = self.customWidget
    print_ = get_print(cw)
    format = compatstr(
        ui_setting.youtubeFormat.currentText()).lower().strip()
    if self.yt_type == 'video':
        res_text = compatstr(ui_setting.youtubeCombo_res.currentText())
        res = {
            '720p': 720,
            '1080p': 1080,
            '2K': 1440,
            '4K': 2160,
            '8K': 4320
        }[res_text]
        info = get_videos(self.url, type=self.yt_type, max_res=res, only_mp4=False, audio_included=not True, format=format, cw=cw)
    else:
        abr_text = compatstr(ui_setting.youtubeCombo_abr.currentText())
        abr = int(abr_text.replace('k', ''))
        info = get_videos(self.url, type=self.yt_type, max_abr=abr, format=format, cw=cw)
    videos = info['videos']
    cw.enableSegment(overwrite=True)
    # first video must be valid
    # Pop broken entries off the front until one resolves; while/else raises
    # when the list is exhausted without a single working video.
    while videos:
        video = videos[0]
        try:
            video.url()
            break
        except Exception as e:
            print(e)
            videos.remove(video)
    else:
        raise Exception('No videos')
    if len(videos) > 1:
        p2f = get_p2f(cw)
        if p2f:
            # Playlist-to-files: this single task downloads every video.
            self.single = False
            self.title = clean_title(info['title'])
            self.urls = [video.url for video in videos]
            video = videos[0]
            self.setIcon(video.thumb)
            return
        else:
            # Keep the first video; re-queue the rest with the same format.
            video = videos.pop(0)
            cw.gal_num = cw.url = video.url._url
            if videos and cw.alive:
                s = u', '.join(video.url._url for video in videos)
                self.exec_queue.put(([s, {
                    'youtube': cw.format
                }], 'downButton(cw[0], format_selector=cw[1])'))
    self.urls.append(video.url)
    cw.artist = video.username
    self.setIcon(video.thumb)
    self.title = video.title
def _pp(self, filename):
    """Post-process a downloaded file: merge/convert with ffmpeg, attach
    album art, and fetch subtitles.

    Returns the new filename when the file was renamed/converted, else None.
    """
    cw = self.cw
    print_ = get_print(cw)
    ui_setting = utils.ui_setting
    ext = os.path.splitext(filename)[1].lower()
    if not os.path.isfile(filename):
        print(u'no file: {}'.format(filename))
        return
    filename_new = None
    if self.type == 'video' and (self.audio is not None or ext != '.mp4'):  # UHD or non-mp4
        if self.audio is not None:  # merge
            # Separate audio stream (DASH): download it to a temp file and
            # mux it with the video.
            print_(u'Download audio: {}'.format(self.audio))
            hash = uuid()
            path = os.path.join(os.path.dirname(filename),
                                '{}_a.tmp'.format(hash))
            if cw is not None:
                cw.trash_can.append(path)  # temp file cleaned up later
            if constants.FAST:
                downloader_v3.download(self.audio,
                                       chunk=1024 * 1024,
                                       n_threads=2,
                                       outdir=os.path.dirname(path),
                                       fileName=os.path.basename(path),
                                       customWidget=cw,
                                       overwrite=True)
            else:
                downloader.download(self.audio,
                                    outdir=os.path.dirname(path),
                                    fileName=os.path.basename(path),
                                    customWidget=cw,
                                    overwrite=True)
            ext, out = ffmpeg.merge(filename, path, cw=cw, vcodec=self.vcodec)
            #print(out)
            # The merge may change the container; rename to match.
            name, ext_old = os.path.splitext(filename)
            if ext_old.lower() != ext.lower():
                print_(u'rename ext {} --> {}'.format(ext_old, ext))
                filename_new = u'{}{}'.format(name, ext)
                if os.path.isfile(filename_new):
                    os.remove(filename_new)
                os.rename(filename, filename_new)
        else:  # convert non-mp4 video -> mp4
            name, ext_old = os.path.splitext(filename)
            filename_new = u'{}.mp4'.format(name)
            print_(u'Convert video: {} -> {}'.format(
                filename, filename_new))
            ffmpeg.convert(filename, filename_new, cw=cw)
    elif self.type == 'audio' and ext != '.mp3':  # convert non-mp3 audio -> mp3
        name, ext_old = os.path.splitext(filename)
        filename_new = u'{}.mp3'.format(name)
        ffmpeg.convert(filename, filename_new,
                       '-shortest -preset ultrafast -b:a {}k'.format(
                           get_abr()), cw=cw)
    if self.type == 'audio' and ui_setting.albumArt.isChecked():
        # Best-effort: failing to embed cover art must not fail the download.
        try:
            self.thumb.seek(0)  #
            ffmpeg.add_cover(filename_new, self.thumb, {
                'artist': self.username,
                'title': self.title
            }, cw=cw)
        except Exception as e:
            s = print_error(e)[-1]
            print_(s)
    if ui_setting and ui_setting.subtitle.isChecked():
        lang = {
            'korean': 'ko',
            'english': 'en',
            'japanese': 'ja'
        }[compatstr(ui_setting.subtitleCombo.currentText()).lower()]
        if lang in self.subtitles:
            # Download the VTT, convert to SRT, and register the result;
            # the intermediate .vtt is always removed (finally).
            try:
                subtitle = self.subtitles[lang]
                filename_sub = u'{}.vtt'.format(
                    os.path.splitext(filename)[0])
                downloader.download(
                    subtitle,
                    os.path.dirname(filename_sub),
                    fileName=os.path.basename(filename_sub),
                    overwrite=True)
                filename_sub_new = u'{}.srt'.format(
                    os.path.splitext(filename_sub)[0])
                cw.imgs.append(filename_sub_new)
                cw.dones.add(
                    os.path.realpath(filename_sub_new).replace(
                        '\\\\?\\', ''))
                srt_converter.convert(filename_sub, filename_sub_new)
                cw.setSubtitle(True)
            finally:
                try:
                    os.remove(filename_sub)
                except:
                    pass
    return filename_new
def get_info(url, cw=None, depth=0, tags_add=None):
    """Collect pixiv images for *url* via the web (AJAX) API.

    Newer variant: supports hidden bookmarks for the logged-in user, extended
    search filters, per-type user pages (illustrations/manga), and extra
    include tags (``tags_add``). Returns an ``info`` dict whose ``'imgs'``
    entry holds at most ``max_pid`` Image objects.
    """
    print_ = get_print(cw)
    api = PixivAPI()
    info = {}
    imgs = []
    # Ugoira conversion target and filename format from the UI (headless-safe).
    ugoira_ext = [None, '.gif', '.webp', '.png'
                  ][utils.ui_setting.ugoira_convert.currentIndex(
                  )] if utils.ui_setting else None
    format_ = compatstr(utils.ui_setting.pixivFormat.currentText()
                        ) if utils.ui_setting else 'id_ppage'
    max_pid = get_max_range(cw)
    if api.illust_id(url):  # Single post
        id_ = api.illust_id(url)
        data = api.illust(id_)
        # '******' in the payload marks a logged-out response.
        login = '******' not in data
        if FORCE_LOGIN and not login:  #
            raise errors.LoginRequired()
        if data['xRestrict'] and not login:
            raise errors.LoginRequired('R-18')
        info['artist'] = data['userName']
        info['artist_id'] = data['userId']
        info['raw_title'] = data['illustTitle']
        info['title'] = '{} (pixiv_illust_{})'.format(info['raw_title'], id_)
        info['create_date'] = parse_time(data['createDate'])
        tags_illust = set(tag['tag'] for tag in data['tags']['tags'])
        if tags_matched(tags_illust, tags_add, cw):
            if data['illustType'] == 2:  # ugoira
                data = api.ugoira_meta(id_)
                ugoira = {
                    'ext': ugoira_ext,
                    'delay': [frame['delay'] for frame in data['frames']],
                }
                img = Image(data['originalSrc'], url, id_, 0, format_, info, cw, ugoira=ugoira)
                imgs.append(img)
            else:
                data = api.pages(id_)
                for img in data:
                    img = Image(img['urls']['original'], url, id_, len(imgs), format_, info, cw)
                    imgs.append(img)
        else:
            print('tags mismatched')
    elif '/bookmarks/' in url or 'bookmark.php' in url:  # User bookmarks
        id_ = api.user_id(url)
        if id_ is None:  #
            id_ = my_id()
        # Hidden bookmarks are only visible for one's own account.
        if id_ == my_id():
            rests = ['show', 'hide']
        else:
            rests = ['show']
        process_user(id_, info, api)
        info['title'] = '{} (pixiv_bmk_{})'.format(info['artist'],
                                                   info['artist_id'])
        ids = []
        ids_set = set()
        for rest in rests:
            # Page through bookmarks until a page adds no new ids.
            offset = 0
            while len(ids) < max_pid:
                data = api.bookmarks(id_, offset, rest=rest)
                c = 0
                for id in [work['id'] for work in data['works']]:
                    if id in ids_set:
                        continue
                    ids_set.add(id)
                    ids.append(id)
                    c += 1
                if not c:
                    break
                offset += LIMIT
                if depth == 0:
                    check_alive(cw)
        process_ids(ids, info, imgs, cw, depth)
    elif '/tags/' in url or 'search.php' in url:  # Search
        q = unquote(
            re.find(r'/tags/([^/]+)', url)
            or re.find('[?&]word=([^&]*)', url, err='no tags'))
        info['title'] = '{} (pixiv_search_{})'.format(q, q.replace(' ', '+'))
        # Pass every supported search filter through from the query string.
        qs = query_url(url)
        order = qs.get('order', ['date_d'])[0]
        mode = qs.get('mode', ['all'])[0]
        s_mode = qs.get('s_mode', ['s_tag_full'])[0]
        scd = qs.get('scd', [None])[0]
        ecd = qs.get('ecd', [None])[0]
        type_ = qs.get('type', ['all'])[0]
        wlt = qs.get('wlt', [None])[0]
        wgt = qs.get('wgt', [None])[0]
        hlt = qs.get('hlt', [None])[0]
        hgt = qs.get('hgt', [None])[0]
        blt = qs.get('blt', [None])[0]
        bgt = qs.get('bgt', [None])[0]
        ratio = qs.get('ratio', [None])[0]
        tool = qs.get('tool', [None])[0]
        logs = [
            'order: {}'.format(order),
            'mode: {}'.format(mode),
            's_mode: {}'.format(s_mode),
            'scd / ecd: {} / {}'.format(scd, ecd),
            'type: {}'.format(type_),
            'wlt / wgt: {} / {}'.format(wlt, wgt),
            'hlt / hgt: {} / {}'.format(hlt, hgt),
            'blt / bgt: {} / {}'.format(blt, bgt),
            'ratio: {}'.format(ratio),
            'tool: {}'.format(tool),
        ]
        print_('\n'.join(logs))
        ids = []
        ids_set = set()
        p = 1
        while len(ids) < max_pid:
            data = api.search(q, order, mode, p=p, s_mode=s_mode, scd=scd, ecd=ecd, type_=type_, wlt=wlt, wgt=wgt, hlt=hlt, hgt=hgt, blt=blt, bgt=bgt, ratio=ratio, tool=tool)
            c = 0
            for id in [
                    illust['id'] for illust in data['illustManga']['data']
                    if 'id' in illust
            ]:
                if id in ids_set:
                    continue
                ids_set.add(id)
                ids.append(id)
                c += 1
            if not c:
                break
            p += 1
        process_ids(ids, info, imgs, cw, depth)
    elif 'bookmark_new_illust.php' in url or 'bookmark_new_illust_r18.php' in url:  # Newest works: Following
        r18 = 'bookmark_new_illust_r18.php' in url
        id_ = my_id()
        process_user(id_, info, api)
        info['title'] = '{} (pixiv_following_{}{})'.format(
            info['artist'], 'r18_' if r18 else '', info['artist_id'])
        ids = []
        ids_set = set()
        p = 1
        while len(ids) < max_pid:
            data = api.following(p, r18=r18)
            c = 0
            for id in data['page']['ids']:
                if id in ids_set:
                    continue
                ids_set.add(id)
                ids.append(id)
                c += 1
            if not c:
                break
            p += 1
        process_ids(ids, info, imgs, cw, depth)
    elif api.user_id(url):  # User illusts
        # URL may pin a work type and/or a tag: /users/<id>/<type>/<tag>
        m = re.search(r'/users/[0-9]+/([\w]+)/?([^\?#/]*)', url)
        type_ = {
            'illustrations': 'illusts',
            'manga': 'manga'
        }.get(m and m.groups()[0])
        if type_:
            types = [type_]
        else:
            types = ['illusts', 'manga']
        if m:
            tag = unquote(m.groups()[1]) or None
        else:
            tag = None
        print_('types: {}, tag: {}'.format(types, tag))
        id_ = api.user_id(url)
        process_user(id_, info, api)
        data = api.profile(id_)
        info['title'] = '{} (pixiv_{})'.format(info['artist'],
                                               info['artist_id'])
        ids = []
        for type_ in types:
            illusts = data[type_]
            if not illusts:
                continue
            ids += list(illusts.keys())
        # Newest first (ids are numeric strings).
        ids = sorted(ids, key=int, reverse=True)
        if not ids:
            raise Exception('no imgs')
        process_ids(ids, info, imgs, cw, depth,
                    tags_add=[tag] if tag else None)
    else:
        raise NotImplementedError()
    info['imgs'] = imgs[:max_pid]
    return info