def pixiv_render(item, base_path, debug=False):
    """Download every image of the pixiv illust referenced by *item*.

    Files are stored under ``base_path + '<user name>_<user id>'``.

    :param item: object whose ``get_remote()`` returns a pixiv URL.
    :param base_path: directory prefix the artist folder is created under
        (joined by plain concatenation, so include any trailing separator).
    :param debug: when True, print one progress dot per downloaded file.
    :returns: tuple ``(relative_paths, detail)`` — *relative_paths* is a list
        of ``'<artist dir>/<file name>'`` strings, *detail* is the raw
        ``illust_detail`` API response.
    """
    global pixiv_api
    if pixiv_api is None:
        # Lazily create and authenticate a shared module-level API client.
        pixiv_api = AppPixivAPI()
        pixiv_api.login(pixiv_username, pixiv_password)

    illust_id = get_illust_id(item.get_remote())
    detail = pixiv_api.illust_detail(illust_id)
    path = (str(detail['illust']['user']['name']) + '_'
            + str(detail['illust']['user']['id']))
    cpath(base_path + path)

    urls = []
    if detail['illust']['page_count'] > 1:
        # Multi-page post: prefer the original resolution; otherwise fall
        # back to the last (largest) size listed for that page.
        # BUG FIX: the old code caught NameError here, which a dict lookup
        # can never raise — only KeyError applies, and .get() avoids both.
        for page in detail['illust']['meta_pages']:
            image_urls = page.get('image_urls', {})
            page_url = image_urls.get('original')
            if page_url is None and image_urls:
                page_url = list(image_urls.values())[-1]
            if page_url is not None:
                urls.append(page_url)
    if not urls:
        # Single-page post (or the multi-page scan found nothing usable).
        try:
            urls.append(detail['illust']['meta_single_page']['original_image_url'])
        except KeyError:
            try:
                urls.append(detail['illust']['image_urls']['large'])
            except KeyError:
                pass

    ret = []
    for url in urls:
        name = (str(detail['illust']['title']) + '_' + str(illust_id)
                + os.path.basename(url))
        ret.append(path + '/' + name)
        pixiv_api.download(url, name=name,
                           path=os.path.abspath(base_path + path))
        if debug:
            print('.', end='', flush=True)
    return ret, detail
async def picSearch(self, ctx, title: str = ""):
    """Fetch a random pixiv illustration and post it as an embed.

    :param ctx: command context used to send the reply.
    :param title: "" → daily male ranking, "r18" → daily male R-18 ranking,
        anything else is used as a title/caption search query.
    """
    g_config = ConfigManager.instance().get_global_config()
    pixivAPI = AppPixivAPI()
    # pixivAPI.login(config_dict.get("Pixiv")['ID'], config_dict.get("Pixiv")['Pass'])
    try:
        # Token-based auth; username/password login is rejected by pixiv.
        # BUG FIX: was a bare `except:` — narrow to Exception so
        # KeyboardInterrupt/SystemExit are not swallowed.
        pixivAPI.auth(refresh_token=g_config.get("Pixiv")["TOKEN"])
    except Exception:
        return await ctx.send("MAID ERROR: F**K PIXIV! REQUEST FAILED, PLEASE TRY AGAIN!")

    # Select the API call for the requested mode, then issue it once;
    # the three duplicated try/except blocks are consolidated here.
    try:
        if title == "":
            result = pixivAPI.illust_ranking('day_male')
        elif title == "r18":
            result = pixivAPI.illust_ranking('day_male_r18')
        else:
            result = pixivAPI.search_illust(title, sort="popular_desc", search_target='title_and_caption')
    except Exception:
        return await ctx.send("MAID ERROR: F**K PIXIV! REQUEST FAILED, PLEASE TRY AGAIN!")

    embed = nextcord.Embed(color=nextcord.Color.dark_red())
    if result.illusts:
        illust = random.choice(result.illusts)
        # One fixed local file is reused; drop any leftover from a prior call.
        if os.path.isfile('illust.jpg'):
            os.remove('illust.jpg')
        pixivAPI.download(illust.image_urls.large, fname='illust.jpg')
        embed.title = illust.title
        embed.url = f"https://www.pixiv.net/artworks/{illust.id}"
        embed.set_image(url="attachment://illust.jpg")
        embed.set_author(name=illust.user.name, url=f"https://www.pixiv.net/users/{illust.user.id}")
        await ctx.send(embed=embed, file=nextcord.File('illust.jpg'))
    else:
        embed.title = "Image can't be found! 无法找到图片!"
        await ctx.send(embed=embed)
def main():
    """Demo: fetch a daily ranking and exercise four download() call styles."""
    use_sni_bypass = False
    if use_sni_bypass:
        # Same as AppPixivAPI, but bypasses the GFW.
        api = ByPassSniApi()
        api.require_appapi_hosts()
    else:
        api = AppPixivAPI()
    api.auth(refresh_token=_REFRESH_TOKEN)

    # Rankings for a fixed, reproducible date.
    ranking = api.illust_ranking("day", date="2019-01-01")

    target_dir = "illusts"
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    # Download the first entries, trying the four argument
    # combinations from MR#102, one per position.
    for position, illust in enumerate(ranking.illusts[:4]):
        image_url = illust.meta_single_page.get("original_image_url", illust.image_urls.large)
        print("%s: %s" % (illust.title, image_url))
        if position == 0:
            api.download(image_url, path=target_dir, name=None)
        elif position == 1:
            extension = os.path.splitext(os.path.basename(image_url))[1]
            generated_name = "illust_id_%d_%s%s" % (illust.id, illust.title, extension)
            api.download(image_url, path=target_dir, name=generated_name)
        elif position == 2:
            api.download(image_url, path=target_dir, fname="illust_%s.jpg" % (illust.id))
        else:
            # path will not work because fname is an open file handler.
            api.download(
                image_url,
                path="/foo/bar",
                fname=open("%s/illust_%s.jpg" % (target_dir, illust.id), "wb"),
            )
import json
import os
import re

from pixivpy3 import AppPixivAPI

# Load pixiv credentials from a local JSON file ({"email": ..., "password": ...}).
with open('credentials.json') as cred_file:
    credentials = json.load(cred_file)

# Keep only pixiv URLs and take the last path component of each.
with open('urls.txt') as url_file:
    url_tails = [line.split('/')[-1] for line in url_file if 'pixiv.net' in line]

# BUG FIX (idiom): the loop/comprehension variable was `id`, shadowing the
# builtin; the first digit run in each tail is the illust id.
illust_ids = [re.findall(r'\d+', tail)[0] for tail in url_tails]

api = AppPixivAPI()
api.login(credentials['email'], credentials['password'])

# Download the original image of every (single-page) illust.
for illust_id in illust_ids:
    detail = api.illust_detail(illust_id)
    api.download(detail.illust['meta_single_page']['original_image_url'])
class PixivDownloader:
    """Download pixiv posts (single images, collections, manga, ugoira)."""

    def __init__(self, client=None, username=None, password=None, log_level=logging.WARNING):
        """Wrap an existing API *client* or log in with fresh credentials.

        :param client: pre-authenticated API client; when given, username and
            password are ignored.
        :param username: pixiv login name; must be paired with *password*.
        :param password: pixiv password; must be paired with *username*.
        :param log_level: level for this downloader's own logger.
        :raises AttributeError: when no client is given and only one of
            username/password is supplied.
        """
        if not client and (bool(username) != bool(password)):
            raise AttributeError(
                'If no client is given both username and password must be given'
            )

        if client:
            self.api = client
        else:
            self.api = AppPixivAPI()

        if not client and username and password:
            self.api.login(username, password)

        self.logger = logging.getLogger('PixivDownloader')
        stdout = logging.StreamHandler()
        self.logger.addHandler(stdout)
        self.logger.setLevel(log_level)

    def login(self, username=None, password=None, refresh_token=None):
        """Authenticate with a refresh token or username/password pair."""
        if refresh_token:
            self.logger.info('Loging in with refresh_token')
        elif username:
            self.logger.info('Loging in with username %s', username)
        else:
            self.logger.info('Loging')
        return self.api.auth(username=username, password=password,
                             refresh_token=refresh_token)

    def logout(self):
        """Drop the authenticated session by replacing the API client."""
        self.logger.info('Logout')
        self.api = AppPixivAPI()

    def get_id_from_url(self, url):
        """Return the first run of digits in the URL path as the post id.

        :raises ValueError: when the path contains no digits.
        """
        path = urlparse(url).path
        ids = re.findall(r'(\d+)', path)
        if not ids:
            raise ValueError('Url does not contain post id')
        return ids[0]

    def download_by_id(self, post_id, output_dir):
        """Fetch post metadata by id and dispatch the download."""
        data = self.api.illust_detail(post_id)
        if data.get('error'):
            raise PixivDownloaderError(
                'Could not get post info or post doesn\'t exist.', data)
        return self.download(data.illust, output_dir)

    def download_by_url(self, url, output_dir):
        """Convenience wrapper: extract the id from *url*, then download."""
        return self.download_by_id(self.get_id_from_url(url), output_dir)

    def download(self, post, output_dir):
        """Dispatch *post* to the matching downloader generator.

        :returns: a generator yielding absolute paths of downloaded files.
        :raises PixivDownloaderError: for unsupported post types.
        """
        output_dir = Path(output_dir).expanduser().absolute()
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
            self.logger.debug('Created dir "%s"', output_dir)

        # BUG FIX (idiom): the label variable was named `type`,
        # shadowing the builtin.
        if post.type == 'illust' and not post.meta_pages:
            downloader = self.download_illust
            post_type = 'Image'
        elif post.type == 'illust' and post.meta_pages:
            downloader = self.download_illust_collection
            post_type = 'Image Collection'
        elif post.type == 'ugoira':
            downloader = self.download_ugoira
            post_type = 'Video'
        elif post.type == 'manga':
            downloader = self.download_manga
            post_type = 'Manga'
        else:
            raise PixivDownloaderError(
                f'Post type "{post.type}" not supported')

        self.logger.info('Initialize "%s" downloader for post %s', post_type, post.id)
        return downloader(post, output_dir)

    def download_illust(self, post, output_dir):
        """Download a single-image post; yields the resulting path."""
        image_url = post.meta_single_page.get('original_image_url',
                                              post.image_urls.large)
        # '_webp' URLs carry a misleading suffix; force the real extension.
        if '_webp' in image_url:
            extension = 'webp'
        else:
            extension = os.path.splitext(image_url)[1].lstrip('.')
        filename = self.get_filename(post, extension)

        self.logger.info('Downloading "%s"', image_url)
        self.api.download(image_url, path=output_dir, name=filename, replace=True)
        yield (Path(output_dir) / filename).absolute()

    def download_illust_collection(self, post, output_dir):
        """Download all pages of a multi-image post into *output_dir*."""
        output_dir = Path(output_dir)
        yield from self._download_meta_pages(post, output_dir)

    def download_manga(self, post, output_dir):
        """Download a manga post into its own '<title>-<account>' subfolder."""
        output_dir = Path(output_dir) / f'{post.title}-{post.user.account}'
        if not output_dir.is_dir():
            output_dir.mkdir(parents=True, exist_ok=True)
            self.logger.debug('Created dir "%s"', output_dir)
        yield from self._download_meta_pages(post, output_dir)

    def _download_meta_pages(self, post, output_dir):
        """Download every page of *post*, yielding each absolute path.

        Pages are numbered from 1 with a zero-padded '-NN' suffix.
        """
        for index, image in enumerate(post.meta_pages, 1):
            image_url = image.image_urls.get('original', image.image_urls.large)
            if '_webp' in image_url:
                extension = 'webp'
            else:
                extension = os.path.splitext(image_url)[1].lstrip('.')

            filename = self.get_filename(post, extension, suffix=f'-{index:0>2}')
            self.logger.info('Downloading "%s"', image_url)
            self.api.download(image_url, path=str(output_dir), name=filename, replace=True)
            yield (output_dir / filename).absolute()

    def download_ugoira(self, post, output_dir):
        """Download an ugoira's frame zip and render it into an mp4."""
        ugoira_data = self.api.ugoira_metadata(post.id).ugoira_metadata
        zip_url = ugoira_data.zip_urls.get('large', ugoira_data.zip_urls.medium)

        with TemporaryDirectory() as dir:
            temp_dir = Path(dir)
            # BUG FIX: was the literal string '{post.id}.zip' (missing the
            # f prefix), so every download used the same placeholder name.
            filename = f'{post.id}.zip'
            self.logger.info('Downloading "%s"', zip_url)
            self.api.download(zip_url, path=str(temp_dir), name=filename)

            frames_dir = temp_dir / 'frames'
            os.mkdir(frames_dir)
            self._extract_zip(temp_dir / filename, frames_dir)

            video_name = self.get_filename(post, 'mp4')
            video_file = temp_dir / video_name
            # NOTE(review): only the first frame's delay is used as a constant
            # framerate — variable per-frame delays are not honored.
            self._generate_mp4_from_frames(video_file, frames_dir,
                                           ugoira_data.frames[0].delay)

            final_path = (Path(output_dir) / video_name).absolute()
            shutil.move(video_file, final_path)
            yield final_path

    def get_filename(self, post, extension, prefix=None, suffix=None):
        """Build '<prefix><id>-<title><suffix>.<ext>', sanitized for paths.

        '/' and ' ' are replaced with '_' so the title cannot escape the
        target directory or produce awkward file names.
        """
        suffix = suffix or ''
        prefix = prefix or ''
        filename = f'{prefix}{post.id}-{post.title}{suffix}.{extension}'.replace(
            '/', '_').replace(' ', '_')
        return filename

    def _extract_zip(self, zip_file, output_dir):
        """Extract *zip_file* into *output_dir*."""
        self.logger.info('Extract "%s"', zip_file)
        # BUG FIX (idiom): the context variable shadowed the zip_file parameter.
        with ZipFile(zip_file, 'r') as archive:
            archive.extractall(output_dir)

    def _generate_mp4_from_frames(self, output_file, frames_dir, delay):
        """Encode the extracted frames into an mp4 at 1000/delay fps."""
        self.logger.info('Generate video to "%s"', output_file)
        # Frame files sort lexicographically in playback order.
        frames = sorted(
            map(lambda file: os.path.join(str(frames_dir), file),
                os.listdir(frames_dir)))
        frames = list(map(imread, frames))
        framerate = 1000 / delay

        height, width, layers = frames[0].shape
        video = VideoWriter(str(output_file), VideoWriter_fourcc(*'mp4v'),
                            framerate, (width, height))
        for frame in frames:
            video.write(frame)
        destroyAllWindows()
        video.release()
class APIHandler:
    """Singleton that handles all the API interactions in the program"""

    def __init__(self):
        # Login runs on a background thread so startup is not blocked;
        # _await_login() joins this thread before the first real request.
        self._api_thread = threading.Thread(target=self._login)
        self._login_started = False
        self._login_done = False

        self._api = AppPixivAPI()  # Object to login and request on

        # Set in self.start() (because singleton is instantiated before config)
        self._credentials: 'dict[str, str]'
        self._response: 'Json'

    def start(self, credentials):
        """Start logging in. The only setup entry point that is public"""
        # Idempotent: a second call must not start the thread twice.
        if not self._login_started:
            self._credentials = credentials
            self._api_thread.start()
            self._login_started = True

    def _await_login(self):
        """Wait for login to finish, then assign PixivAPI session to API"""
        if not self._login_done:
            self._api_thread.join()
            self._login_done = True

    def _login(self):
        # Thread target; kept as a thin wrapper around the token login.
        self._login_with_token()

    def _login_with_token(self):
        # TODO: refresh the token if it expired
        try:
            self._response = self._api.auth(
                refresh_token=self._credentials['refresh_token'])
        except PixivError as e:
            # Best-effort diagnostics only; the handler stays alive so the
            # UI can still quit cleanly.
            print('')
            print(e)
            print('If this is a cloudflare captcha issue, just quit and retry')
            print('It is not a problem with koneko or pixivpy')
            print('Otherwise, please report to '
                  'https://github.com/akazukin5151/koneko/issues')
            print("Press 'q' and enter to exit")

    # Public API requests for user id
    def get_user_id(self) -> 'Json':
        # The id of the logged-in user, taken from the auth response.
        self._await_login()
        return self._response['user']['id']

    # Public API request functions for each mode
    # Each request waits for login first, and is retried up to 3 times
    # on connection problems via funcy.retry.
    @funcy.retry(tries=3, errors=(ConnectionError, PixivError))
    @utils.spinner('')
    def artist_gallery(self, artist_user_id, offset) -> 'Json':
        """Mode 1"""
        self._await_login()
        return self._api.user_illusts(artist_user_id, offset=offset)

    @funcy.retry(tries=3, errors=(ConnectionError, PixivError))
    def protected_illust_detail(self, image_id) -> 'Json':
        """Mode 2"""
        self._await_login()
        return self._api.illust_detail(image_id)

    @funcy.retry(tries=3, errors=(ConnectionError, PixivError))
    def following_user_request(self, user_id, publicity, offset) -> 'Json':
        """Mode 3"""
        self._await_login()
        return self._api.user_following(user_id, restrict=publicity, offset=offset)

    @funcy.retry(tries=3, errors=(ConnectionError, PixivError))
    def search_user_request(self, searchstr, offset) -> 'Json':
        """Mode 4"""
        self._await_login()
        return self._api.search_user(searchstr, offset=offset)

    @funcy.retry(tries=3, errors=(ConnectionError, PixivError))
    @utils.spinner('')
    def illust_follow_request(self, restrict, offset) -> 'Json':
        """Mode 5"""
        self._await_login()
        return self._api.illust_follow(restrict=restrict, offset=offset)

    @funcy.retry(tries=3, errors=(ConnectionError, PixivError))
    @utils.spinner('')
    def illust_related_request(self, image_id, offset) -> 'Json':
        """Mode 15 (1.5 * 10 so it's an int)"""
        self._await_login()
        return self._api.illust_related(illust_id=image_id, offset=offset)

    @funcy.retry(tries=3, errors=(ConnectionError, PixivError))
    @utils.spinner('')
    def illust_recommended_request(self, offset) -> 'Json':
        """Mode 6"""
        self._await_login()
        return self._api.illust_recommended(offset=offset)

    # Download
    @funcy.retry(tries=3, errors=(ConnectionError, PixivError))
    def protected_download(self, url, path, name) -> 'IO':
        """Protect api download function with funcy.retry so it doesn't crash"""
        self._await_login()
        self._api.download(url, path=path, name=name)
class PixivSpider:
    """Crawl pixiv rankings/recommendations and download new illusts.

    Download history is persisted in ``info.json`` so already-seen
    illusts are skipped on later runs.
    """

    def __init__(self):
        """ Init PixivSpider """
        self.api = AppPixivAPI()
        self.directory = 'download'
        if not os.path.exists('info.json'):
            self.data = {'illusts': []}
            self.count = 0
            print("Create new info.json file")
        else:
            with open('info.json', 'r') as f:
                self.data = json.load(f)
            self.count = len(self.data['illusts'])
            print("Load existing info.json file")
            print("Existed illusts count: %d" % self.count)
        # BUG FIX: was `Set()` (undefined / uninstantiable typing alias);
        # the builtin set is what was intended.
        self.illusts_names = set()
        for illust in self.data['illusts']:
            self.illusts_names.add(illust['name'])

    def login(self):
        """ Login pixiv.net """
        with open('login.json') as f:
            login = json.load(f)
        self.api.login(login["username"], login["password"])
        # BUG FIX: print() used logging-style ("msg", arg) which printed the
        # raw format string; interpolate explicitly instead.
        print("Login pixiv.net with user %s." % login["username"])

    def exit(self):
        """ Stop spider and print logs """
        # Persist download history for the next run.
        with open('info.json', 'w') as f:
            json.dump(self.data, f, indent=2)
        print("Finish! Total downloaded illusts number: %d" % self.count)

    def create_download_folder(self):
        """ Setup image download directory """
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)

    def download_illusts(self, illusts=None):
        """ Download illusts

        Skips anything already recorded in the history; each new file is
        registered in ``self.data`` before downloading.
        """
        for illust in illusts:
            image_url = illust.meta_single_page.get('original_image_url',
                                                    illust.image_urls.large)
            print(u"👀 Found illust: %s (%s)" % (illust.title, image_url))
            url_basename = os.path.basename(image_url)
            extension = os.path.splitext(url_basename)[1]
            name = "%d_%s%s" % (illust.id, illust.title, extension)
            # Titles may contain '/', which would create subdirectories.
            name = name.replace('/', ':')
            if name not in self.illusts_names:
                self.count += 1
                self.data['illusts'].append({
                    'id': self.count,
                    'name': name,
                    'illust_id': illust.id,
                    'illustrator_id': illust.user.id,
                    'source_url': image_url
                })
                self.illusts_names.add(name)
                name = "%d_" % self.count + name
                try:
                    self.api.download(image_url, path=self.directory, name=name)
                except PixivError:
                    print(u"😢 PixivError!!! Skip this illust")
                    continue
                print(u"✅ Download illust: %s (%s)" % (illust.title, image_url))
            else:
                print(u"✨ Already download: %s: %s" % (illust.title, image_url))

    def get_user_ids_from_illusts(self, illusts):
        """ Get user ids by illusts """
        return [illust.user.id for illust in illusts]

    def get_top_ranking_illusts(self,
                                count=DEFAULT_DOWNLOAD_TOP_RANKING_COUNT,
                                ranking_type=RankingType.DAY,
                                date=None,
                                download=False):
        """ Get top ranking illusts

        :count: the number of illusts that we want to download
        :ranking_type: ranking type
        :date: ranking date (YYYY-MM-DD); defaults to today
        :download: download flag
        """
        if date is None:
            # BUG FIX: the old default was evaluated once at import time,
            # so long-running processes kept requesting a stale date.
            date = datetime.today().strftime("%Y-%m-%d")
        json_result = self.api.illust_ranking(ranking_type, date=date)
        illusts = self.get_illusts_from_all_pages(json_result,
                                                  json_result.illusts,
                                                  count, download)
        return illusts[:count]

    def get_recommended_illusts(self,
                                count=DEFAULT_DOWNLOAD_RECOMMENDED_COUNT,
                                content_type=ContentType.ILLUST,
                                download=False):
        """ Get recommended illusts

        :count: the number of illusts that we want to download
        :content_type: content type
        :download: download flag
        """
        json_result = self.api.illust_recommended(content_type)
        illusts = self.get_illusts_from_all_pages(json_result,
                                                  json_result.illusts,
                                                  count, download)
        return illusts[:count]

    def get_illusts_by_user_ids(self, user_ids,
                                count=DEFAULT_DOWNLOAD_EACH_USER_COUNT,
                                content_type=ContentType.ILLUST,
                                download=False):
        """ Get illusts by user id """
        ret = {}
        for user_id in user_ids:
            json_result = self.api.user_illusts(user_id=user_id,
                                                type=content_type)
            illusts = self.get_illusts_from_all_pages(json_result,
                                                      json_result.illusts,
                                                      count, download)
            ret[user_id] = illusts[:count]
        return ret

    def get_illusts_from_all_pages(self, json_result, illusts, count,
                                   download=False):
        """ Get illusts from all pages

        Follows ``next_url`` pagination until *count* illusts are collected
        or no further page exists.
        """
        while len(json_result) != 0 and len(illusts) < count:
            next_qs = self.api.parse_qs(json_result.next_url)
            if next_qs is None:
                break
            try:
                # NOTE(review): pagination always goes through illust_ranking,
                # even when the first page came from recommended/user_illusts —
                # TODO confirm this is intended for those callers.
                json_result = self.api.illust_ranking(**next_qs)
            except TypeError:
                break
            illusts += json_result.illusts
        if download:
            count = min(count, len(illusts))
            self.download_illusts(illusts=illusts[:count])
        return illusts