예제 #1
0
 def __init__(self, **kwargs):
     """Create both Pixiv API clients and start with stale credentials.

     kwargs are forwarded both to the superclass and to each client.
     """
     super().__init__(**kwargs)
     self.aapi = AppPixivAPI(**kwargs)
     self.papi = PixivAPI(**kwargs)
     # No token yet, and an epoch timestamp makes the session look
     # expired so the first API call triggers reauth().
     self.refresh_token = ""
     self.last_auth = datetime.datetime.fromtimestamp(0)
예제 #2
0
 def __init__(self, auth, work_path=os.path.abspath('../pixiv/')):
     """Log in to Pixiv and remember the working directory.

     auth: sequence of positional login arguments passed to PixivAPI.login.
     work_path: directory used for downloads (defaults to ../pixiv/).
     """
     self._wd = work_path
     api = PixivAPI()
     api.login(*auth)
     self._api = api
예제 #3
0
 def __connect(self):
     """Lazily create and log in the shared Pixiv client.

     Returns True once a logged-in client is cached on self.client.
     Login errors propagate to the caller.

     Fixes: the original re-raised via `raise e` (losing nothing but
     adding noise) followed by an unreachable `return False`, and it
     assigned self.client *before* login, so a failed login cached a
     half-initialized client and every later call returned True.
     """
     if self.client is None:
         client = PixivAPI()
         # cfg keys are Chinese for "account" / "password".
         client.login(cfg['pixiv']['帳號'], cfg['pixiv']['密碼'])
         # Only cache the client after login succeeds.
         self.client = client
     return True
예제 #4
0
	def __init__(self, _user, _pass, default_path=r'F:/PIXIV'):
		"""Store credentials and set up the downloader's collaborators.

		_user/_pass: Pixiv credentials kept for later login.
		default_path: root folder where downloads are written.
		"""
		self._user = _user
		self._pass = _pass
		self.default_path = default_path
		# Ids already present locally; filled elsewhere.
		self.local_pixiv_ids = []
		# Work queue of items to process.
		self.pastes = Queue()
		self.api = PixivAPI()
		self.pixiv_utils = PixivUtils()
예제 #5
0
 def __init__(self, username, password):
     """Load the filter list, then log in to Pixiv.

     username/password: credentials passed to PixivAPI.login.
     Raises OSError if src/main/resources/fgl.txt is missing.

     Fixes: the original opened the filter file without a context
     manager, leaking the handle if reading raised.
     """
     self._username = username
     self._password = password
     # `with` guarantees the file is closed even on error.
     with open('src/main/resources/fgl.txt', 'r') as fgl_file:
         self._fgl = [line.strip() for line in fgl_file]
     self._api = PixivAPI(**_REQUESTS_KWARGS)
     self._api.login(self._username, self._password)
예제 #6
0
파일: pixie.py 프로젝트: Xdynix/PixivPixie
    def __init__(self, auto_re_login=True, **requests_kwargs):
        """Build both API clients and initialize authentication state.

        auto_re_login: when True, callers may re-login automatically.
        requests_kwargs: forwarded to both PixivAPI and AppPixivAPI.
        """
        self.auto_re_login = auto_re_login
        self._requests_kwargs = requests_kwargs

        # Public-API and App-API clients share the same request options.
        self._papi = PixivAPI(**requests_kwargs)
        self._aapi = AppPixivAPI(**requests_kwargs)

        # Auth bookkeeping: nothing is authenticated yet, and the lock
        # serializes auth checks.
        self._check_auth_lock = Lock()
        self._has_auth = False
        self._last_login = None
        self._username = None
        self._password = None
예제 #7
0
 def __init__(self, name, app_config=None):
     """Load config/data files for handler *name* and log in to Pixiv.

     name: handler name; used for the per-handler data file.
     app_config: optional dict of application settings (directories).

     Fixes: the original used a mutable default argument
     (`app_config={}`); `None` sentinel is backward-compatible since
     the dict was only ever read via .get().
     """
     if app_config is None:
         app_config = {}
     config_path = Path(app_config.get('handlers_config_dir', '.')) / 'pixiv.toml'
     data_path = Path(app_config.get('data_dir', './data/')) / '{}.toml'.format(name)
     self.config = Config(config_path, write_defaults=True, defaults={
         'refresh': 'xxxx',
     })
     self.config.save()
     self.data = Config(data_path)
     self.age_filter = None
     self.api = PixivAPI()
     # Only attempt login when a refresh token has been configured.
     if self.config.get('refresh'):
         print('logging in to Pixiv...')
         login_response = self.api.auth(refresh_token=self.config['refresh'])
         print('logged in into account {0.name} ({0.account}) [{0.id}]'.format(login_response['response']['user']))
예제 #8
0
파일: pixiv.py 프로젝트: AmazingThew/ArtBot
 def __init__(self, dbDict, config):
     """Read Pixiv settings, create download folders, and authorize.

     dbDict: shared database dictionary kept on the instance.
     config: mapping providing PIXIV_USERNAME / PIXIV_PASSWORD /
         PIXIV_DOWNLOAD_DIRECTORY.
     """
     self.config = config
     self.dbDict = dbDict
     self.username = config['PIXIV_USERNAME']
     self.password = config['PIXIV_PASSWORD']
     download_root = config['PIXIV_DOWNLOAD_DIRECTORY']
     self.imageDirectory = os.path.join(download_root, 'images')
     self.ugoiraDirectory = os.path.join(download_root, 'ugoira')
     self.avatarDirectory = os.path.join(download_root, 'avatars')
     # Create all target folders up front; existing ones are fine.
     for directory in (self.imageDirectory,
                       self.ugoiraDirectory,
                       self.avatarDirectory):
         os.makedirs(directory, exist_ok=True)
     self.api = PixivAPI()
     self.authorize()
예제 #9
0
 def importIllustJSON(self):
     """Fetch the work's metadata from Pixiv and populate attributes.

     Reads self.ID, appends page image URLs to self.image_URLs, and
     stores author/title/tag fields on self.

     Fixes: for multi-page works the original iterated
     `range(self.pages - 1)` and silently dropped the last page's URL.
     """
     # Log in to the Pixiv API.
     self.api = PixivAPI()
     self.api.login(pixivLogin["pixivusername"],
                    pixivLogin["pixivpassword"])
     userURL = "https://www.pixiv.net/member_id="
     self.JSON = self.api.works(self.ID)['response'][0]
     self.manga = self.JSON['is_manga']
     self.account = self.JSON['user']['account']
     self.name = self.JSON['user']['name']
     self.user_ID = self.JSON['user']['id']
     self.user_URL = userURL + str(self.user_ID)
     self.title = self.JSON['title']
     self.tags = self.JSON['tags']
     self.pages = self.JSON['page_count']
     if self.pages > 1:
         # One metadata entry per page; collect every page's large URL.
         for page in range(self.pages):
             self.image_URLs.append(self.JSON['metadata']["pages"][page]
                                    ["image_urls"]['large'])
     else:
         self.image_URLs.append(self.JSON['image_urls']['large'])
예제 #10
0
    def run(self):
        # NOTE(review): this snippet is truncated -- the `while True:`
        # loop has no body here, so the method is incomplete as shown.
        # Each thread logs in with its own client instance.
        self.client = PixivAPI()
        self.client.login(cfg['pixiv']['帳號'], cfg['pixiv']['密碼'])

        while True:
예제 #11
0
from threading import Thread 
from api import cfg
from pixivpy3 import PixivAPI

# Create a shared module-level client at import time.  On any failure
# (bad credentials, network down) fall back to None so pixiv_search()
# can report the feature as unavailable instead of crashing.
try:
    pixiv_client = PixivAPI()
    # cfg keys are Chinese for "account" / "password".
    pixiv_client.login(cfg['pixiv']['帳號'], cfg['pixiv']['密碼'])
except Exception as e:
    print('<pixiv模組初始失敗> %s' % str(e))
    pixiv_client = None


class Pixiv(Thread):
    """Background thread that owns its own Pixiv client.

    NOTE(review): run() is truncated in this snippet -- the
    `while True:` loop body is missing, so the class is incomplete
    as shown.
    """
    def __init__(self):
        Thread.__init__(self)

    def run(self):
        # The thread logs in with its own client, separate from the
        # module-level pixiv_client.
        self.client = PixivAPI()
        self.client.login(cfg['pixiv']['帳號'], cfg['pixiv']['密碼'])

        while True:



def pixiv_search(key):
    """Search Pixiv works tagged *key* and format one line per result.

    Returns an unavailability notice (str) when the module-level client
    failed to initialize; otherwise a newline-joined summary of results.

    Fixes: the original applied `%` to a URL string that contained no
    placeholder ('...illust_id=' % i['id']), which raises TypeError at
    runtime; the id is now interpolated explicitly.  The original also
    built its result list and fell off the end returning None --
    presumably truncated; the list is now joined and returned so both
    branches yield a string (NOTE(review): confirm against callers).
    """
    if pixiv_client is None:
        return '此功能現在沒有開放'
    found = pixiv_client.search_works(key, mode='tag')
    lines = []
    for work in found['response']:
        lines.append('(*%s) %s\n%s' % (
            work['stats']['favorited_count']['public'],
            work['title'],
            'pixiv.net/member_illust.php?mode=medium&illust_id=%s' % work['id'],
        ))
    return '\n'.join(lines)
예제 #12
0
class Worker:
    writes = 0
    api = PixivAPI()
    root_dir = "image"
    size = "large"
    latest_time = "2000-01-18 23:41:35"

    def login(self, username, password):
        self.token = self.api.login(username, password)

    #循环检查路径是否存在
    def check_root_dir(self, path_list):
        if not os.path.exists(self.root_dir):
            os.mkdir(self.root_dir)
        cur_path = self.root_dir
        for path in path_list:
            cur_path = os.path.join(cur_path, path)
            if not os.path.exists(cur_path):
                os.mkdir(cur_path)

    def check_result(self, result, filepath):
        if cmp(result["status"], "success") != 0:
            file(filepath, "w").write(json.dumps(result))
            raise PixivError(json.dumps(result))

    def pull_following_works(self,
                             time,
                             nums=10,
                             flt={
                                 'illustration': True,
                                 'manga': True,
                                 'ugoira': True
                             }):
        result = self.api.me_following_works()
        self.check_root_dir(["following"])
        curpg = 1
        per_pg = 30
        self.check_result(
            result, os.path.join(self.root_dir, "following", "error.json"))
        file(os.path.join(self.root_dir, "following", "metadata.json"),
             "w").write(json.dumps(result))
        total = result["pagination"]["total"]
        nums = (nums < total and [nums] or [total])[0]
        for i in range(0, nums):
            if curpg < i / per_pg + 1:
                curpg += 1
                result = self.api.me_following_works(page=curpg)
                self.check_result(
                    result,
                    os.path.join(self.root_dir, "following", "error.json"))

            idx = i % per_pg
            info_json = result["response"][idx]
            reup_time = info_json["reuploaded_time"]
            #过滤掉不想要的图片
            if not flt[info_json["type"]]:
                continue
            #这个方法在抓取排行时不可取
            #在上次扒取的时间点之前的图片就放弃
            #if datetime.strptime(reup_time,"%Y-%m-%d %H:%M:%S") < time:
            #    continue
            self.save_work(info_json, os.path.join(self.root_dir, "following"))
            if datetime.strptime(self.latest_time,
                                 "%Y-%m-%d %H:%M:%S") < datetime.strptime(
                                     reup_time, "%Y-%m-%d %H:%M:%S"):
                self.latest_time = reup_time
        return result

    def pull_ranking_works(self,
                           time,
                           nums=10,
                           work_type="all",
                           rank_type="daily",
                           flt={
                               'illustration': True,
                               'manga': True,
                               'ugoira': True
                           }):
        self.check_root_dir([work_type, rank_type])
        result = self.api.ranking(ranking_type=work_type, mode=rank_type)
        curpg = 1
        per_pg = 50

        self.check_result(
            result,
            os.path.join(self.root_dir, work_type, rank_type, "error.json"))

        file(
            os.path.join(self.root_dir, work_type, rank_type, "metadata.json"),
            "w").write(json.dumps(result))
        total = result["pagination"]["total"]
        nums = (nums < total and [nums] or [total])[0]
        for i in range(0, nums):
            if curpg < i / per_pg + 1:
                curpg += 1
                result = self.api.me_following_works(page=curpg,
                                                     ranking_type=rank_type,
                                                     mode=work_type)
                self.check_result(
                    result,
                    os.path.join(self.root_dir, work_type, rank_type,
                                 "error.json"))

                file(
                    os.path.join(self.root_dir, work_type, rank_type,
                                 "metadata.json"),
                    "w").write(json.dumps(result))
            idx = i % per_pg
            info_json = result["response"][0]["works"][idx]["work"]
            reup_time = info_json["reuploaded_time"]
            #过滤掉不想要的图片
            if not flt[info_json["type"]]:
                continue
            #在上次扒取的时间点之前的图片就放弃
            #if datetime.strptime(reup_time,"%Y-%m-%d %H:%M:%S") < time:
            #    continue
            self.save_work(info_json,
                           os.path.join(self.root_dir, work_type, rank_type))
            if datetime.strptime(self.latest_time,
                                 "%Y-%m-%d %H:%M:%S") < datetime.strptime(
                                     reup_time, "%Y-%m-%d %H:%M:%S"):
                self.latest_time = reup_time
        return result

    def save_work(self, info_json, path):
        #过滤不想保存的作品
        if cmp(info_json["type"], "ugoira") == 0:
            self.save_ugoira(info_json, path)
        if cmp(info_json["type"], "illustration") == 0:
            self.save_image(info_json, path)
        if cmp(info_json["type"], "manga") == 0:
            self.save_image(info_json, path)
        return

    def save_ugoira(self, info_json, path):
        full_info = self.api.works(info_json["id"])
        urls = full_info["response"][0]["metadata"]["zip_urls"]

        first_url = None
        for item in urls:
            first_url = urls[item]
            break
        file_name = first_url[first_url.rfind("/") + 1:]
        if os.path.exists(os.path.join(path, file_name)):
            #print u"文件已存在:跳过"
            return first_url
        file(os.path.join(path, "%s.json" % (file_name)),
             "w").write(json.dumps(full_info))
        res = self.api.auth_requests_call("GET", first_url)
        data = res.content
        file(os.path.join(path, file_name), "wb").write(data)
        self.writes += int(res.headers["content-length"])
        #not finish
        print first_url
        print res.headers["content-length"]
        return first_url

    def save_image(self, info_json, path):
        page_count = info_json["page_count"]
        #如果是漫画,也就是多幅图片就存在id文件夹下面
        if page_count != 1:
            cur_path = os.path.join(path, "%d" % info_json["id"])
            if not os.path.exists(cur_path):
                os.mkdir(cur_path)
        else:
            cur_path = path

        for i in range(0, page_count):
            origin_url = info_json["image_urls"][self.size]
            p_idx = origin_url.rfind("_p")
            if cmp(self.size, "large") == 0:
                r_idx = origin_url.rfind(".")
            else:
                r_idx = origin_url.rfind("_")

            img_url = "%s_p%d%s" % (origin_url[:p_idx], i, origin_url[r_idx:])
            file_name = img_url[img_url.rfind("/") + 1:]
            if os.path.exists(os.path.join(cur_path, file_name)):
                #print u"文件已存在:跳过"
                continue
            res = self.api.auth_requests_call("GET", img_url)
            data = res.content
            file(os.path.join(cur_path, file_name), "wb").write(data)
            self.writes += int(res.headers["content-length"])
            print os.path.join(cur_path, file_name)
            print res.headers["content-length"]
예제 #13
0
from create_database import configs
import time
from pixivpy3 import PixivAPI

# Options forwarded to the requests session used by PixivAPI; routes
# HTTPS traffic through the configured proxy.
_REQUESTS_KWARGS = {
    'proxies': {
        'https': configs.proxy,
    },
    'verify':
    True,  # keep SSL verification on; set False only if the proxy breaks it
}

# Time the whole fetch to gauge API latency through the proxy.
start_time = time.time()
api = PixivAPI(**_REQUESTS_KWARGS)
# Reuse previously obtained tokens instead of a username/password login.
api.set_auth(configs.pixiv.access_token, configs.pixiv.refresh_token)
# api.login(configs.pixiv.user, configs.pixiv.passwd)
# json_result = api.illust_detail(59580629)
# illust = json_result.illust
# print(">>> origin url: %s" % illust.image_urls['large'])
# api.auth(configs.pixiv.user, configs.pixiv.passwd, configs.pixiv.refresh_token)
# Fetch metadata for a single hard-coded work id and print its caption
# and large-image URL.
json_result = api.works(46363414)
print(json_result)
illust = json_result.response[0]
print(">>> %s, origin url: %s" % (illust.caption, illust.image_urls['large']))

end_time = time.time()
print(end_time - start_time, 's')
예제 #14
0
 def __init__(self, credentials):
     """Authenticate both Pixiv clients with a single refresh token.

     credentials: sequence whose first element is the refresh token.
     """
     refresh_token = credentials[0]
     self.apapi = AppPixivAPI()
     self.apapi.auth(refresh_token=refresh_token)
     self.papi = PixivAPI()
     self.papi.auth(refresh_token=refresh_token)