Example #1
 def get_page_resource(cls, url):
     """
     :param url:
     :return:
     """
     data = curl_data(url, open_virtual_ip=True)
     return data
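None of these excerpts define curl_data itself. As a reading aid, here is a minimal requests-based sketch with a signature compatible with every call on this page; the open_virtual_ip behavior (spoofed proxy headers) and the str-vs-bytes return split are assumptions, not the project's actual implementation.

import random
import requests

def curl_data(url, value=None, header=None, cookie=None,
              referer=None, open_virtual_ip=False):
    # Hypothetical stand-in for the project's curl_data, which is not shown.
    headers = dict(header or {})
    if referer:
        headers["Referer"] = referer
    if open_virtual_ip:
        # Assumption: "virtual ip" means spoofing the client address
        # through the common proxy headers.
        fake_ip = ".".join(str(random.randint(1, 254)) for _ in range(4))
        headers["X-Forwarded-For"] = fake_ip
        headers["Client-IP"] = fake_ip
    if value is not None:
        # Examples #7, #9 and #15 pass form fields through `value`.
        resp = requests.post(url, data=value, headers=headers, cookies=cookie)
    else:
        resp = requests.get(url, headers=headers, cookies=cookie)
    resp.raise_for_status()
    content_type = resp.headers.get("Content-Type", "")
    # The examples treat text pages as str and media (mp4/apk/jpeg) as bytes.
    if content_type.startswith("text") or "json" in content_type:
        return resp.text
    return resp.content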
Example #2
 def test(self):
     url = "https://www.crunchyroll.com/videos/anime/popular/ajax_page?pg=3"
     data = curl_data(
         url,
         referer="https://www.crunchyroll.com/videos/anime/popular/ajax_page?pg=3",
         open_virtual_ip=True)
     debug(data)
Example #3
 def __get_index_page_data(cls, url):
     data = curl_data(url=url, open_virtual_ip=True)
     # with open("static/spider/page_index.html", "wb") as f:
     #     f.write(data.encode("utf-8"))
     #     f.close()
     # with open("static/spider/page_index.html", "rb") as f:
     #     data = f.read().decode("utf-8")
     #     f.close()
     return data
Example #4
 def __get_frame_page(cls, url):
     url = "https://codecanyon.net" + url
     data = curl_data(url, open_virtual_ip=True)
     # with(open("static/spider/game_frame_page.html", "rb")) as f:
     #     data = f.read().decode("utf-8")
     #     f.close()
     # with open("static/spider/game_frame_page.html", "wb") as f:
     #     f.write(data.encode("utf-8"))
     #     f.close()
     return data
Example #5
 def __handle(self, item):
     with self.auto_handle_exception():
         debug("Start downloading ==========> {name}".format(name=item["download_url"]))
         data = curl_data(item["download_url"])
         debug(data)
         with open(
                 "static/spider/game_download/{name}.apk".format(
                     name=item["id"]), "wb") as f:
             f.write(data)
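auto_handle_exception is not defined in these excerpts either. Judging from its use here and in Example #10, it is a context manager that traps exceptions, optionally hands the failing item to a callback, and optionally re-raises. A minimal sketch under those assumptions (the callback signature is a guess):

from contextlib import contextmanager

@contextmanager
def auto_handle_exception(error_callback=None, throw_exception_flag=False, item=None):
    # Hypothetical sketch; the real method is not shown.
    try:
        yield
    except Exception as e:
        print("handled exception: {error}".format(error=e))  # the project logs via debug()
        if error_callback is not None:
            error_callback(item, e)  # assumed callback signature
        if throw_exception_flag:
            raise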
Example #6
 def test_ip(self):
     url = "https://a-vrv.akamaized.net/evs/1631771ddd0df6e6f7c60770955fe64f/assets/p/6bbmnx58kgajfsd_,1278465.mp4,1278467.mp4,1278463.mp4,1278461.mp4,1278451.mp4,.urlset/fragment-21-f1-a1-x3.m4s?t=exp=1565753706~acl=/evs/1631771ddd0df6e6f7c60770955fe64f/assets/*~hmac=be0ef2b7b8215367e2069db78781d28627a051399f80b10240d73da945ffc162"
     # url = "https://nl.tan90.club/"
     data = curl_data(
         url=url,
         referer="https://static.crunchyroll.com/vilos/player.html",
         open_virtual_ip=True)
     with open("test.mp4", "wb") as f:
         f.write(data)
         f.close()
     debug(data)
Example #7
 def vote(self):
     url = "http://fyxqt.fuyuxiangqi.cn/wxtp/web/aipainew/aipainewAction!dianji.action?t={time_stamp}".format(
         time_stamp=int(time.time() * 1000))
     params = {
         "id": self.id,
         "hdid": self.wx_id,
         "yz": ""
     }
     header = self.__get_header()
     data = curl_data(url, value=params, cookie=self.cookie, header=header)
     debug(data)
Example #8
 def get_comic_data(self, item):
     url = "https://fancuishou.cn/home/api/chapter_list/tp/{id}-1-1-1000"
     debug("获取id为 ========> {id}的漫画".format(id=item["comic_id"]))
     url = url.format(id=item["comic_id"])
     debug(url)
     try:
         data = curl_data(url, open_virtual_ip=True)
     except Exception as e:
         debug("curl_data failed to fetch data: {error}".format(error=str(e)))
         return
     # with open("static/comic/single_list.json") as f:
     #     data = f.read()
     # with open("static/comic/single_list.json", "wb") as f:
     #     f.write(data.encode("utf-8"))
     self.handle_comic_data(data)
Example #9
 def handle(self):
     # url = "https://i.ytimg.com/vi/9OHkwJpS6u4/hqdefault.jpg?sqp=-oaymwEZCPYBEIoBSFXyq4qpAwsIARUAAIhCGAFwAQ==&rs=AOn4CLDEO8flAyYWStTIWI3aLoirwz73yg"
     # url = "https://i.ytimg.com/vi/9OHkwJpS6u4/hqdefault.jpg"
     # url = "https://www.google.com/"
     url = "http://192.168.50.177:8083/download"
     # url = "https://www386.hlsmp4.com/token=b2LDM4PEjOWh5XvREsjfdw/1567685699/0.0.0.0/67/f/9b/11b5f88fd13540ae36950a5a0daa19bf-480p.mp4"
     header = {
         "User-Agent":
         "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
         # "upgrade-insecure-requests": "1"
     }
     data = curl_data(url,
                      value={"name": "ok"},
                      header=header,
                      open_virtual_ip=True)
     debug(data)
Example #10
 def __download(self, item):
     # Check whether the file already exists
     if os.path.exists("static/spider/epub/{filename}.epub".format(
             filename=item['id'])):
         debug("电子书:{title} ========> 已经存在, 跳过".format(title=item['title']))
         return
     with self.auto_handle_exception(error_callback=self.__error_callback,
                                     throw_exception_flag=True,
                                     item=item):
         data = curl_data(self.url_prefix + item['source_url'])
         with open(
                 "static/spider/epub/{filename}.epub".format(
                     filename=item['id']), "wb") as f:
             f.write(data)
         debug("E-book: {title} =======> downloaded successfully".format(title=item['title']))
Example #11
 def __get_page_data(self):
     """
     获取书籍主页面数据
     :return:
     """
     try:
         data = curl_data(self.url)
     except Exception as e:
         data = ""
         debug("get index page data error: {error}".format(error=e))
     # with open("static/spider/ebook_index_page.html", "wb") as f:
     #     f.write(data.encode("utf-8"))
     #     f.close()
     # with open("static/spider/ebook_index_page.html", "rb") as f:
     #     data = f.read().decode("utf-8")
     #     f.close()
     return data
Example #12
 def __get_book_page(cls, url):
     """
     获取书籍详情页数据
     :param url:
     :return:
     """
     try:
         data = curl_data(url)
     except Exception as e:
         data = False
         debug("Failed to fetch the book detail page, error: {error}".format(error=str(e)))
     # with open("static/spider/ebook_book_page.html", "wb") as f:
     #     f.write(data.encode("utf-8"))
     #     f.close()
     # with open("static/spider/ebook_book_page.html", "rb") as f:
     #     data = f.read().decode("utf-8")
     #     f.close()
     return data
Example #13
 def __get_page_data(self):
     data = curl_data(self.url)
     # with open("static/spider/page_first.html", "rb") as f:
     #     data = f.read().decode("utf-8")
     #     f.close()
     bs_html = BeautifulSoup(data, "html.parser")
     live_url = bs_html.find(name="a", attrs={"class": "live-preview"})
     try:
         live_url = live_url.attrs['href']
     except Exception as e:
         live_url = False
         debug("Failed to get the game live-preview link, error: " + str(e))
     if not live_url:
         return live_url, None
     # with open("static/spider/page_first.html", "wb") as f:
     #     f.write(data.encode("utf-8"))
     return live_url, data
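Since BeautifulSoup's find() returns None when nothing matches, the attrs lookup above can fail in two ways, which is what the try/except guards against. An equivalent, flatter version of the extraction (the helper name is hypothetical):

from bs4 import BeautifulSoup

def extract_live_url(html):
    # find() returns None when no <a class="live-preview"> exists,
    # so guard before touching its attributes.
    tag = BeautifulSoup(html, "html.parser").find("a", attrs={"class": "live-preview"})
    return tag.get("href", False) if tag is not None else False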
Example #14
def get_comic_image():
    url = request.values.get("url")
    data = curl_data(url, open_virtual_ip=True)
    return Response(data, mimetype="image/jpeg")
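This handler reads like a Flask view (request.values and Response match Flask's API), with the route decorator stripped from the excerpt. Note that the mimetype is hard-coded to image/jpeg regardless of what the upstream server actually returned, which is acceptable for a comic-image proxy but worth knowing.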
Example #15
 def run(self):
     header = self.__get_header()
     params = self.__get_params()
     self.cookie = self.__get_cookie()
     data = curl_data(url=self.url, value=params, header=header, cookie=self.cookie)
     debug(data)
Example #16
 def run(self):
     url = 'http://dat.c4fungames.com/dm/at/lp'
     data = curl_data(url)
     debug(data)
Example #17
 def get_page_list_data(self, page, url):
     # with open('static/comic/data.json') as f:
     #     data = f.read()
     request_url = url.format(page=page)
     data = curl_data(request_url, open_virtual_ip=True)
     return data
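A typical driver for get_page_list_data would walk pages until the endpoint returns an empty body. A sketch under that assumption (the function name, loop bound, and emptiness check are all hypothetical):

def crawl_all_pages(fetch_page, url_template, max_pages=100):
    # fetch_page is e.g. get_page_list_data above: (page, url) -> body.
    pages = []
    for page in range(1, max_pages + 1):
        data = fetch_page(page, url_template)
        if not data:
            break  # assume an empty body marks the last page
        pages.append(data)
    return pages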