def run(self):
    if not config.target_path:
        error("target path is not set")
        exit()
    elif checkPath(config.target_path):
        # Collect every image under the target path that matches the type filter.
        self.image_pools = jpg_walk(config.target_path, config.types_filter)
        while self.image_pools:
            self.get_exif_datas(self.image_pools.pop())
        # Resolve GPS coordinates to addresses only when an API key is configured.
        if config.location and config.rest_api_key:
            trio.run(self.init_session)
            trio.run(self.find_all_address)
        if config.save_image:
            info("copy images")
            for index, (key, item) in enumerate(self.res_pools.items()):
                # Copy each matched image into the event folder, renamed by index.
                to_file = pathlib.Path(
                    self.image_path,
                    ".".join([
                        str(index),
                        pathlib.Path(item["path"]).name.split(".")[-1],
                    ]),
                )
                my_file = pathlib.Path(item["path"])
                shutil.copy(str(my_file), str(to_file))
                # Keep only the path relative to the output root.
                self.res_pools[key]["path"] = "/".join(
                    str(to_file).split("/")[2:])
        # Export the results sorted by shooting date.
        datas = sorted(
            [item for _, item in self.res_pools.items()],
            key=lambda item: item["date"],
        )
        if config.analysis:
            analysis(datas, f"{self.event_path}/res.html")
        create_json(datas, f"{self.event_path}/res.json")
def run(self):
    # Example url: https://v.qq.com/x/cover/3fvg46217gw800n/h0030qj4fov.html
    if "https://v.qq.com/x/cover/" not in conf.url:
        error("not a video link!")
        exit()
    datas_path = None
    try:
        trio.run(self.__init_session)
        trio.run(self.__get_video_list, conf.url)
        datas_path = f"{self.__root_path}/{self.__res_file_name}"
        if conf.new or not checkPath(datas_path):
            # No cached result file (or a fresh crawl was requested): fetch every danmu.
            trio.run(self.__get_all_danmus, self.__cover_info)
        else:
            # Reuse the cached results from the previous run.
            with open(datas_path, "r") as file:
                self.__results = json.loads(file.read())
        # tmp_res = sorted(self.__results.items(), key=lambda item: item[1]['number'])
        if conf.need_excel:
            self.create_danmu_xlsx([
                "upcount",
                "commentid",
                "opername",
                "timepoint",
                "uservip_degree",
                "content",
            ])
        if conf.need_words:
            self.create_word_clouds()
        if conf.need_graph:
            # Graph output is not implemented yet (see the sketch below).
            pass
    finally:
        create_json(self.__results, datas_path)
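# A minimal sketch of what the unimplemented `conf.need_graph` branch could do:
# plot how many danmus fall into each minute of the video. It assumes every value
# in the results dict carries a numeric "timepoint" field (the xlsx columns above
# suggest one exists) and that matplotlib is available; neither assumption is
# confirmed by this snippet, so treat it as an illustration rather than the
# project's actual graph code.
from collections import Counter

import matplotlib.pyplot as plt


def plot_danmu_density(results: dict, out_path: str) -> None:
    """Bar-chart danmu counts per minute of the video (hypothetical helper)."""
    per_minute = Counter(int(item["timepoint"]) // 60 for item in results.values())
    minutes = sorted(per_minute)
    plt.figure(figsize=(12, 4))
    plt.bar(minutes, [per_minute[m] for m in minutes])
    plt.xlabel("minute of video")
    plt.ylabel("danmu count")
    plt.tight_layout()
    plt.savefig(out_path)
    plt.close()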
def get_data(self, path):
    with check_times():
        try:
            # Fetch the main data set, then optionally enrich each entry.
            trio.run(self.get_all)
            if config.more_details:
                trio.run(self.add_detail)
        except Exception as e:
            error(e)
        finally:
            # Export whatever was collected, even if the crawl failed part-way.
            name = f'{path}_{config.status["total"]}'
            if "json" in config.export_func:
                create_json(self.results, f"{name}.json")
            if "excel" in config.export_func:
                # Excel export is not implemented yet (see the sketch below).
                pass
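# The "excel" branch above is still a stub. One way it could be filled in,
# assuming self.results is a list of flat dicts and that pandas (with an xlsx
# engine such as openpyxl) is an acceptable dependency; the helper name and its
# use are assumptions, not the project's own API.
import pandas as pd


def create_excel(results, file_name):
    """Write a list of flat dicts to an .xlsx file (hypothetical helper)."""
    pd.DataFrame(results).to_excel(file_name, index=False)


# Inside get_data() the branch would then read:
#     if "excel" in config.export_func:
#         create_excel(self.results, f"{name}.xlsx")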
async def __get_video_list(self, url):
    """Get the video's HASH id list.

    Fetch the response body for the given url and extract the key data with pyquery.

    Demo: https://v.qq.com/x/cover/p69wlzli02uqwms/d0023ka5gj7.html

    Args:
        url: the target video's url
    """
    try:
        info(f"Url: {url}")
        response = await self.__async_session.get(url)
        # Parse the cover page into the three metadata blocks used later on.
        self.__list_info, self.__cover_info, self.__video_info = Parser.get_details(
            response)
        self.__root_path = f'{self.__root_path}/{self.__cover_info["title"]}'
        info(f"Name: [{self.__root_path}]")
        initPath(self.__root_path)
        create_json(self.__list_info, f"{self.__root_path}/list_info.json")
        create_json(self.__cover_info, f"{self.__root_path}/cover_info.json")
        create_json(self.__video_info, f"{self.__root_path}/video_info.json")
    except Exception:
        raise
def run(self): """ 主运行方法 """ checkPath(config.target_path) self.image_pools = jpg_walk(config.target_path, config.types_filter) self.bar = progressbar.ProgressBar(max_value=len(self.image_pools)) times = 0 while self.image_pools: times += 1 self.get_exif_datas(self.image_pools.pop(), times) if config.location and config.rest_api_key: trio.run(self.init_session) trio.run(self.find_all_address) if config.save_image: info("拷贝图片...") for index, (key, item) in enumerate(self.res_pools.items()): to_file = pathlib.Path( self.image_path, ".".join([str(index), item["path"].name.split(".")[-1]]), ) shutil.copy(str(item["path"]), str(to_file)) self.res_pools[key]["new_path"] = "/".join( str(to_file).split("/")[2:]) datas = sorted( [ item for _, item in self.res_pools.items() if item and item["date"] ], key=lambda i: i.get("date", ""), ) if not datas: exit("结果为空") if config.analysis: analysis(datas, f"{self.event_path}/res.html") for item in datas: item["path"] = str(item["path"].absolute()) create_json(datas, f"{self.event_path}/res.json")
def get_city(self):
    """Fetch the city list."""
    if check_path(self.city_names):
        # Prefer the cached city file when it already exists.
        try:
            with pathlib.Path(self.city_names).open("r") as city_file:
                self.citys = json.loads(city_file.read())
            return
        except Exception as e:
            error(f"Failed to read the city file: {e}")
    try:
        # Otherwise fetch the page and pull the city JSON out with a regex.
        resp = self.menu_session.get(self.url_list["citys"])
        self.save_debug_file(resp.text, "get_city.html")
        res = self.reg_list["citys"].findall(resp.text)
        addsucess()
        self.citys = json.loads(res[0])
        create_json(self.citys, self.city_names)
    except Exception as e:
        error(f"Failed to fetch the city info: {e}")
        addfailed()
        exit()
    success(f"{len(self.citys)} cities")
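# Nearly every method above ends with a create_json(obj, path) call defined
# elsewhere in the repo. A typical implementation is sketched below purely as an
# assumption, to make those calls easier to follow; ensure_ascii=False keeps the
# Chinese danmu and city text readable in the output files.
import json
import pathlib


def create_json_sketch(data, file_path: str) -> None:
    """Serialize data to file_path as pretty-printed UTF-8 JSON (hypothetical helper)."""
    path = pathlib.Path(file_path)
    path.parent.mkdir(parents=True, exist_ok=True)
    with path.open("w", encoding="utf-8") as fp:
        json.dump(data, fp, ensure_ascii=False, indent=2)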