def handle_url(self):
    """Validate the url/path inputs and dispatch the matching download.

    Routing, in order:
      * pixivision article page ("/a/<id>")      -> IlluDownloadThread
      * pixivision list page ("/c/illustration") -> PixivisionLauncher
      * a bare numeric id                        -> id-based queue task
      * any other http url                       -> url-based queue task
        (pixivision urls that did not match above are rejected)
    """
    url = self.url_var.get().strip()
    path = self.path_var.get().strip()
    if url == '' or path == '':
        showwarning("warning", "url or path can't be empty!")
        print("warning", "url or path can't be empty!")
        return
    if not os.path.exists(path):
        showerror("error", " No such file or directory!")
        print('error', 'No such file or directory')
        return
    # Pixivision illustration article page download.
    if re.match("htt(p|ps)://www.pixivision.net/(zh|ja|en|zh-tw)/a/\d*", url):
        showinfo("info", "Start download pixivision.net page:" + url)
        print("info", "Start download pixivision.net page:" + url)
        IlluDownloadThread(
            url.strip(), path=path, create_path=True,
            downloader=self.downloader).register_hook(
                success_callback=self.download_callback).start()
        return
    # Pixivision illustration list page download.
    elif re.match(
            r"htt(p|ps)://www.pixivision.net/(zh|ja|en|zh-tw)/c/illustration(/|)(\?p=\d+|)",
            url):
        showinfo("info", "Start download pixivision.net page:" + url)
        print("info", "Start download pixivision.net page:" + url)
        PixivisionLauncher(
            url, save_path=path,
            downloader=self.downloader).register_hook(
                success_callback=self.download_callback).start()
        return
    # Input parses as a non-zero integer: treat it as an illustration id.
    elif CommonUtils.set_int(url) != 0:
        showinfo(
            "info", "Downloading id:" + str(CommonUtils.set_int(url)) +
            " illustration")
        print(
            "info", "Downloading id:" + str(CommonUtils.set_int(url)) +
            " illustration")
        self.queue.add_work(
            Task(TASK_TYPE_ID,
                 DOWNLOAD_MODE_ID,
                 id=CommonUtils.set_int(url),
                 path=path))
        return
    elif url.startswith("http"):
        # Unparseable pixivision pages (and other non-pixiv sites) are
        # not supported.
        if url.find("pixivision.net") != -1:
            showerror("error", "The download link is not supported")
            print("error", "The download link is not supported")
            return
        showinfo("info", "Downloading url:" + url)
        print("info", "Downloading url:" + url)
        self.queue.add_work(
            Task(TASK_TYPE_URL, DOWNLOAD_MODE_URL, url=url, path=path))
    else:
        showerror("error", "")
def download_illustration(illu, path, auth_api):
    """Download one illustration described by *illu* into *path*.

    :param illu: illustration summary (dict-like); needs ``url`` and
        ``title`` keys, with ``url`` carrying the ``illust_id`` parameter.
    :param path: destination directory.
    :param auth_api: authenticated client exposing ``download(url, path=)``.
    """
    # Fix: "k in d" replaces dict.has_key() (py2-only); bare "except:" and
    # "except Exception, e" replaced with py2/py3-compatible forms.
    if "url" in illu and "title" in illu:
        illust_id = CommonUtils.get_url_param(illu.url, "illust_id")
        detail = PixivApi.illust_detail(illust_id)
        if detail:
            try:
                detail = detail.illust
                # Single-page illustration.
                if detail.page_count == 1:
                    try:
                        url = detail.meta_single_page.original_image_url
                    except Exception:
                        # No original url available: fall back to large.
                        url = detail.image_urls.large
                    download(illust_id, illu.title, path, url, auth_api)
                # Multi-page illustration.
                else:
                    if detail.page_count > P_LIMIT:
                        # Page count exceeds the configured limit: skip.
                        print("Pixiv id:%s,name:%s P>limit,Skip download" %
                              (illust_id, illu.title))
                        return
                    urls = detail.meta_pages
                    # Page list available: keep all pages in one folder.
                    if len(urls) > 1:
                        path += "/p_%s" % illust_id
                        if not os.path.exists(path):
                            os.mkdir(path)
                        for index in range(len(urls)):
                            try:
                                url = urls[index].image_urls.original if \
                                    "original" in urls[index].image_urls else urls[index].image_urls.large
                                extension = os.path.splitext(url)[1]
                                if IMAGE_USE_ORG_NAME:
                                    save_path = path + "/p_%s_%s_%d%s" % (
                                        illust_id,
                                        CommonUtils.filter_dir_name(illu.title),
                                        index, extension)
                                else:
                                    save_path = path + "/p_%s_%d%s" % (
                                        illust_id, index, extension)
                                print(save_path)
                                auth_api.download(url, path=save_path)
                            except Exception:
                                # Best effort: keep the remaining pages.
                                continue
                    else:
                        # Page list missing: download the large image.
                        url = detail.image_urls.large
                        download(illust_id, illu.title, path, url, auth_api)
            except Exception as e:
                error_log("Download fail:")
                error_log(e)
        else:
            print(illu.title + " can't get detail id :" + illust_id)
def search(self, keywords, path):
    """Collect search results for *keywords* from the web search pages and
    the pixiv API, de-duplicate them and queue one download task each.

    :param keywords: search terms entered by the user
    :param path: destination directory for the downloads
    """
    id_set = set()  # ids already queued, used for de-duplication
    page = CommonUtils.set_int(self.page_number.get(), 2)
    fav_num = CommonUtils.set_int(self.fav_num.get(), 0)
    tasks = []
    # Phase 1: scrape the web search result pages.
    for p in range(1, page + 1):
        result = self.search_handler.search(keywords,
                                            page=p,
                                            download_threshold=fav_num)
        if len(result) == 0:
            print('warning', 'Page:' + str(p) + ' Search results are Empty')
            continue
        for illu in result:
            if illu.id in id_set:
                continue
            else:
                task = Task(TASK_TYPE_SEARCH,
                            DOWNLOAD_MODE_ID,
                            title='search ' + keywords,
                            path=path,
                            p_limit=CommonUtils.set_int(
                                self.p_limit.get(), 0),
                            id=illu.id,
                            illu=illu,
                            get_from='search_page')
                id_set.add(illu.id)
                tasks.append(task)
    print('Search ' + keywords + ' from web page get:' + str(len(tasks)))
    # Phase 2: query the pixiv app API, skipping ids found in phase 1.
    api_search_data = api_search(keywords,
                                 self.api,
                                 page=page,
                                 download_threshold=fav_num,
                                 id_set=id_set)
    if len(api_search_data) == 0:
        print('warning', 'Api search results are empty')
    else:
        print('Search ' + keywords + ' from pixiv Api get:' +
              str(len(api_search_data)))
        for illu in api_search_data:
            task = Task(TASK_TYPE_SEARCH_API,
                        DOWNLOAD_MODE_DETAIL,
                        path=path,
                        p_limit=CommonUtils.set_int(self.p_limit.get(), 0),
                        illu=illu,
                        title='search ' + keywords,
                        get_from='search_api')
            tasks.append(task)
    all_count = len(tasks)
    if all_count > 0:
        # Shared counter lets worker threads detect batch completion.
        current_count = AtomicInteger.AtomicInteger()
        if not os.path.exists(path):
            os.makedirs(path)
        for task in tasks:
            task.all_count = all_count
            task.current_count = current_count
            self.queue.add_work(task)
def auth(self, username=None, password=None, refresh_token=None):
    """Authenticate against pixiv's OAuth token endpoint.

    Prefers username/password when both are supplied, otherwise uses the
    given (or previously stored) refresh token.  On success the access
    token, user id and refresh token are cached on the instance.

    :param username: pixiv account name (optional)
    :param password: pixiv account password (optional)
    :param refresh_token: OAuth refresh token (optional)
    :return: the parsed token response object
    :raises PixivError: when no credentials are available, the HTTP call
        fails, or the response cannot be parsed
    """
    url = 'https://oauth.secure.pixiv.net/auth/token'
    # Headers imitate the official iOS client; the endpoint rejects
    # unknown clients.
    headers = {
        'App-OS': 'ios',
        'App-OS-Version': '10.3.1',
        'App-Version': '6.8.3',
        'User-Agent': 'PixivIOSApp/6.8.3 (iOS 10.3.1; iPhone8,1)',
    }
    # Client id/secret are the mobile app's published OAuth credentials.
    data = {
        'get_secure_url': 1,
        'client_id': 'MOBrBDS8blbauoSck0ZfDbtuzpyT',
        'client_secret': 'lsACyCD94FhDUtGTXi3QzcFE2uU1hqtDaKeqrdwj',
    }
    if CommonUtils.is_not_empty(username) and CommonUtils.is_not_empty(
            password):
        data['grant_type'] = 'password'
        data['username'] = username
        data['password'] = password
    elif (refresh_token is not None) or (self.refresh_token is not None):
        data['grant_type'] = 'refresh_token'
        data['refresh_token'] = refresh_token or self.refresh_token
    else:
        raise PixivError(
            '[ERROR] auth() but no password or refresh_token is set.')
    r = requests.post(url,
                      headers=headers,
                      data=data,
                      proxies=pixiv_config.PROXIES)
    if r.status_code not in [200, 301, 302]:
        if data['grant_type'] == 'password':
            raise PixivError(
                '[ERROR] auth() failed! check username and password.\nHTTP %s: %s'
                % (r.status_code, r.text),
                header=r.headers,
                body=r.text)
        else:
            raise PixivError(
                '[ERROR] auth() failed! check refresh_token.\nHTTP %s: %s'
                % (r.status_code, r.text),
                header=r.headers,
                body=r.text)
    token = None
    try:
        token = parse_json(r.text)
        self.access_token = token.response.access_token
        self.user_id = token.response.user.id
        self.refresh_token = token.response.refresh_token
    except Exception as e:
        print(e)
        raise PixivError('Get access_token error! Response: %s' % token,
                         header=r.headers,
                         body=r.text)
    print("ACCESS TOKEN " + self.access_token)
    print("ACCESS Refresh Token " + self.refresh_token)
    return token
def get_issue_yanshou_yesterday(self):
    '''
    Fetch issues that transitioned from Open to Resolved yesterday whose
    test category is acceptance (or market feedback) and whose resolution
    is Fixed or Pending.
    :return: list of dicts describing the matching issues
    '''
    zero_yesterday = CommonUtils.get_date_time_others_zero(-1)
    last_yesterday = CommonUtils.get_date_time_others_last(-1)
    sql_preuser2sw = 'select issueid from changegroup where ID in (select groupid from changeitem where \
OLDSTRING="Open" and NEWSTRING="Resolved") and CREATED between %s and %s'
    issues = MysqlUtils.find_data_dic_2args(sql_preuser2sw, zero_yesterday,
                                            last_yesterday)
    issues_list = []  # one dict of details per retrieved issue
    issues_list_use = []
    if issues:
        for item in issues:
            try:
                issue_key = str(item['issueid'])
                issues_list.append({'id': issue_key})
                i = len(issues_list) - 1  # index of the dict just appended
                issues_list[i]['project'] = self.get_issue_project(
                    issue_key)
                issues_list[i]['summary'] = self.get_issue_summary(
                    issue_key)
                issues_list[i]['description'] = self.get_issue_description(
                    issue_key)
                issues_list[i]['type'] = self.get_issue_type(issue_key)
                issues_list[i]['priority'] = self.get_issue_priority(
                    issue_key)
                issues_list[i]['status'] = self.get_issue_status(issue_key)
                issues_list[i]['probability'] = self.get_issue_probability(
                    issue_key)
                issues_list[i][
                    'test_category'] = self.get_issue_test_category(
                        issue_key)
                issues_list[i]['key'] = self.get_issue_key(issue_key)
                issues_list[i]['result'] = self.get_issue_result(issue_key)
            except Exception:
                # Fix: the bare "except:" also swallowed KeyboardInterrupt/
                # SystemExit.  Skip issues whose details cannot be fetched;
                # a partially-filled dict may remain in issues_list, hence
                # the .get() lookups below.
                continue
        # Keep acceptance / market-feedback issues resolved Fixed/Pending.
        for item in issues_list:
            if ('test_category' in item):
                if (item['test_category'] == "验收Acceptance"
                        or item['test_category'] == "市场反馈") and (
                            item.get('result') == "Fixed"
                            or item.get('result') == "Pending"):
                    issues_list_use.append(item)
    else:
        issues_list_use = []
    return issues_list_use
def related(self, id_var, path):
    """Queue download tasks for illustrations related to *id_var*.

    Pages through the app API's related-illustration feed (up to the
    configured page count), filters by bookmark threshold, de-duplicates
    by id and enqueues one DOWNLOAD_MODE_DETAIL task per illustration.
    """
    page = CommonUtils.set_int(self.page_number.get(), 2)
    fav_num = CommonUtils.set_int(self.fav_num.get(), 0)
    illusts = []
    result = self.api.illust_related(id_var)
    next_url = result.next_url
    datas = result.illusts
    if len(datas) == 0:
        print('warning',
              'Get related illus of ' + str(id_var) + ' are empty')
        return
    illusts.extend(datas)
    page -= 1
    # Follow next_url pagination until exhausted or the page budget is
    # spent.
    while len(datas) > 0 and CommonUtils.is_not_empty(
            next_url) and page > 0:
        result = self.api.get(next_url)
        datas = result.illusts
        next_url = result.next_url
        illusts.extend(datas)
        page -= 1
    tasks = []
    id_set = set()  # de-duplication across pages
    p_limit = CommonUtils.set_int(self.p_limit.get(), 0)
    for illust in illusts:
        if not illust or illust.id in id_set:
            continue
        if fav_num > 0:
            # Skip illustrations below the bookmark threshold.
            if illust.total_bookmarks < fav_num:
                continue
        task = Task(TASK_TYPE_RELATED,
                    DOWNLOAD_MODE_DETAIL,
                    path=path,
                    p_limit=p_limit,
                    illu=illust,
                    title="Related by id " + str(id_var),
                    get_from='related')
        tasks.append(task)
        id_set.add(illust.id)
    if len(tasks) == 0:
        print('warning',
              'Get related illus of ' + str(id_var) + ' are empty')
        return
    else:
        print('Get related illus of ' + str(id_var) + ' All:' +
              str(len(tasks)))
    all_count = len(tasks)
    # Shared counter lets workers detect when the batch is finished.
    current_count = AtomicInteger.AtomicInteger()
    if not os.path.exists(path):
        os.makedirs(path)
    for task in tasks:
        task.all_count = all_count
        task.current_count = current_count
        self.queue.add_work(task)
def create_smis_provider(self):
    """Create an SMI-S provider from the configured details and wait for
    the resulting task.

    :return: "Task Completed" or "Task Pending..!!"
    """
    payload = ConfigurationUtils.load_smis_details()
    post_response, status = self.communication_utils.post(
        'vdc/smis-providers.json', json_payload=payload)
    # Fix: `result` was only assigned inside the branch below, so an
    # already-completed status made `return result` raise NameError.
    # Assumes an immediately-completed POST counts as success.
    result = "Task Completed"
    if not status_completed(status):
        task_id, state = fetch_stats(post_response)
        CommonUtils.log_this(__name__, "STATE is {0}".format(state.upper()),
                             "")
        self.free_token = False
        result = "Task Completed" if self._prompt_status(
            task_id, 'Creating SMI-S Provider') else "Task Pending..!!"
    return result
def create_cmcne_fabric_manager(self):
    """Create a CMCNE fabric manager from the configured details and wait
    for the resulting task.

    :return: "Task Completed" or "Task Pending..!!"
    """
    payload = ConfigurationUtils.load_cmcne_details()
    post_response, status = self.communication_utils.post(
        'vdc/network-systems', json_payload=payload)
    # Fix: `result` was only assigned inside the branch below, so an
    # already-completed status made `return result` raise NameError.
    # Assumes an immediately-completed POST counts as success.
    result = "Task Completed"
    if not status_completed(status):
        task_id, state = fetch_stats(post_response)
        CommonUtils.log_this(__name__, "STATE is {0}".format(state.upper()),
                             "")
        self.free_token = False
        result = "Task Completed" if self._prompt_status(
            task_id, 'Creating CMCNE Fabric') else "Task Pending..!!"
    return result
def create_hosts(self):
    """Create every configured host under the current tenant.

    :return: the status string of the LAST polled host task
        ("Task Completed" / "Task Pending..!!").
    """
    payloads = ConfigurationUtils.load_hosts_details()
    tenant_id = self.get_tenant()
    url = '{0}/hosts.json'.format(tenant_id).replace("/", "", 1)
    # Fix: `result` was only assigned when a task had to be polled, so a
    # run where every POST completed immediately raised NameError on
    # return.  Assumes immediately-completed POSTs count as success.
    result = "Task Completed"
    for i in range(0, len(payloads)):
        post_response, status = self.communication_utils.post(
            url, json_payload=payloads[i])
        if not status_completed(status):
            task_id, state = fetch_stats(post_response)
            CommonUtils.log_this(__name__,
                                 "STATE is {0}".format(state.upper()), "")
            result = "Task Completed" if self._prompt_status(
                task_id,
                'Creating Host {0}'.format(i)) else "Task Pending..!!"
    return result
def _openfolder(self):
    """Ask the user for a directory (starting at the last used folder),
    remember the choice in settings and hand it to process_folder()."""
    try:
        start_dir = CommonUtils.get_setting_ini_('DEFAULT',
                                                 'last_open_folder', "./")
        directory = QFileDialog.getExistingDirectory(None, "选取文件夹",
                                                     start_dir)
        if not directory.strip():
            return
        CommonUtils.update_setting_ini_('DEFAULT', 'last_open_folder',
                                        directory)
        self.process_folder(directory)
    except Exception as e:
        print(e)
        pass
def worker_run(self, queue, callback):
    """Worker-thread loop: take tasks off *queue*, download, report.

    Runs forever.  Each task selects a download mode (by id / by url / by
    prebuilt detail); when *callback* is supplied, a completion message is
    emitted per task plus a final message when the last task of a counted
    batch finishes.
    """
    while True:
        try:
            task = queue.get()
            if not task:
                continue
            illu_file = None
            # -- download ----------------------------------------------
            if task.download_mode == DOWNLOAD_MODE_ID:
                illu_file = self.downloader.download_all_by_id(
                    task.id,
                    task.path,
                    p_limit=task.p_limit
                    if task.has_key('p_limit') else 0)
            elif task.download_mode == DOWNLOAD_MODE_URL:
                illu_file = self.downloader.download_all_by_url(
                    task.url, task.path)
            elif task.download_mode == DOWNLOAD_MODE_DETAIL:
                illu_file = self.downloader.download_by_detail(
                    task.illu, task.path, p_limit=task.p_limit)
            # -- per-task callback -------------------------------------
            if task.task_type == TASK_TYPE_ID and callback:
                msg = CommonUtils.build_callback_msg(illu_file,
                                                     id=str(task.id))
                callback(msg)
            elif task.task_type == TASK_TYPE_URL and callback:
                msg = CommonUtils.build_callback_msg(illu_file,
                                                     url=str(task.url))
                callback(msg)
            elif callback:
                if illu_file:
                    # PAGE_LIMIT_CONTINUE marks a skipped over-limit task.
                    if illu_file != PAGE_LIMIT_CONTINUE:
                        callback("%s:%s\nFile:%s\n\n" %
                                 (task.get_from + " get",
                                  task.illu.get('id'), illu_file))
                else:
                    callback("%s:%s\nFile:%s\n\n" %
                             (task.get_from + " get", task.illu.get('id'),
                              'Download Fail'))
        except Exception as e:
            print("error", e)
        finally:
            queue.task_done()
            print(threading.currentThread().getName() +
                  ":Current Task Number:" + str(queue.qsize()))
            # Fire the batch-finished callback when the last counted task
            # completes.  NOTE(review): `task`/`illu_file` keep values from
            # a previous iteration if queue.get() itself raised — confirm
            # that is acceptable.
            if callback and task and task.has_key(
                    'all_count') and task.all_count > 0:
                if illu_file:
                    current_count = task.current_count.getAndInc()
                    if current_count + 1 == task.all_count:
                        afterEndCallback(task, callback)
def ranking(self, path, mode, date, pages=1):
    """Queue download tasks from the pixiv ranking feed.

    :param path: base directory; a "ranking_<mode>_<date>" folder is used
    :param mode: ranking mode, passed straight to app_ranking
    :param date: ranking date string (YYYY-MM-DD)
    :param pages: number of ranking pages to fetch
    """
    path = path + "/ranking_" + mode + '_' + date
    page = 0
    offset = 0
    tasks = []
    while page < pages:
        ranking_data = self.api.app_ranking(mode=mode,
                                            date=date,
                                            offset=offset).illusts
        page = page + 1
        if len(ranking_data) > 0:
            print ('Get from ranking(page=' + str(page) + '):' +
                   str(len(ranking_data)))
            for illu in ranking_data:
                task = Task(TASK_TYPE_RANKING,
                            DOWNLOAD_MODE_DETAIL,
                            path=path,
                            p_limit=CommonUtils.set_int(self.p_limit.get(), 0),
                            illu=illu,
                            title="Ranking_" + mode + "_" + date,
                            get_from='ranking')
                tasks.append(task)
            # NOTE(review): offset advances by 1 per page; if app_ranking's
            # offset counts illustrations this should advance by
            # len(ranking_data) — confirm against the API.
            offset = offset + 1
        else:
            print ('warning', 'Ranking(page=' + str(page) +
                   ') results are empty')
            showerror("error",
                      'Ranking(page=' + str(page) + ') results are empty')
            break
    all_count = len(tasks)
    if all_count > 0:
        # Shared counter so workers can tell when the batch completes.
        current_count = AtomicInteger.AtomicInteger()
        if not os.path.exists(path):
            os.makedirs(path)
        for task in tasks:
            task.all_count = all_count
            task.current_count = current_count
            self.queue.add_work(task)
def api_search(keyword, api, page=1, download_threshold=DOWNLOAD_THRESHOLD,
               id_set=None):
    """Search illustrations through the pixiv app API.

    Fixes: the *keyword* argument was ignored (the query was hard-coded to
    '百合'), and callers pass ``id_set=`` which the old signature did not
    accept (TypeError).

    :param keyword: search term; must be non-empty
    :param api: authenticated app-API client
    :param page: number of extra result pages to pull
    :param download_threshold: minimum bookmark count, 0/None disables
    :param id_set: optional set of already-collected illustration ids used
        for de-duplication; shared with the caller when provided
    :return: list of de-duplicated illustration objects
    :raises PixivError: when *keyword* is empty
    """
    illusts = []
    if CommonUtils.is_empty(keyword):
        raise PixivError('[ERROR] keyword is empty')
    # Reuse the caller's id set when given so results are de-duplicated
    # against illustrations it already collected.
    ids = id_set if id_set is not None else set()
    count = 0
    for data in api.search_popular_illust(keyword).illusts:
        if download_threshold:
            if data.total_bookmarks >= download_threshold:
                if data.id not in ids:
                    ids.add(data.id)
                    illusts.append(data)
        elif data.id not in ids:
            ids.add(data.id)
            illusts.append(data)
    if page:
        while page > 0:
            for data in api.search_illust(keyword, offset=count).illusts:
                count = count + 1
                if download_threshold:
                    if data.total_bookmarks >= download_threshold:
                        if data.id not in ids:
                            ids.add(data.id)
                            illusts.append(data)
                elif data.id not in ids:
                    ids.add(data.id)
                    illusts.append(data)
            page = page - 1
    return illusts
def illust_recommended(self, content_type='illust',
                       include_ranking_label=True,
                       illust_filter='for_ios',
                       max_bookmark_id_for_recommend=None,
                       min_bookmark_id_for_recent_illust=None,
                       offset=None,
                       include_ranking_illusts=None):
    """Fetch the app-API recommended illustration feed.

    The optional bookmark/offset arguments are only added to the request
    when truthy; include_ranking_illusts is sent as a formatted boolean.
    """
    url = 'https://app-api.pixiv.net/v1/illust/recommended'
    params = {
        'content_type': content_type,
        'include_ranking_label': format_bool(include_ranking_label),
        'filter': illust_filter,
    }
    optional = (
        ('max_bookmark_id_for_recommend', max_bookmark_id_for_recommend),
        ('min_bookmark_id_for_recent_illust',
         min_bookmark_id_for_recent_illust),
        ('offset', offset),
    )
    for key, value in optional:
        if value:
            params[key] = value
    if include_ranking_illusts:
        params['include_ranking_illusts'] = CommonUtils.format_bool(
            include_ranking_illusts)
    r = self.auth_requests_call('GET', url, params=params)
    return parse_resp(r)
def _openfiles(self):
    """Show a multi-file picker starting at the last used folder, store
    the new location and forward the selection to process_files()."""
    try:
        last_dir = CommonUtils.get_setting_ini_('DEFAULT',
                                                'last_open_folder', "./")
        files, file_type = QFileDialog.getOpenFileNames(
            None, "多文件选择", last_dir, "All Files (*)")
        if not files:
            return
        # Remember the folder of the first picked file for next time.
        CommonUtils.update_setting_ini_('DEFAULT', 'last_open_folder',
                                        files[0])
        self.process_files(files)
    except Exception as e:
        print(e)
        pass
def confirm_click(self):
    """Persist the GIF start/end/interval settings from the dialog to
    setting.ini and close the dialog."""
    config = CommonUtils.read_config()
    config.set('DEFAULT', 'gif_start', self.gifStartTime.text())
    config.set('DEFAULT', 'gif_end', self.gifEndTime.text())
    config.set('DEFAULT', 'gif_interval', self.gifInterval.text())
    # Fix: the file handle was opened inline and never closed; "with"
    # guarantees flush/close even if write() raises.
    with open('setting.ini', 'w') as config_file:
        config.write(config_file)
    self.close()
def run(self):
    """Thread body: download a pixivision topic page into self.path.

    Creates the target directory if needed, then reports success/failure
    through the registered hooks; when callback_params carries a shared
    counter, also emits an all-done message after the final topic.
    """
    if not os.path.exists(self.path):
        try:
            os.makedirs(self.path)
        except Exception as e:
            error_log("make dir Fail:" + self.path)
            error_log(e)
            return
    try:
        path = ImageDownload.download_topics(self.url,
                                             self.path,
                                             create_path=self.create_path,
                                             downloader=self.downloader)
        if self.success:
            self.success(CommonUtils.build_callback_msg(path, url=self.url))
        # When launched as part of a counted batch, signal overall
        # completion after the last thread finishes.
        if self.callback_params and self.callback_params.has_key(
                'current_count') and self.callback_params.has_key(
                    'all_count'):
            current_count = self.callback_params[
                'current_count'].getAndInc()
            if self.callback_params['all_count'] == (current_count + 1):
                self.success("Download from Pixivision:\n" +
                             self.callback_params['url'] +
                             "\nAll tasks are complete!\n\n")
    except Exception as e:
        print("Download topics fail")
        print(e)
        if self.fail:
            self.fail()
def search(self, word, page=1, search_type='illust',
           download_threshold=DOWNLOAD_THRESHOLD):
    """Scrape one pixiv web search result page.

    Merges the "popular introduction" block with the normal results,
    drops entries missing url/title/mark_count or below the bookmark
    threshold, and backfills an ``id`` from the url when absent.

    :param word: search term; must be truthy
    :param page: 1-based result page number
    :param search_type: search category inserted into the url
    :param download_threshold: minimum bookmark count to keep an entry
    :return: list of result dicts (possibly empty)
    :raises PixivError: if *word* is falsy
    """
    if word:
        url = (PIXIV_SEARCH_URL % (word, search_type, int(page)))
    else:
        raise PixivError('search word can not be null')
    print(url)
    html = self.request_page(url)
    if not html:
        print("Get Page is None!URL:" + url)
        return []
    search_result = PixivHtmlParser.parse_search_result(html)
    pop_result = PixivHtmlParser.parse_popular_introduction(html)
    if not pop_result:
        pop_result = []
    if search_result:
        pop_result.extend(search_result)
    # Filter out incomplete entries and those whose bookmark count is
    # below the download threshold.
    if len(pop_result) > 0:
        pop_result = filter(
            lambda data: (data.has_key("url") and data.has_key(
                "title") and data.has_key("mark_count") and int(
                    data.mark_count) >= download_threshold), pop_result)
        for result in pop_result:
            if not result.has_key('id'):
                result['id'] = CommonUtils.get_url_param(
                    result['url'], "illust_id")
    return pop_result
def download_illustration(self, illu, path, p_limit=0):
    """Download the illustration described by the prebuilt summary *illu*.

    :param illu: illustration summary (needs ``url`` and ``title`` keys)
    :param path: destination directory
    :param p_limit: max page count; 0 means unlimited
    :return: the saved file/folder path, PAGE_LIMIT_CONTINUE when skipped
        because of the page limit, or None when detail lookup fails
    """
    if illu.has_key("url") and illu.has_key("title"):
        illust_id = CommonUtils.get_url_param(illu.url, "illust_id")
        detail = self.api.illust_detail(illust_id)
        if detail:
            try:
                detail = detail.illust
                # Single-page illustration.
                if detail.page_count == 1:
                    try:
                        url = detail.meta_single_page.original_image_url
                    except Exception:
                        # No original url: fall back to the large preview.
                        url = detail.image_urls.large
                    path = self.download(illust_id, path, url)
                # Multi-page illustration.
                else:
                    if 0 < p_limit < detail.page_count:
                        # Page count exceeds the limit: skip this one.
                        print("Pixiv id:%s, P>limit,Skip download" %
                              (illust_id, ))
                        return PAGE_LIMIT_CONTINUE
                    urls = detail.meta_pages
                    # Page list available: save all pages in one folder.
                    if len(urls) > 1:
                        path += "/p_%s" % illust_id
                        if not os.path.exists(path):
                            os.mkdir(path)
                        for index in range(len(urls)):
                            try:
                                url = urls[index].image_urls.original if \
                                    urls[index].image_urls.has_key("original") else urls[index].image_urls.large
                                extension = os.path.splitext(url)[1]
                                save_path = path + "/p_%s_%d%s" % (
                                    illust_id, index, extension)
                                print(save_path)
                                self.api.download(url, path=save_path)
                            except Exception:
                                continue
                        path = path + "/"
                    else:
                        # Page list missing: download the large image.
                        url = detail.image_urls.large
                        path = self.download(illust_id, path, url)
                return path
            except Exception as e:
                error_log("Download fail:")
                error_log(e)
        else:
            print(illu.title + " can't get detail id :" + illust_id)
    else:
        return
def _downlowd_info(self):
    """Fetch metadata for every not-yet-downloaded type-1 video, updating
    the status label as each one completes.

    (Method name typo is kept: callers reference ``_downlowd_info``.)
    """
    try:
        pending = SqlUtils._select_(
            "SELECT identifier,hash from video where is_download = 0 and type = 1"
        )
        for identifier, local_name in pending:
            # self.statusbar.showMessage("下载影片信息中, 影片本地名称:" + local_name + " 识别码:" + identifier)
            self.status_text_label.setText("下载影片信息中, 影片本地名称:" +
                                           local_name + " 识别码:" + identifier)
            CommonUtils.get_video_info(identifier, local_name, 1)
            # self.statusbar.showMessage("影片-" + local_name + "-信息下载完成")
            self.status_text_label.setText("影片-" + local_name + "-信息下载完成")
        # self.statusbar.showMessage("所有影片信息下载完成")
        self.status_text_label.setText("所有影片信息下载完成")
    except Exception as e:
        print("下载异常,请重试: " + str(e))
        pass
def download(illust_id, title, path, url, auth_api):
    """Download *url* into *path*, naming the file after the illustration
    id (and, when IMAGE_USE_ORG_NAME is set, the sanitised title)."""
    extension = os.path.splitext(url)[1]
    if IMAGE_USE_ORG_NAME:
        file_name = "/p_%s_%s%s" % (
            illust_id, CommonUtils.filter_dir_name(title), extension)
    else:
        file_name = "/p_%s%s" % (illust_id, extension)
    save_path = path + file_name
    print(save_path)
    auth_api.download(url, path=save_path)
def handle_search(self):
    """Validate the keyword/path fields and launch the search in a
    background thread."""
    keywords = self.keywords.get().strip()
    if CommonUtils.is_empty(keywords):
        showwarning("warning", "Please enter search keywords!")
        print("warning", "Please enter search keywords!")
        return
    if CommonUtils.is_empty(self.path_var.get()):
        showwarning("warning", "path can't be empty!")
        print("warning", "path can't be empty!")
        return
    path = self.path_var.get().strip()
    if not os.path.exists(path):
        showerror("error", " No such file or directory!")
        print('error', 'No such file or directory')
        return
    # Results go into a per-search sub-folder.
    target = path + "/" + CommonUtils.filter_dir_name("search_" + keywords)
    showinfo("info", "Is searching:")
    worker = Thread(target=self.search, args=(keywords, target))
    worker.start()
def _process_video_list(self, video_list):
    """Import local video files into the database.

    For each path: derive name/hash, update the stored path when the hash
    already exists, otherwise classify the video, read its resolution and
    insert a new row.
    """
    config = CommonUtils.read_config()
    image_type = config.get('DEFAULT', 'default_img_type')
    qb_identifier_str = config.get('DEFAULT', 'qb_identifier')
    qb_identifier_arr = qb_identifier_str.split(",")
    for _video_path in video_list:
        _video_path = _video_path.replace("\\", "/")
        _video_name = _video_path[_video_path.rfind('/') +
                                  1:_video_path.rfind('.')]
        # The file name doubles as the "hash" key (real hashing disabled).
        _hash = _video_name
        # _hash = file_md5(_video_path)
        is_exists, video_path_in_datebase = SqlUtils.hash_exists(_hash)
        if is_exists:
            if video_path_in_datebase != _video_path:
                # Confirmation dialog intentionally disabled:
                # reply = QMessageBox.question(None, _video_name, "数据库中已存在名称相同的视频,是否更新视频路径?",
                #                              QMessageBox.Yes | QMessageBox.No)
                # if reply == QMessageBox.Yes:
                sql = "UPDATE video SET video_path = ?,video_name_local=? WHERE hash = ?"
                SqlUtils.update_video(sql,
                                      (_video_path, _video_name, _hash))
                self.status_text_label.setText("更新路径完成")
                # self.statusbar.showMessage("更新路径完成", 5000)
        else:
            _identifier = ""
            _serious = ""
            _video_type = 0
            try:
                _video_type = self.get_video_type(_video_name,
                                                  qb_identifier_arr)
                if _video_type == 1:
                    _identifier, _serious = self._get_qb_identifier(
                        qb_identifier_arr, _video_name)
                    print(_video_name + " : " + _identifier)
                else:
                    pass
            except Exception as e:
                print(e)
                pass
            video = VideoFileClip(_video_path)  # open the video
            video.close()  # close the video; size metadata stays readable
            video_width = str(video.size[0])
            video_height = str(video.size[1])
            resolution = video_width + ',' + video_height
            # todo capture duration as well
            # video.duration
            sql = "INSERT INTO video (resolution,series,identifier,type,video_name_local,video_path,img_type,hash) VALUES (?,?,?,?,?,?,?,?)"
            SqlUtils.update_video(
                sql, (resolution, _serious, _identifier, _video_type,
                      _video_name, _video_path, image_type, _hash))
            self.status_text_label.setText(_video_name + " : " +
                                           _identifier + " 已导入")
            # self.statusbar.showMessage(_video_name + " : " + _identifier + " 已导入", 5000)
            print(_hash)
    self.status_text_label.setText("导入完成")
def handle_related(self):
    """Validate the id/path inputs and start a background thread that
    downloads illustrations related to the entered illustration id."""
    id_var = CommonUtils.set_int(self.id_var.get().strip())
    if id_var <= 0:
        # Fix: the old message was copy-pasted from handle_search and
        # asked for "search keywords" instead of an illustration id.
        showwarning("warning", "Please enter a valid illustration id!")
        print("warning", "Please enter a valid illustration id!")
        return
    if CommonUtils.is_empty(self.path_var.get()):
        showwarning("warning", "path can't be empty!")
        print("warning", "path can't be empty!")
        return
    path = self.path_var.get().strip()
    if not os.path.exists(path):
        showerror("error", " No such file or directory!")
        print('error', 'No such file or directory')
        return
    path = path + "/" + CommonUtils.filter_dir_name("related_" +
                                                    str(id_var))
    showinfo("info", "Get related illus of " + str(id_var) + " :")
    related_handler = Thread(target=self.related, args=(id_var, path))
    related_handler.start()
def download_pic(self):
    """Download the cover image for the video identified by
    self.video_hash and mark it as downloaded (is_download = 2).

    NOTE(review): the SELECT is built by string concatenation from
    self.video_hash — if the hash can ever contain a quote this is SQL
    injection; prefer a parameterised query if SqlUtils supports one.
    """
    try:
        video = SqlUtils._select_(
            "SELECT identifier,img_url from video where hash = " + '\'' +
            self.video_hash + '\'')
        is_success_download_img = CommonUtils.download_img(
            video[0][0], video[0][1])
        if is_success_download_img:
            sql = "UPDATE video SET is_download = ? WHERE hash = ?"
            SqlUtils.update_video(sql, (2, self.video_hash))
            # self.statusbar.showMessage("图片下载成功", 5000)
            print("2222")
    except Exception as e:
        print(e)
def get_pixivision_topics(cls, url, path):
    """Parse the pixivision page at *url*, create one folder per topic
    under *path* and record each topic's save location.

    :param url: pixivision page listing illustration topics
    :param path: base directory; one sub-folder per topic is created
    """
    topic_list = HtmlDownloader.parse_illustration_topic(
        HtmlDownloader.download(url))
    if not topic_list:
        error_log(url + " not find any illustration topic")
        return
    for topic in topic_list:
        try:
            # Filter special characters, otherwise folder creation fails.
            # Create the topic folder and write the topic info file.
            save_path = path + "/" + CommonUtils.filter_dir_name(
                topic.title)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            CommonUtils.write_topic(save_path + "/topic.txt", topic)
            topic['save_path'] = save_path
        except Exception as e:
            # Fix: `continue` previously preceded the error_log calls,
            # making them unreachable; log first, then move on.  Also
            # replaces the Python-2-only "except Exception, e" syntax.
            error_log("Create topic path fail,topic url:" + topic.Href)
            error_log(e)
            continue
def get_issues_closed_yesterday(self, issue_type=("Market Problem",
                                                  "Market Problem")):
    '''
    Fetch "Market Problem" issues resolved yesterday whose resolution is
    新增问题 (new issue) or 无法分析 (cannot analyse) and whose source is
    信相连 or 网络信息.
    :param issue_type: issue types for the JQL "in" clause.  NOTE(review):
        the duplicated tuple entry keeps the tuple's repr a syntactically
        valid JQL list — confirm before changing it.
    :return: list of issue-detail dicts
    '''
    today = CommonUtils.get_date_others(0)
    yesterday = CommonUtils.get_date_others(-1)  # -1 = yesterday, -2 = day before
    # "resolved" in JQL maps to the resolutiondate field.
    jql = 'issuetype in {} and (resolved >{} and resolved < {})'.format(
        issue_type, yesterday, today)  # interval [yesterday, today)
    issues = self.jira_client.search_issues(jql)
    issues_list = []  # one dict of details per retrieved issue
    issues_list_use = []
    for item in issues:
        issue_key = item.key
        issues_list.append({'key': issue_key})
        i = len(issues_list) - 1  # index of the dict just appended
        issues_list[i]['project'] = self.get_issue_project(issue_key)
        issues_list[i]['summary'] = self.get_issue_summary(issue_key)
        issues_list[i]['feedback'] = self.get_issue_feedback(issue_key)
        issues_list[i]['type'] = self.get_issue_type(issue_key)
        issues_list[i]['priority'] = self.get_issue_priority(issue_key)
        issues_list[i]['status'] = self.get_issue_status(issue_key)
        issues_list[i]['result'] = self.get_issue_result_xinxianglian(
            issue_key)
        issues_list[i]['source'] = self.get_issue_source(issue_key)
        issues_list[i]['resolutiondate'] = self.get_issue_resolutiondate(
            issue_key)
        issues_list[i]['probability'] = self.get_issue_probability(
            issue_key)
    # Keep only "new issue" / "cannot analyse" results from the two
    # accepted sources.
    for item in issues_list:
        if ((item['result'] == '新增问题' or item['result'] == '无法分析') and
                (item['source'] == '信相连' or item['source'] == '网络信息')):
            issues_list_use.append(item)
    return issues_list_use
def perform_proximity_search(QUERIES_FILE, INVERTED_DICT_FILE, SYSTEM_NAME):
    """Run proximity search for every query and evaluate the results.

    For each query: find documents containing all terms, score them by
    proximity, merge in near-match documents, keep the top 100 and write
    everything to the per-system results file, then run the evaluation.
    """
    OUTPUT_FILE = properties.proximity_output_path + "/Results_" + SYSTEM_NAME + ".txt"
    inv_indx_dict = get_inv_index_dict(INVERTED_DICT_FILE)
    output_lst = []
    for query in CommonUtils.get_all_queries(QUERIES_FILE):
        query_terms_lst = CommonUtils.get_query_terms_lst(query)
        q_id = CommonUtils.get_q_id(query)
        # print(q_id)
        all_terms_match_docs = get_all_terms_matched_docs(
            inv_indx_dict, query_terms_lst)
        # If no document has all the query terms, the near-match documents
        # merged below provide the fallback candidates.
        docs_pos_dict = get_info_for_all_matched_docs(
            query_terms_lst, all_terms_match_docs, inv_indx_dict)
        exact_matches_dict = get_proximity_scored_dict(docs_pos_dict)
        near_match_dict = find_near_match_docs(inv_indx_dict,
                                               query_terms_lst)
        # merged_dict = {**near_match_dict, **docs_pos_dict}
        merged_dict = merge_two_dics(near_match_dict, exact_matches_dict)
        sorted_merged_tuple = sorted(merged_dict.items(),
                                     key=operator.itemgetter(1),
                                     reverse=True)
        sorted_merged_tuple = sorted_merged_tuple[:100]  # top 100 only
        # print("==>",dict(sorted_merged_tuple))
        final_scored_dict = dict(sorted_merged_tuple)
        output_lst.append([query, final_scored_dict])
        # write_results(q_id, final_scored_dict, OUTPUT_FILE)
    write_results(output_lst, OUTPUT_FILE)
    print(
        "[INFO] Top 100 documents with scores are written in to the output file ..."
    )
    # build_dict(output_lst)
    print("[INFO] Evaluation being performed on proximity results ...")
    find_effectiveness_scores(OUTPUT_FILE, "PROXIMITY_" + SYSTEM_NAME)
    print(
        "[INFO] Evaluation done and the results are written to output file ..."
    )
def __init__(self):
    """Build the custom-tag widget: wire the two buttons and add one
    checkbox per saved custom tag from setting.ini."""
    super(custom_lab_widget, self).__init__()
    self.setupUi(self)
    self.add_tab_pushButton.clicked.connect(self._add_lab_on_click)
    self.confirm_pushButton.clicked.connect(self._confirm_pushButton_on_click)
    custom_tag_str = str(CommonUtils.get_setting_ini_('DEFAULT', 'custom_tag', ""))
    for tag in custom_tag_str.split(","):
        if tag.strip() == '':
            continue
        # NOTE(review): self.checkBox is rebound on every iteration, so it
        # ends up referencing only the last checkbox — confirm nothing
        # relies on the attribute before making it a local variable.
        self.checkBox = QCheckBox(tag)
        self.checkBox.setChecked(False)
        self.flowLayout.addWidget(self.checkBox)
def download_topics(cls, url, path, quality=1):
    """Download every illustration of a pixivision topic page.

    :param url: topic page url
    :param path: destination directory (topic.txt is written there too)
    :param quality: 1 -> resolve the original image through the pixiv
        API; anything else downloads the pixivision preview directly.
    """
    html = HtmlDownloader.download(url)
    illu_list = HtmlDownloader.parse_illustration(html)
    title_des = HtmlDownloader.get_title(html)
    if title_des and illu_list:
        title_des["size"] = len(illu_list)
        CommonUtils.write_topic_des(path + "/topic.txt", title_des)
    if not illu_list:
        return
    for illu in illu_list:
        try:
            filename = CommonUtils.filter_dir_name(illu.title)
            extension = os.path.splitext(illu.image)[1]
            # Renamed from `id` to avoid shadowing the builtin.
            illu_id = CommonUtils.get_url_param(illu.image_page,
                                                "illust_id")
            if quality == 1:
                # Resolve the original image url through the pixiv API.
                detail = PixivApi.illust_detail(illu_id)
                if detail:
                    download_url = ImageDownload.get_image_url(
                        illu, detail)
                    if IMAGE_USE_ORG_NAME:
                        save_path = path + "/p_%s_%s%s" % (illu_id,
                                                           filename,
                                                           extension)
                    else:
                        save_path = path + "/p_%s%s" % (illu_id, extension)
                    print(save_path)
                    PixivApi.download(download_url, path=save_path)
                else:
                    print(illu.title + " can't get detail id :" + illu_id)
            else:
                # Download the pixivision preview image directly.
                print(path + "/p_%s_%s%s" % (illu_id, filename, extension))
                PixivApi.download(illu.image,
                                  path=path + "/p_%s_%s%s" %
                                  (illu_id, filename, extension))
        except Exception as e:
            # Fix: "except Exception, e" is Python-2-only syntax.
            error_log("Download Illu Fail:" + " Illustration :" +
                      str(illu))
            error_log(e)
            continue
def get_issue_preuser2sw_yesterday(self):
    """Fetch issues reclassified from "PreUser Bug" to "SW Bug" yesterday
    and keep those whose affected version starts with 6.

    :return: list of issue-detail dicts (empty when nothing matched)
    """
    zero_yesterday = CommonUtils.get_date_time_others_zero(-1)
    last_yesterday = CommonUtils.get_date_time_others_last(-1)
    sql_preuser2sw = 'select issueid from changegroup where ID in (select groupid from changeitem where \
OLDSTRING="PreUser Bug" and NEWSTRING="SW Bug") and CREATED between %s and %s'
    issues = MysqlUtils.find_data_dic_2args(sql_preuser2sw, zero_yesterday,
                                            last_yesterday)
    issues_list = []  # one dict of details per retrieved issue
    issues_list_use = []
    if issues:
        for item in issues:
            issue_key = str(item['issueid'])
            issues_list.append({'id': issue_key})
            i = len(issues_list) - 1  # index of the dict just appended
            issues_list[i]['project'] = self.get_issue_project(issue_key)
            issues_list[i]['summary'] = self.get_issue_summary(issue_key)
            issues_list[i]['description'] = self.get_issue_description(
                issue_key)
            issues_list[i]['type'] = self.get_issue_type(issue_key)
            issues_list[i]['priority'] = self.get_issue_priority(issue_key)
            issues_list[i]['status'] = self.get_issue_status(issue_key)
            issues_list[i]['source'] = self.get_issue_source(issue_key)
            issues_list[i]['probability'] = self.get_issue_probability(
                issue_key)
            issues_list[i]['versions'] = self.get_issue_versions(issue_key)
            issues_list[i]['key'] = self.get_issue_key(issue_key)
        # Keep only issues whose version number starts with 6.
        for item in issues_list:
            if (ReUtils.bool_version_6(item['versions'])):
                issues_list_use.append(item)
    else:
        issues_list_use = []
    return issues_list_use
def _prompt_status(self, task_id, task_name):
    """Poll the task endpoint until it reports 'ready' or the attempt
    budget (STATUS_COUNT_THRESHOLD, one poll per second) runs out.

    :param task_id: id of the task to poll
    :param task_name: label used in log messages
    :return: True when the task became ready, False otherwise
    """
    self.free_token = False
    returnValue = False
    for i in range(0, STATUS_COUNT_THRESHOLD):
        get_response, status = self.communication_utils.get('vdc/tasks/{0}.json'.format(task_id))
        # NOTE(review): task_id is overwritten with the id from the polled
        # response here — confirm the endpoint echoes the same id back.
        task_id, state = fetch_stats(get_response)
        CommonUtils.log_this(__name__, "{0} is {1}".format(task_name, state.upper()), "")
        if state == 'ready':
            self.free_token = True
            returnValue = True
            break
        else:
            # NOTE(review): self.status_counter is never incremented in
            # this loop, so this early-exit check may be dead — confirm.
            if self.status_counter == STATUS_COUNT_THRESHOLD:
                returnValue = False
                break
            time.sleep(1)  # back off between polls
    return returnValue
def handle_ranking(self):
    """Validate the ranking parameters (path, date, page count) and start
    the ranking download in a background thread."""
    mode = self.mode_var.get()
    path = self.path_var.get().strip()
    page = CommonUtils.set_int(self.page_number.get(), 2)
    date = self.date_var.get().strip()
    if CommonUtils.is_empty(path):
        showwarning("warning", "path can't be empty!")
        print ("warning", "path can't be empty!")
        return
    if not os.path.exists(path):
        showerror("error", " No such file or directory!")
        print ('error', 'No such file or directory')
        return
    if not CommonUtils.validate_date_str(date):
        showerror("error", "Date Wrong!")
        print ('error', 'Date Wrong')
        return
    # Future dates are rejected — there is no ranking for them yet.
    requested_day = datetime.datetime.strptime(date, '%Y-%m-%d')
    if requested_day > datetime.datetime.now():
        showerror("error", "The date can not be greater than the day!")
        print ('error', 'The date can not be greater than the day')
        return
    showinfo("info", "Get ranking...")
    worker = Thread(target=self.ranking, args=(path, mode, date, page))
    worker.start()
def download_topics(cls, url, path, create_path=False, downloader=None):
    """Download every illustration of a pixivision topic page by id.

    :param url: topic page url
    :param path: base directory
    :param create_path: when True, create a sub-folder named after the
        topic title and download into it
    :param downloader: optional downloader; falls back to
        PixivImageDownloader when absent
    :return: the directory the topic was written to (None when the page
        yielded no illustrations)
    """
    html = PixivisionHtmlParser.download(url)
    illu_list = PixivisionHtmlParser.parse_illustration(html)
    title_des = PixivisionHtmlParser.get_title(html)
    # Optionally create a per-topic sub-folder.
    # Fix: "'title' in title_des" replaces the py2-only has_key().
    if create_path and title_des and 'title' in title_des:
        path = path + "/" + title_des['title']
        if not os.path.exists(path):
            os.makedirs(path)
    if title_des and illu_list:
        title_des["size"] = len(illu_list)
        title_des["url"] = url
        CommonUtils.write_topic_des(path + "/topic.txt", title_des)
    if not illu_list:
        return
    for illu in illu_list:
        # Renamed from `id` to avoid shadowing the builtin.
        illust_id = CommonUtils.get_url_param(illu.image_page, "illust_id")
        if downloader:
            downloader.download_all_by_id(illust_id, path + '/')
        else:
            PixivImageDownloader.download_all_by_id(illust_id, path + '/')
    print('*' * 10)
    print(url + " Download End!")
    return path
# Module bootstrap: load deployment.ini once at import time and expose
# helpers that read credential sections from it.
# NOTE(review): `parser` imported from dateutil is immediately shadowed by
# the SafeConfigParser instance below; the dateutil import looks unused —
# confirm before removing.
from Crypto.Util.number import size
from dateutil.parser import parser

__author__ = 'zanetworker'

from ConfigParser import SafeConfigParser
import utils.CommonUtils as CommonUtil

parser = SafeConfigParser()
try:
    config_file = CommonUtil.get_file_location('config', 'deployment.ini')
    parser.read(config_file)
except Exception as e:
    print e.message


def load_vipr_credentials():
    """Return the ViPR host/port/cookie-path settings as a dict."""
    results = {
        'vipr_host': parser.get('vipr', 'HOST'),
        # NOTE(review): ConfigParser.get's third positional argument is
        # `raw`, not a fallback default — 4443 looks like an intended
        # default; confirm and handle NoOptionError explicitly instead.
        'vipr_port': parser.get('vipr', 'PORT', 4443),
        'cookie_path': parser.get('vipr', 'COOKIE_DIR_ABS_PATH')
    }
    return results


def load_smis_details():