def is_report_menu_expanded(self):
    utils.debug("check whether menu [Report Schedule] is expanded.")
    report_menu_class = self.slb.get_attribute(
        report_schedule.report_menu_item_li, 'class')
    return "is-opened" in report_menu_class
def click_report_biz_list(self):
    utils.log("click [Report Biz List] tab.")
    if not self.is_report_menu_expanded():
        utils.debug("expand menu [Report Schedule]")
        self.slb.click(report_schedule.report_toggle)
    self.slb.move_to_click(report_schedule.report_schedule_link)
    self.slb.sleep(1)
def get_data_grid_rows(self):
    utils.debug("get data rows from data grid.")
    trs = self.slb.get_elements(datagrid.table_trs)
    if not trs:
        utils.warn("did not find any data grid rows!")
    utils.debug("got total [{}] row data records".format(len(trs)))
    return trs
def eval(model, workspace, test_path, device):
    model.eval()
    # torch.cuda.empty_cache()  # speed up evaluating after training finished
    img_path = os.path.join(test_path, 'Images/Test/')
    save_path = os.path.join(workspace, 'output')
    if os.path.exists(save_path):
        shutil.rmtree(save_path, ignore_errors=True)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    vis_ctw1500 = os.path.join(workspace, 'vis_ctw1500')
    if os.path.exists(vis_ctw1500):
        shutil.rmtree(vis_ctw1500, ignore_errors=True)
    if not os.path.exists(vis_ctw1500):
        os.makedirs(vis_ctw1500)
    long_size = 1280

    # run prediction on every test image
    img_paths = [os.path.join(img_path, x) for x in os.listdir(img_path)]
    for idx, img_path in enumerate(tqdm(img_paths, desc='test models')):
        img_name = os.path.basename(img_path).split('.')[0]
        save_name = os.path.join(save_path, 'res_' + img_name + '.txt')
        assert os.path.exists(img_path), 'file does not exist'
        img = cv2.imread(img_path)
        org_img = img.copy()
        h, w = img.shape[:2]
        img = scale_aligned_short(img)
        tensor = transforms.ToTensor()(img)
        tensor = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])(tensor)
        tensor = tensor.unsqueeze_(0)
        tensor = tensor.to(device)
        with torch.no_grad():
            preds = model(tensor)
        preds, boxes_list = pse_decode(preds[0], config.scale, org_img)
        if config.visualization:
            for bbox in boxes_list:
                bbox = np.array(bbox, int)
                cv2.drawContours(org_img,
                                 [bbox.reshape(int(bbox.shape[0] / 2), 2)],
                                 -1, (0, 255, 0), 2)
            org_img = cv2.resize(org_img, (640, 640))
            debug(idx, img_path, [[org_img]], vis_ctw1500)
        image_name = img_path.split('/')[-1].split('.')[0]
        write_result_as_txt(image_name, boxes_list, save_path)

    # recall, precision, f1
    gt_path = os.path.join(test_path, 'gt/Test')
    fid_path = os.path.join(workspace, 'res_tt.txt')
    precision, recall, hmean = evl_totaltext(save_path, gt_path, fid_path)
    # f_score_new = getresult(save_path, config.gt_name)
    return precision, recall, hmean
def get_data_grid_headers(self):
    utils.debug("get headers from data grid.")
    headers = []
    headers_elements = self.slb.get_elements(datagrid.headers)
    for header in headers_elements:
        headers.append(self.slb.get_element_text(header))
    if not headers:
        utils.warn("did not find any headers for this data grid!")
    utils.debug("got [{}] data grid headers.".format(len(headers)))
    return headers
def __init__(self, name, password):
    # split the full name into first name and surname(s)
    name = name.split(' ')
    self.name = name[0]
    self.surname = ' '.join(name[1:])
    self.password = password
    u.debug('Mapped user ' + self.name + ' with password ' + self.password + '.')
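# getFullName() is called by startSession() further below but is not defined in this
# snippet; a minimal sketch of that method, assuming it simply rejoins the parsed
# first name and surname:
def getFullName(self):
    return (self.name + ' ' + self.surname).strip()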
def get_data_grid_data(self):
    utils.debug("get the data grid data in view and save it to a dict list.")
    data_dict_list = []
    trs = self.get_data_grid_rows()
    headers = self.get_data_grid_headers()
    for tr in trs:
        temp_row_dict = {}
        tds_in_tr = self.slb.get_elements_via_element(tr, datagrid.td)
        for column, td in zip(headers, tds_in_tr):
            temp_row_dict[column] = self.slb.get_element_text(td)
        data_dict_list.append(temp_row_dict)
    utils.debug("got data dict list as below:\n{}".format(data_dict_list))
    return data_dict_list
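# A hypothetical usage sketch: the page object name (report_page) and the column
# header ("Status") are assumptions for illustration, not taken from the code above.
rows = report_page.get_data_grid_data()
failed_rows = [row for row in rows if row.get("Status") == "Failed"]
utils.debug("found [{}] rows with status 'Failed'".format(len(failed_rows)))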
def _check_device(self, device, poc):
    utils.debug("[*] \tBuilding\tpoc: %s\tsdk: android-%s\tabi: %s"
                % (poc.file, device.sdk, device.abi))
    file_path = self.builder.build_poc(poc_file=poc.file,
                                       device_name=device.name,
                                       abi=device.abi,
                                       sdk=device.sdk)
    utils.debug("[*] \tExecuting\tpoc: %s\tdevice: %s" % (poc.file, device.name))
    status = self.executer.exec_poc(device_name=device.name, binary=file_path)
    return status
def print_progress(attempts, num_of_passwords):
    elapsed = time.time() - t0
    hours, r = divmod(elapsed, 3600)
    minutes, seconds = divmod(r, 60)
    # rate is attempts over the total elapsed time, not just the seconds remainder
    speed = attempts / elapsed if elapsed > 0 else 0.0
    sys.stdout.write('%s %.2f pw/s (%s/%s) %dh%dm%ds\n'
                     % (u.debug(), speed, attempts, num_of_passwords,
                        hours, minutes, seconds))
    sys.stdout.flush()
def poll_job(self, job):
    utils.debug("redash poll job: [{}]".format(job))
    counter = 0
    while True:
        response = self.req.get(
            os.path.join(self.url, config.POLL_JOB, str(job['id'])),
            headers=self.header)
        # Redash job status 3 means finished, 4 means failed
        if response.json()['job']['status'] in (3, 4):
            break
        elif counter > 80:
            # give up after roughly 160 seconds of polling
            break
        else:
            counter += 1
            time.sleep(2)
    if response.json()['job']['status'] == 3:
        return response.json()['job']['query_result_id']
    else:
        raise Exception(response.json()['job']['error'])
def startSession(users):
    user = None
    while True:
        valid = False
        userName = input('Enter the username: ')
        password = input('Enter the password: ')
        for user in users:
            if userName == user.name and password == user.password:
                valid = True
                break
        if valid:
            break
        print('Wrong username and/or password.\n')
    u.debug('User ' + userName + ' logged in with password ' + password)
    print('\nWelcome back ' + user.getFullName() + '.')
    return user
def sendMessage(message, sender, usersPath, userIndex, affineKey, hillKey, alphabet):
    message = u.formatMessage(message, sender.name)
    u.debug('Message: ' + message)
    message = encryptAffine(message, affineKey, alphabet)
    u.debug('After affine encryption: ' + message)
    message = encryptHill(message, hillKey, alphabet)
    u.debug('After Hill encryption: ' + message)
    u.deliverMessage(message, usersPath + str(userIndex))
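# encryptAffine() is used above but not shown in this snippet. A minimal sketch,
# assuming the standard affine cipher E(x) = (a*x + b) mod m, with affineKey given
# as a pair (a, b) and every character of message present in alphabet; these
# assumptions are not taken from the code above.
def encryptAffine(message, affineKey, alphabet):
    a, b = affineKey
    m = len(alphabet)
    # map each character to its alphabet index, apply a*x + b mod m, map back
    return ''.join(alphabet[(a * alphabet.index(ch) + b) % m] for ch in message)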
def check_devices(self, devices, pocs, result):
    for device in devices:
        for poc in pocs:
            time.sleep(1)
            utils.debug("[*] Checking device <%s> with poc <%s>"
                        % (device.name, poc.name))
            status = self._check_device(device=device, poc=poc)
            if status == consts.VULNERABLE:
                utils.debug(
                    "[!] Device <%s> is VULNERABLE to vulnerability <%s>"
                    % (device.name, poc.cve), mode=consts.DEBUG_RED)
            else:
                utils.debug(
                    "[√] Device <%s> is NOT VULNERABLE to vulnerability <%s>"
                    % (device.name, poc.cve), mode=consts.DEBUG_GREEN)
            print("")
            result.add_check_result(device=device, poc=poc, status=status)
def diagnose_devices(self, devices, vulns, result):
    for device in devices:
        for vuln in vulns:
            utils.debug(
                "[*] Diagnosing device <%s> with vulnerability <%s>"
                % (device.name, vuln.cve))
            status = self._diagnose_device(device, vuln)
            if status == consts.VULNERABLE:
                utils.debug(
                    "[!] Device <%s> MAY BE VULNERABLE to vulnerability <%s>"
                    % (device.name, vuln.cve), mode=consts.DEBUG_YELLOW)
            else:
                utils.debug(
                    "[√] Device <%s> MAY NOT BE VULNERABLE to vulnerability <%s>"
                    % (device.name, vuln.cve), mode=consts.DEBUG_GREEN)
            print("")
            result.add_diagnose_result(device=device, vuln=vuln, status=status)
def get_cards_text(self):
    utils.log("get cards text list in [Statistics Overview].")
    card_list = self.slb.get_elements_text_list(statistics_overview.card_text)
    utils.debug("got cards text list: {}".format(card_list))
    return card_list
def publish_kyc_reason_api(self, url, header):
    utils.log(">>publish kyc reason...")
    publish_result = self.req.post(url, headers=header)
    time.sleep(1)
    utils.debug(json.loads(publish_result.text)['data'])
    return publish_result.text
def eval(model, workspace, test_path, kern_size_, device):
    model.eval()
    img_path = os.path.join(test_path, 'Images/Test/')
    gt_path = os.path.join(test_path, 'gt/Test/')
    save_path = os.path.join(workspace, 'output')
    if os.path.exists(save_path):
        shutil.rmtree(save_path, ignore_errors=True)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    vis_ctw1500 = os.path.join(workspace, 'vis_ctw1500')
    if os.path.exists(vis_ctw1500):
        shutil.rmtree(vis_ctw1500, ignore_errors=True)
    if not os.path.exists(vis_ctw1500):
        os.makedirs(vis_ctw1500)

    img_paths = [os.path.join(img_path, x) for x in os.listdir(img_path)]
    gt_paths = [
        os.path.join(gt_path, 'poly_gt_' + x.split('.')[0] + '.mat')
        for x in os.listdir(img_path)
    ]
    for idx, img_path in enumerate(tqdm(img_paths, desc='test models')):
        img_name = os.path.basename(img_path).split('.')[0]
        # read the ground-truth polygons
        gt_path_one = gt_paths[idx]
        gt_box = get_bboxes(gt_path_one)
        assert os.path.exists(img_path), 'file does not exist'
        img = cv2.imread(img_path)
        org_img = img.copy()
        img = scale_aligned_short(img)
        tensor = transforms.ToTensor()(img)
        tensor = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])(tensor)
        tensor = tensor.unsqueeze_(0)
        tensor = tensor.to(device)
        with torch.no_grad():
            preds = model(tensor)
        preds_1 = torch.sigmoid(preds[0])
        preds_1 = preds_1.detach().cpu().numpy()
        preds, boxes_list = pse_decode(preds[0], config.scale, org_img, kern_size_)
        if config.visualization:
            for bbox in boxes_list:
                bbox = np.array(bbox, int)
                cv2.drawContours(org_img,
                                 [bbox.reshape(int(bbox.shape[0] / 2), 2)],
                                 -1, (0, 255, 0), 2)
            for bbox in gt_box:
                bbox = np.array(bbox, int)
                cv2.drawContours(org_img,
                                 [bbox.reshape(int(bbox.shape[0] / 2), 2)],
                                 -1, (0, 0, 255), 2)
            org_img = cv2.resize(org_img, (640, 640))
            image_list = []
            image_list.append(org_img)
            for i in range(7):
                score = (preds_1[i] * preds_1[-1]).copy().astype(np.float32)
                score = cv2.resize(score, (640, 640))
                score = np.expand_dims(score, -1)
                score = np.concatenate((score, score, score), -1)
                image_list.append(score * 255)
            debug(idx, img_path, [image_list], vis_ctw1500)
        image_name = img_path.split('/')[-1].split('.')[0]
        write_result_as_txt(image_name, boxes_list, save_path)

    # recall, precision, f1
    gt_path = os.path.join(test_path, 'gt/Test')
    fid_path = os.path.join(workspace, 'res_tt.txt')
    # remove any stale result file before evaluation
    if os.path.exists(fid_path):
        os.remove(fid_path)
    precision, recall, hmean = evl_totaltext(save_path, gt_path, fid_path)
    # f_score_new = getresult(save_path, config.gt_name)
    return precision, recall, hmean
utils.logger("###################---" + name + "---###################")
em_clf.fit(data_features_train, data_targets_train)

# Model evaluation
test_data_predicted = em_clf.predict(data_features_test)

# Cross validation
scores = model_selection.cross_val_score(em_clf, data_features_normalized,
                                         targets, cv=10)
mean_error = scores.mean()
utils.debug('Cross validation result: %s', mean_error)

# Get predictions for Kaggle
kaggle_predictions = em_clf.predict(test_dataset[:, 1:-1])

# Generate CSV for Kaggle with the csv package:
path = "../data/predicted_kaggle_" + str(name) + ".csv"
# with open(path, "w") as csv_file:
#     writer = csv.writer(csv_file, delimiter=',')
#     writer.writerow(["id", "price_doc"])
#
#     for i in range(len(kaggle_predictions)):
#         writer.writerow([test_dataset[i][0], kaggle_predictions[i]])

# Generate CSV for Kaggle with pandas (easiest way);
# the columns follow the commented-out csv version above
df_predicted = pandas.DataFrame({
    "id": test_dataset[:, 0],
    "price_doc": kaggle_predictions
})
df_predicted.to_csv(path, index=False)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@File   : initialize.py
@Time   : 2019-04-20 15:28
@Author : Bonan Ruan
@Desc   : perform some initialization steps before IDF starts working
"""
from utils import consts
from utils import utils

utils.debug("IDF initialized successfully.")
        scores = []
        q_w = q[1]
        for j, c in enumerate(cl):
            c_w = c[1]
            if len(c_w) == 0:
                c_w = "<PAD>"
            s_w = q[3]
            score, pred = predictAux(q_w, c_w, s_w, model)
            scores.append([score, j, 0, pred])
        # rank candidates by score (descending), then restore the original order
        scores = sorted(scores, key=lambda score: score[0], reverse=True)
        for i in range(len(scores)):
            scores[i][2] = i + 1
        scores = sorted(scores, key=lambda score: score[1])
        for score in scores:
            out.write('\t'.join([
                q[0], cl[score[1]][0],
                str(score[2]), str(score[0]), score[3]
            ]))
            out.write('\n')
    out.close()


debug('======= TEST MODE =======')
dataPath = config['TEST']['path']
fileList = config['TEST']['files']
data = constructData(dataPath, fileList)
output = dataPath + config['TEST']['predictions']
predict(data, output, model)
debug('======== FINISHED ========')
def print_progress(attempts, username, password):
    string = "{} #{}: {} + {}".format(u.debug(), attempts, username, password)
    # carriage return keeps the progress output on a single line
    print(f'{string}\r', end="")