def __process_matches(matches: list) -> Result:
    """Fold a list of rule matches into a single Result.

    Each match contributes its rule name, its matched strings, and the
    score declared in the match metadata.
    """
    combined = Result()
    for hit in matches:
        combined.merge_with({
            'rules': [hit.rule],
            'strings': hit.strings,
            'score': hit.meta['score'],
        })
    return combined
def process(self, temp_path: str, real_path: str) -> Result:
    """Score a file according to its git status.

    Files listed in ``self.changed_files`` (uncommitted changes) are
    flagged with a rule and weighted accordingly; committed files get
    the committed-files weighting.
    """
    outcome = Result()
    relative_path = real_path.replace(self.git_root, '')
    if relative_path in self.changed_files:
        outcome.score = self.UNCOMMITTED_FILES_WEIGHTING
        outcome.rules = ['Git_Uncommitted_Changes']
    else:
        outcome.score = self.COMMITTED_FILES_WEIGHTING
    return outcome
def getCompanies(self, name: str = None):
    """
    Retrieves one or many items from database depending on the
    inclusion of a filter parameter.
    """
    try:
        query = {} if name is None else {"Company Name": name}
        companies = self.database.getCompanies(query)
        return Result(200, 'successful', companies)
    except Exception as ex:
        self.logger.error(ex)
        return Result(400, 'An Error Occurred.', [])
def process(self, temp_path: str, real_path: str) -> Result:
    """Score a WordPress file by verifying it against the known checksum.

    Files not present in the checksum manifest are left unscored;
    matching files get a negative (trust) weighting, mismatches a
    positive (suspicion) weighting.
    """
    outcome = Result()
    relative_path = real_path.replace(self.wp_root, '')
    if relative_path not in self.wp_checksums:
        return outcome
    actual = self.__get_file_checksum(real_path)
    if self.wp_checksums[relative_path] != actual:
        outcome.rules.append('Hash_Verification_Failure')
        outcome.score = self.HASH_SCORE_WEIGHTING
    else:
        outcome.rules.append('Hash_Verification_Success')
        outcome.score = -self.HASH_SCORE_WEIGHTING
    return outcome
def category(title):
    """Classify a single news title and return the result as JSON.

    The title is vectorised and fed to the trained Multinomial Naive
    Bayes model for prediction.
    """
    data = text2matrix([title])
    # Predict the news category with the MNB classification model.
    predictions = classfiy_predict(model_path='mnb.model', data=data)
    return Result(items=predictions[0]).to_json()
def news(offset, limit=20):
    """Paginated news listing, optionally filtered by category and/or a
    case-insensitive keyword match on the title.

    :param offset: number of documents to skip (must be >= 0)
    :param limit: page size (must be > 0)
    :return: JSON string produced by PageResult (or Result on error)
    """
    category = request.args.get('category')
    keyword = request.args.get('keyword')
    if offset < 0 or limit <= 0:
        result = Result(code=Result.FAIL, msg='offset或limit参数有误!')
        return result.to_json()
    # Project away _id (ObjectId) to avoid JSON serialization errors.
    column = {'_id': 0}
    # Build the filter document once instead of duplicating the query for
    # every category/keyword combination (behavior is identical: an empty
    # condition matches all documents).
    condition = {}
    if category:
        condition['news_web_category'] = category
    if keyword:
        condition['news_title'] = re.compile(keyword, re.IGNORECASE)
    try:
        items = list(mongo.collection.find(condition, column)
                     .sort('news_datetime', -1).skip(offset).limit(limit))
        count = mongo.collection.find(condition, column).count()
        result = PageResult(items, count=count)
    except Exception as e:
        # BUG FIX: was msg=e — the raw exception object is not JSON
        # serializable; pass its string form instead.
        result = Result(code=Result.FAIL, msg=str(e))
    return result.to_json()
def scan(path_list: List[str], config: Any) -> Dict[str, Result]:
    """
    For each of the given file paths, scan it for malicious PHP patterns
    and assign it a score based on how suspicious it seems.
    """
    processors = get_processors(config)
    total = -1
    scanned = 0
    flagged = 0
    results = {}
    if config.show_progress:
        # Materialise the iterable so we can report a total up front.
        path_list = list(path_list)
        total = len(path_list)
        log.info("{} files found. Scanning...".format(total))
    last_tick = time.time()
    for target in path_list:
        scanned += 1
        if os.path.isdir(target):
            continue
        if not validate_file(target):
            log.debug(target + " doesn't look like a PHP file, skipping.")
            continue
        results[target] = Result(config.score_alert_threshold)
        log.debug("Scanning " + target)
        temp_path = preprocess(target, TMP_DIR)
        for processor in processors:
            if processor.ready():
                results[target].merge_with(processor.process(temp_path, target))
        if results[target].is_suspicious():
            flagged += 1
        # Throttled progress output.
        if config.show_progress and time.time() > last_tick + PROGRESS_UPDATE_PERIOD:
            last_tick = time.time()
            print_progress(scanned, total, flagged, config.start_time)
        os.unlink(temp_path)
        # Lower priority -> longer pause between files.
        if config.priority < 3:
            sleep((3 - config.priority) * 0.05)
    return results
def measure_tree_width(image_path, trunk_corners):
    """Measure trunk width for one image from manually annotated corners.

    :param image_path: path of the image to measure
    :param trunk_corners: dict with keys 'left_top', 'left_bottom',
        'right_top', 'right_bottom' (annotation coordinates), or None
    :return: a Result describing success/failure and the measured width
    """
    result = Result()  # result for this image, initialised as failure
    result.set_image_path(image_path)
    result.set_time(0)

    if not os.path.exists(image_path):
        result.set_info(InfoEnum.IMAGE_NOT_EXIST)
        return result
    if trunk_corners is None:
        result.set_info(InfoEnum.BAD_MANUAL_ANNO)
        return result

    result.set_trunk_left_top(trunk_corners['left_top'])
    result.set_trunk_left_bottom(trunk_corners['left_bottom'])
    result.set_trunk_right_top(trunk_corners['right_top'])
    # BUG FIX: was set_trunk_left_bottom(trunk_corners['right_bottom']),
    # which overwrote the left-bottom corner and never set right-bottom.
    result.set_trunk_right_bottom(trunk_corners['right_bottom'])

    im = cv2.imread(image_path)  # load the image
    im, resize_ratio = resize_image_with_ratio(im)  # normalise size

    # Detect the calibrator (reference object).
    calibrator = get_calibrator(im, 0, DEBUG and SHOW)
    if calibrator is None:
        result.set_info(InfoEnum.CALIBRATOR_DET_FAILED)
        return result

    # Store calibrate points mapped from resized back to original coordinates.
    org_calibrate_pts = []
    for calibrate_pt in calibrator.get_calibrate_points():
        org_calibrate_pts.append(recover_coordinate(calibrate_pt, resize_ratio))
    result.set_calibrate_points(org_calibrate_pts)

    # Compute trunk width from the two annotated trunk edges.
    trunk_left_top = resize_coordinate(trunk_corners['left_top'], resize_ratio)
    trunk_left_bottom = resize_coordinate(trunk_corners['left_bottom'], resize_ratio)
    trunk_right_top = resize_coordinate(trunk_corners['right_top'], resize_ratio)
    trunk_right_bottom = resize_coordinate(trunk_corners['right_bottom'], resize_ratio)

    l_line = Edge(trunk_left_top, trunk_left_bottom)
    r_line = Edge(trunk_right_top, trunk_right_bottom)
    alpha = angle(l_line.vec(), r_line.vec())
    if alpha < 5:  # edges must be near-parallel for a valid annotation
        pixel_dis_l2r = parallel_distance(l_line, r_line)
        pixel_dis_r2l = parallel_distance(r_line, l_line)
        # BUG FIX: was testing pixel_dis_r2l twice; pixel_dis_l2r was never
        # checked, so a failed left-to-right distance slipped through.
        if pixel_dis_l2r is not None and pixel_dis_r2l is not None:
            pixel_width = (pixel_dis_l2r + pixel_dis_r2l) / 2.0
            RP_ratio = calibrator.RP_ratio()
            real_width = pixel_width * RP_ratio
            result.set_info(InfoEnum.SUCCESS)
            result.set_width(real_width)
            result.set_conf(1.0)
        else:
            result.set_info(InfoEnum.BAD_MANUAL_ANNO)
    else:
        result.set_info(InfoEnum.BAD_MANUAL_ANNO)
    # BUG FIX: 'return' and 'result' were on separate lines, so the
    # function returned None instead of the populated Result.
    return result
def measure_all(image_path_list):
    """Measure trunk widths for a batch of images.

    :param image_path_list: list of image paths
    :return: dict {'results': [...]} with one entry per processed image
    """
    if image_path_list is None or len(image_path_list) == 0:
        # Invalid input: return an empty result set rather than failing.
        return {'results': []}

    seg_count = 0     # number of segmentation calls performed so far
    results_all = []  # accumulated per-image results
    image_num = len(image_path_list)

    for i, im_path in enumerate(image_path_list):
        im_id = im_path.split('/')[-1].split('.')[0]
        result = Result()  # measurement result for the current image
        result.set_image_path(im_path)
        # Appended up-front; later setters mutate the same underlying data.
        # NOTE(review): assumes get_result() returns a live reference that
        # reflects subsequent set_* calls — confirm against Result.
        results_all.append(result.get_result())
        time_start = time.time()

        if DEBUG:
            print('-' * 30)
            # NOTE(review): arguments look swapped (prints total/current);
            # kept as-is to preserve output.
            print('[%d/%d]: %s' % (image_num, i + 1, im_path.split('/')[-1]))

        if not os.path.exists(im_path):
            # Image missing on disk.
            result.set_info(InfoEnum.IMAGE_NOT_EXIST)
            continue

        im_org = cv2.imread(im_path)  # load the image
        im, resize_ratio = resize_image_with_ratio(im_org)  # normalise size

        # step1: detect the calibrator
        calibrator = get_calibrator(im, im_id, DEBUG and SHOW)
        if calibrator is None:
            result.set_info(InfoEnum.CALIBRATOR_DET_FAILED)
            continue

        # Check shooting distance via the calibrator's pixel scale.
        calibrator_ratio = calibrator.get_pixel_scale() / im.shape[0]
        if calibrator_ratio > 0.25:
            result.set_info(InfoEnum.STAND_TOO_CLOSE)
            # BUG FIX: was 'return result', which aborted the whole batch
            # and returned a bare Result instead of the documented dict.
            # Skip this image like the sibling error branches do.
            continue

        # Store calibrate points mapped back to original-image coordinates.
        org_calibrate_pts = []
        for calibrate_pt in calibrator.get_calibrate_points():
            org_calibrate_pts.append(recover_coordinate(calibrate_pt, resize_ratio))
        result.set_calibrate_points(org_calibrate_pts)

        # step2: cover the calibrator so it does not disturb segmentation
        im_cover = calibrator.cover_calibrator(im)
        if DEBUG:
            visualize_image(im_cover, 'img_cover', im_id=im_id,
                            show=DEBUG and SHOW)

        # Crop patches around the calibrator, trying sizes small to large.
        crop_params = [2, 4]
        for j, n_crop in enumerate(crop_params):
            im_patch = calibrator.crop_image(im_cover, n_dis_w=n_crop)
            patch_h, patch_w, _ = im_patch.shape
            if DEBUG:
                print('H:%d, W:%d' % (patch_h, patch_w))
                visualize_image(im_patch, 'patch_%d' % n_crop, im_id=im_id,
                                show=DEBUG and SHOW)

            # step4: segment the trunk
            if im_patch.shape[0] > config.NET_MAX_WIDTH and im_patch.shape[
                    1] > config.NET_MAX_WIDTH:
                # Segmentation target is too large for the network.
                result.set_info(InfoEnum.TRUNK_TOO_THICK)
                break

            # Interactive segmentation.
            trunk_mask = segment_trunk_int(im_patch, calibrator.positive_pts(),
                                           None, im_id=seg_count)
            seg_count += 1
            if DEBUG:
                visualize_image(trunk_mask, 'trunk_mask', im_id=im_id,
                                show=DEBUG and SHOW)

            # step5: compute trunk width
            trunk = Trunk(trunk_mask)
            if DEBUG:
                visualize_image(trunk.contour_mask, 'trunk_contour',
                                im_id=im_id, show=DEBUG and SHOW)

            if not trunk.is_seg_succ():
                # Rough check suggests segmentation failed; try next crop.
                result.set_info(InfoEnum.TRUNK_EDGE_UNCLEAR)
                if DEBUG:
                    print(InfoEnum.TRUNK_EDGE_UNCLEAR)
                continue
            else:
                # Convert pixel width to real-world width.
                RP_ratio = calibrator.RP_ratio()          # scale factor
                shot_distance = calibrator.shot_distance()  # shooting distance
                trunk_width, seg_conf, patch_trunk_corners = trunk.real_width_v2(
                    shot_distance, RP_ratio)
                if trunk_width > 0:
                    # Confidence: segmentation conf x calibration conf.
                    conf = seg_conf * calibrator.get_conf()
                    time_end = time.time()
                    time_consume = int(time_end - time_start)
                    # Patch coordinates -> resized-image coordinates.
                    trunk_left_top = calibrator.recover_coordinate(
                        patch_trunk_corners['left_top'])
                    trunk_right_top = calibrator.recover_coordinate(
                        patch_trunk_corners['right_top'])
                    trunk_left_bottom = calibrator.recover_coordinate(
                        patch_trunk_corners['left_bottom'])
                    trunk_right_bottom = calibrator.recover_coordinate(
                        patch_trunk_corners['right_bottom'])
                    # Resized coordinates -> original-image coordinates.
                    trunk_left_top = recover_coordinate(
                        trunk_left_top, resize_ratio)
                    trunk_left_bottom = recover_coordinate(
                        trunk_left_bottom, resize_ratio)
                    trunk_right_top = recover_coordinate(
                        trunk_right_top, resize_ratio)
                    trunk_right_bottom = recover_coordinate(
                        trunk_right_bottom, resize_ratio)
                    result.set_width(trunk_width)
                    result.set_conf(conf)
                    result.set_trunk_left_top(trunk_left_top)
                    result.set_trunk_right_top(trunk_right_top)
                    result.set_trunk_left_bottom(trunk_left_bottom)
                    result.set_trunk_right_bottom(trunk_right_bottom)
                    result.set_info(InfoEnum.SUCCESS)
                    result.set_time(time_consume)
                    if DEBUG:
                        print('Trunk width: %.2f CM (%.2f).'
                              % (trunk_width / 10.0, conf))
                        pts = org_calibrate_pts + \
                            [trunk_left_top, trunk_left_bottom,
                             trunk_right_top, trunk_right_bottom]
                        show_pts(im_org, pts, im_id=im_id,
                                 show=DEBUG and SHOW)
                    break
                else:
                    result.set_info(InfoEnum.TRUNK_EDGE_UNCLEAR)
                    if DEBUG:
                        print('Error is too large.')

    output = {'results': results_all}
    return output
# Script entry point: recognise the number/captcha in the image given on
# the command line and record the outcome in a Result.
# (Removed a stale commented-out accuracy-evaluation loop that used
# Python 2 print statements.)
if __name__ == "__main__":
    result = Result()
    img_type = sys.argv[1]  # input kind selector (e.g. "file")
    img_file = sys.argv[2]  # path of the image to recognise
    try:
        num_recog = Number_recognition()
        predict_data = num_recog.main(img_file)
        data = {'captcha': predict_data}
        result.code = code.SUCCESS
        result.msg = "success"
        result.data = data
    # BUG FIX: 'except Exception, e' is Python 2-only syntax and is a
    # SyntaxError under Python 3 (which the rest of the file targets via
    # type annotations); the bound name was unused anyway.
    except Exception:
        exstr = traceback.format_exc()
        logging.info('#####系统异错误error:%s', exstr)
        result.code = code.FAIL