def detect_image(model, file):
    image = common.imread(file)
    objs = detect(model, image)

    for obj in objs:
        common.drawbbox(image, obj)

    common.imwrite("detect_result/" + common.file_name_no_suffix(file) + ".draw.jpg", image)
def detect_image(model, file):
    image = common.imread(file)
    objs = detect(model, image)

    for obj in objs:
        # additionally detect the facial features (eyes, nose, mouth) on each face
        frame = organ_detection(image, obj)
        common.drawbbox(image, obj)

    cv2.imshow('demo', image)
    cv2.waitKey()
def do_detect_image(filename, output_filename, align_face: bool = False):
    if filename == output_filename:
        raise ValueError("input and output filename cannot be the same")

    model = _get_model()
    try:
        raw_image = common.imread(filename)
        if raw_image is None:
            raise ValueError(f"{filename} is not an image file")

        face_infos = detect_image_by_nparray(model, raw_image)
        if align_face:
            face_infos = _do_align_face(raw_image, face_infos, output_filename)

        if face_infos is not None:
            _draw_box(raw_image, face_infos, output_filename)
        else:
            face_infos = {}
    except (ValueError, cv2.error):
        face_infos = {}

    return [face_info.json for face_info in face_infos]
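A minimal usage sketch for do_detect_image; the file paths below are placeholders, and the exact fields of each returned entry depend on how face_info.json is defined in this codebase:

# Hypothetical usage: paths are placeholders; the JSON structure of each
# returned face depends on this project's face_info implementation.
faces = do_detect_image("input/selfie.jpg", "output/selfie.draw.jpg", align_face=False)
for face in faces:
    print(face)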
*common.load_webface("webface/val/label.txt", "webface/WIDER_val/images"))

# forward and summary
prefix = "webface/WIDER_val/images/"
all_result_dict = {}
total_file = len(files)

for i in range(total_file):
    # prepare key and file_name
    file = files[i]
    key = file[len(prefix):file.rfind("/")]
    file_name = common.file_name_no_suffix(file)

    # load image and forward
    image = common.imread(file)
    objs = eval_tool.detect_image(model, image, mean, std, 0.01)

    # summarize into all_result_dict
    image_pred = []
    for obj in objs:
        image_pred.append(obj.xywh + [obj.score])

    # build all_result_dict
    if key not in all_result_dict:
        all_result_dict[key] = {}

    all_result_dict[key][file_name] = np.array(image_pred)
    log.info("{} / {}".format(i + 1, total_file))

# write matlab format
def __getitem__(self, index):
    imgfile, objs = self.items[index]
    image = common.imread(imgfile)

    if image is None:
        log.info("{} is empty, index={}".format(imgfile, index))
        return self[random.randint(0, len(self.items) - 1)]

    keepsize = 12
    image, objs = augment.webface(image, objs, self.width, self.height, keepsize=0)

    # norm
    image = ((image / 255.0 - self.mean) / self.std).astype(np.float32)

    posweight_radius = 2
    stride = 4
    fm_width = self.width // stride
    fm_height = self.height // stride

    heatmap_gt = np.zeros((1, fm_height, fm_width), np.float32)
    heatmap_posweight = np.zeros((1, fm_height, fm_width), np.float32)
    keep_mask = np.ones((1, fm_height, fm_width), np.float32)
    reg_tlrb = np.zeros((1 * 4, fm_height, fm_width), np.float32)
    reg_mask = np.zeros((1, fm_height, fm_width), np.float32)
    distance_map = np.zeros((1, fm_height, fm_width), np.float32) + 1000
    landmark_gt = np.zeros((1 * 10, fm_height, fm_width), np.float32)
    landmark_mask = np.zeros((1, fm_height, fm_width), np.float32)

    hassmall = False
    for obj in objs:
        isSmallObj = obj.area < keepsize * keepsize

        if isSmallObj:
            cx, cy = obj.safe_scale_center(1 / stride, fm_width, fm_height)
            keep_mask[0, cy, cx] = 0
            w, h = obj.width / stride, obj.height / stride

            x0 = int(common.clip_value(cx - w // 2, fm_width - 1))
            y0 = int(common.clip_value(cy - h // 2, fm_height - 1))
            x1 = int(common.clip_value(cx + w // 2, fm_width - 1) + 1)
            y1 = int(common.clip_value(cy + h // 2, fm_height - 1) + 1)
            if x1 - x0 > 0 and y1 - y0 > 0:
                keep_mask[0, y0:y1, x0:x1] = 0
            hassmall = True

    for obj in objs:
        classes = 0
        cx, cy = obj.safe_scale_center(1 / stride, fm_width, fm_height)
        reg_box = np.array(obj.box) / stride
        isSmallObj = obj.area < keepsize * keepsize

        if isSmallObj:
            if obj.area >= 5 * 5:
                distance_map[classes, cy, cx] = 0
                reg_tlrb[classes * 4:(classes + 1) * 4, cy, cx] = reg_box
                reg_mask[classes, cy, cx] = 1
            continue

        w, h = obj.width / stride, obj.height / stride
        x0 = int(common.clip_value(cx - w // 2, fm_width - 1))
        y0 = int(common.clip_value(cy - h // 2, fm_height - 1))
        x1 = int(common.clip_value(cx + w // 2, fm_width - 1) + 1)
        y1 = int(common.clip_value(cy + h // 2, fm_height - 1) + 1)
        if x1 - x0 > 0 and y1 - y0 > 0:
            keep_mask[0, y0:y1, x0:x1] = 1

        w_radius, h_radius = common.truncate_radius((obj.width, obj.height))
        gaussian_map = common.draw_truncate_gaussian(heatmap_gt[classes, :, :], (cx, cy), h_radius, w_radius)

        mxface = 300
        miface = 25
        mxline = max(obj.width, obj.height)
        gamma = (mxline - miface) / (mxface - miface) * 10
        gamma = min(max(0, gamma), 10) + 1
        common.draw_gaussian(heatmap_posweight[classes, :, :], (cx, cy), posweight_radius, k=gamma)

        range_expand_x = math.ceil(w_radius)
        range_expand_y = math.ceil(h_radius)

        min_expand_size = 3
        range_expand_x = max(min_expand_size, range_expand_x)
        range_expand_y = max(min_expand_size, range_expand_y)

        icx, icy = cx, cy
        reg_landmark = None
        fill_threshold = 0.3

        if obj.haslandmark:
            reg_landmark = np.array(obj.x5y5_cat_landmark) / stride
            x5y5 = [cx] * 5 + [cy] * 5
            rvalue = (reg_landmark - x5y5)
            landmark_gt[0:10, cy, cx] = np.array(common.log(rvalue)) / 4
            landmark_mask[0, cy, cx] = 1

        if not obj.rotate:
            for cx in range(icx - range_expand_x, icx + range_expand_x + 1):
                for cy in range(icy - range_expand_y, icy + range_expand_y + 1):
                    if cx < fm_width and cy < fm_height and cx >= 0 and cy >= 0:
                        my_gaussian_value = 0.9
                        gy, gx = cy - icy + range_expand_y, cx - icx + range_expand_x
                        if gy >= 0 and gy < gaussian_map.shape[0] and gx >= 0 and gx < gaussian_map.shape[1]:
                            my_gaussian_value = gaussian_map[gy, gx]

                        distance = math.sqrt((cx - icx) ** 2 + (cy - icy) ** 2)
                        if my_gaussian_value > fill_threshold or distance <= min_expand_size:
                            already_distance = distance_map[classes, cy, cx]
                            my_mix_distance = (1 - my_gaussian_value) * distance

                            if my_mix_distance > already_distance:
                                continue

                            distance_map[classes, cy, cx] = my_mix_distance
                            reg_tlrb[classes * 4:(classes + 1) * 4, cy, cx] = reg_box
                            reg_mask[classes, cy, cx] = 1

    # if hassmall:
    #     common.imwrite("test_result/keep_mask.jpg", keep_mask[0] * 255)
    #     common.imwrite("test_result/heatmap_gt.jpg", heatmap_gt[0] * 255)
    #     common.imwrite("test_result/keep_ori.jpg", (image * self.std + self.mean) * 255)

    return T.to_tensor(image), heatmap_gt, heatmap_posweight, reg_tlrb, reg_mask, landmark_gt, landmark_mask, len(objs), keep_mask
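A minimal sketch of how this __getitem__ might be consumed, assuming it belongs to a PyTorch Dataset subclass (called LDataset here purely for illustration; the constructor arguments are also assumptions, not part of the code above):

# Hypothetical consumption sketch: LDataset and its constructor arguments are
# assumptions, not taken from the code above.
from torch.utils.data import DataLoader

dataset = LDataset("webface/train/label.txt", "webface/WIDER_train/images", width=800, height=800)
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)

# each batch yields the nine items returned above, collated along a new batch dimension
images, heatmap_gt, heatmap_posweight, reg_tlrb, reg_mask, landmark_gt, landmark_mask, num_objs, keep_mask = next(iter(loader))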
import cv2
import common

img1 = common.imread('dog.jpg', 0)
img2 = cv2.blur(img1, (5, 5))
common.imshow('blur', img1, img2)
import cv2
import common

img1 = common.imread('dog.jpg', 0)
ret, img2 = cv2.threshold(img1, 127, 255, cv2.THRESH_BINARY)
common.imshow('thresh', img1, img2)
import cv2
import common
import numpy

img1 = common.imread('shapes.jpg', 0)
ret, img1cp = cv2.threshold(img1, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(img1cp, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

img2 = numpy.zeros(img1.shape, numpy.uint8)
cv2.drawContours(img2, contours, -1, 255, 1)

print("number of contours = %d" % len(contours))
for i in range(len(contours)):
    c = contours[i]
    f = cv2.FONT_HERSHEY_SIMPLEX
    a = cv2.contourArea(c)
    l = cv2.arcLength(c, True)
    m = "%d" % i
    print("%d: len=%d, arcLen=%d, area=%d" % (i, len(c), l, a))
    p = tuple(c[0][0])
    cv2.putText(img2, m, p, f, 0.4, (255), 1, cv2.LINE_AA)

common.imshow('contours', img1, img2)

# See below for more details:
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html#contour-features
def detect_image(model, file):
    raw_image = common.imread(file)
    if raw_image is None:
        raise ValueError(f"{file} is not an image file")
    return detect_image_by_nparray(model, raw_image)
        if flags[index] != 0:
            continue

        keep.append(obj)
        for j in range(index + 1, len(objs)):
            if flags[j] == 0 and obj.iou(objs[j]) > iou:
                flags[j] = 1
    return keep


mean = [0.408, 0.447, 0.47]
std = [0.289, 0.274, 0.278]

trial_name = "small-H-dense-wide64-UCBA"
jobdir = f"jobs/{trial_name}"

image = common.imread("imgs/selfie.jpg")
model = DBFace(has_landmark=True, wide=64, has_ext=True, upmode="UCBA")
# model.load(f"{jobdir}/models/150.pth")
model.load_from_zoo()
model.eval()
model.cuda()

outs = eval_tool.detect_image(model, image, mean, std, 0.3)
outs = nms(outs, 0.2)
print("objs = %d" % len(outs))

for obj in outs:
    common.drawbbox(image, obj)

common.imwrite("test_result/test.jpg", image)
print("ok")
    indices = indices.squeeze()
    ys = list((indices / hm_width).int().data.numpy())
    xs = list((indices % hm_width).int().data.numpy())
    scores = list(scores.data.numpy())
    box = box.cpu().squeeze().data.numpy()
    landmark = landmark.cpu().squeeze().data.numpy()

    stride = 4
    objs = []
    for cx, cy, score in zip(xs, ys, scores):
        if score < threshold:
            break

        x, y, r, b = box[:, cy, cx]
        xyrb = (np.array([cx, cy, cx, cy]) + [-x, -y, r, b]) * stride
        x5y5 = landmark[:, cy, cx]
        x5y5 = (common.exp(x5y5 * 4) + ([cx] * 5 + [cy] * 5)) * stride
        box_landmark = list(zip(x5y5[:5], x5y5[5:]))
        objs.append(common.BBox(0, xyrb=xyrb, score=score, landmark=box_landmark))

    return nms(objs, iou=nms_iou)


image = common.imread('../data/1.jpg')
objs = detect(dbface, image)

for obj in objs:
    print(obj)
    x, y, r, b = obj.box
    cv2.rectangle(image, (int(x), int(y)), (int(r), int(b)), (0, 0, 255), thickness=2)

cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", image)  # show the result
cv2.waitKey(0)