def video_demo(cam_id):
    weights = args.model
    detector = FaceDetector(weights)
    video_capture = cv2.VideoCapture(cam_id)
    while True:
        ret, img = video_capture.read()
        if not ret:
            break
        img_show = img.copy()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        boxes = detector(img, 0.5)
        for box_index in range(boxes.shape[0]):
            bbox = boxes[box_index]
            color = (255, 0, 0)
            thickness = 2
            radius = 2
            # draw the face box, then the landmarks packed as (x, y) pairs
            # after the first 4 box coordinates
            cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), color, thickness)
            for point_x, point_y in zip(bbox[4::2], bbox[5::2]):
                cv2.circle(img_show, (int(point_x), int(point_y)), radius, color, thickness)
        cv2.namedWindow('res', 0)
        cv2.imshow('res', img_show)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
def video_demo(cam_id):
    weights = args.model
    detector = FaceDetector(weights)
    video_capture = cv2.VideoCapture(cam_id)
    while True:
        ret, img = video_capture.read()
        if not ret:
            break
        img_show = img.copy()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        boxes = detector(img, 0.5)
        for box_index in range(boxes.shape[0]):
            bbox = boxes[box_index]
            cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 4)
        cv2.namedWindow('res', 0)
        cv2.imshow('res', img_show)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
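# A minimal entry-point sketch showing how video_demo might be driven.
# Assumption: these demos read a module-level argparse namespace named `args`
# with a --model flag; the --cam_id flag here is hypothetical, the actual CLI
# is not shown in this excerpt.
if __name__ == '__main__':
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument('--model', required=True, help='path to detector weights')
    ap.add_argument('--cam_id', type=int, default=0, help='camera index')
    args = ap.parse_args()
    video_demo(args.cam_id)
    cv2.destroyAllWindows()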
def demo():
    detector = FaceDetector(args.model)
    # files = glob.glob(data_dir + '/*')
    # files.sort(key=os.path.getmtime)
    files = ["/home/ubuntu/seg/TAD16K/test_3/32871.jpg"]
    for pic in files:
        if pic.endswith('jpg'):
            img = cv2.imread(pic)
            img_show = img.copy()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            pred = detector(img, 0.4)
            print('pred ', pred.shape, pred)
            for i, pred_i in enumerate(pred):
                if pred_i.shape[0] > 0:
                    for bbox in pred_i:
                        cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
                                      (int(bbox[2]), int(bbox[3])), (255, 0, 255), 2)
                        cv2.putText(img_show, str(i), (int(bbox[0]), int(bbox[1])),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)
            cv2.imwrite("test.png", img_show)
def image_demo(data_dir):
    recognition = SignRecognition(
        "/home/ubuntu/seg/autonomous-Sign-Detector/checkpoints/best_ckpt.h5")
    detector = FaceDetector(args.model)
    new_dir = data_dir + "_result"
    files = glob.glob(data_dir + '/*')
    files.sort(key=os.path.getmtime)
    for pic in tqdm(files):
        if pic.endswith('jpg') or pic.endswith("png"):
            img = cv2.imread(pic)
            img_show = img.copy()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            pred = detector(img, 0.4)
            for i, pred_i in enumerate(pred):
                if pred_i.shape[0] > 0:
                    for bbox in pred_i:
                        cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
                                      (int(bbox[2]), int(bbox[3])), (255, 0, 255), 2)
                        cv2.putText(img_show, str(i), (int(bbox[0]), int(bbox[1])),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)
            newpath = pic.replace(data_dir, new_dir)
            dn = os.path.dirname(newpath)
            if not os.path.exists(dn):
                os.makedirs(dn)
            cv2.imwrite(newpath, img_show)
def image_demo(data_dir):
    detector = FaceDetector(args.model)
    files = glob.glob(data_dir + '/*')
    files.sort(key=os.path.getmtime)
    for pic in files:
        if pic.endswith('jpg'):
            img = cv2.imread(pic)
            img_show = img.copy()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            pred = detector(img, 0.4)
            # print('pred ', pred.shape, pred)
            for i, pred_i in enumerate(pred):
                if pred_i.shape[0] > 0:
                    for bbox in pred_i:
                        cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
                                      (int(bbox[2]), int(bbox[3])), (255, 0, 255), 2)
                        cv2.putText(img_show, str(i), (int(bbox[0]), int(bbox[1])),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)
            cv2.namedWindow('res', 0)
            cv2.imshow('res', img_show)
            k = cv2.waitKey(0)
            if k == ord('n'):  # 'n' (keycode 110) advances to the next image
                continue
            else:
                break
def image_demo(data_dir):
    detector = FaceDetector(args.model)
    count = 0
    pics = []
    GetFileList(data_dir, pics)
    pics = [x for x in pics if 'jpg' in x or 'png' in x]
    # pics.sort()
    for pic in pics:
        img = cv2.imread(pic)
        img_show = img.copy()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        start = time.time()
        boxes = detector(img, 0.3)
        # print('one image cost %f s' % (time.time() - start))
        # print(boxes.shape)
        # print(boxes)
        # TODO: dump detections to xml or json
        print(boxes.shape[0])
        if boxes.shape[0] == 0:
            print(pic)
        for box_index in range(boxes.shape[0]):
            bbox = boxes[box_index]
            color = (255, 0, 0)
            thickness = 2
            radius = 2
            print(bbox)
            cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), color, thickness)
            # landmarks are packed as (x, y) pairs after the 4 box coordinates
            for point_x, point_y in zip(bbox[4::2], bbox[5::2]):
                cv2.circle(img_show, (int(point_x), int(point_y)), radius, color, thickness)
            start_point = (int(bbox[4]), int(bbox[5]))
            end_point = (int(bbox[6]), int(bbox[7]))
            cv2.line(img_show, start_point, end_point, color, thickness)
            # cv2.putText(img_show, str(bbox[4]), (int(bbox[0]), int(bbox[1]) + 30),
            #             cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)
            # cv2.putText(img_show, str(int(bbox[5])), (int(bbox[0]), int(bbox[1]) + 40),
            #             cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        cv2.namedWindow('res', 0)
        cv2.imshow('res', img_show)
        key = cv2.waitKey(0)
        if key == ord('q'):
            break
    print(count)
def __init__(self):
    self.face_detector = FaceDetector()
    self.face_landmark = Keypoints()
    self.top_k = cfg.DETECT.topk
    # another thread should run the detector at a lower rate and update track_box
    self.track_box = None
    self.previous_image = None
    self.previous_box = None
    self.diff_thres = 5
    self.iou_thres = cfg.TRACE.iou_thres
def __init__(self):
    self.face_detector = FaceDetector()
    self.face_landmark = FaceLandmark()
    self.trace = GroupTrack()
    # another thread should run the detector at a lower rate and update track_box
    self.track_box = None
    self.previous_image = None
    self.previous_box = None
    self.diff_thres = 5
    self.top_k = cfg.DETECT.topk
    self.iou_thres = cfg.TRACE.iou_thres
    self.alpha = cfg.TRACE.smooth_box
def image_demo(data_dir):
    detector = FaceDetector(args.model)
    count = 0
    pics = []
    GetFileList(data_dir, pics)
    pics = [x for x in pics if 'jpg' in x or 'png' in x]
    # pics.sort()
    for pic in pics:
        img = cv2.imread(pic)
        img_show = img.copy()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        start = time.time()
        boxes = detector(img, 0.3)
        # print('one image cost %f s' % (time.time() - start))
        # print(boxes.shape)
        # print(boxes)
        # TODO: dump detections to xml or json
        print(boxes.shape[0])
        if boxes.shape[0] == 0:
            print(pic)
        for box_index in range(boxes.shape[0]):
            bbox = boxes[box_index]
            cv2.rectangle(img_show, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 4)
            # cv2.putText(img_show, str(bbox[4]), (int(bbox[0]), int(bbox[1]) + 30),
            #             cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 255), 2)
            # cv2.putText(img_show, str(int(bbox[5])), (int(bbox[0]), int(bbox[1]) + 40),
            #             cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        cv2.namedWindow('res', 0)
        cv2.imshow('res', img_show)
        cv2.waitKey(0)
    print(count)
def __init__(self):
    self.face_detector = FaceDetector()
    self.face_landmark = FaceLandmark()
    self.trace = GroupTrack()
    # another thread should run the detector at a lower rate and update track_box
    self.track_box = None
    self.previous_image = None
    self.previous_box = None
    self.diff_thres = 5
    self.top_k = cfg.DETECT.topk
    self.min_face = cfg.DETECT.min_face
    self.iou_thres = cfg.TRACE.iou_thres
    self.alpha = cfg.TRACE.smooth_box
    # choose the box-smoothing filter from config
    if 'ema' in cfg.TRACE.ema_or_one_euro:
        self.filter = EmaFilter(self.alpha)
    else:
        self.filter = OneEuroFilter()
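# For reference, a minimal sketch of what an exponential-moving-average box
# filter like the EmaFilter above might look like. Assumption: the repo's real
# class may differ; `alpha` is the smoothing weight taken from
# cfg.TRACE.smooth_box, and boxes are [x1, y1, x2, y2] arrays.
import numpy as np

class EmaFilterSketch:
    def __init__(self, alpha):
        self.alpha = alpha  # weight given to the previous (smoothed) box
        self.prev = None    # last smoothed box

    def __call__(self, box):
        box = np.asarray(box, dtype=np.float32)
        if self.prev is None:
            self.prev = box  # first frame: nothing to smooth against
        else:
            # blend the new detection with the previous estimate
            self.prev = self.alpha * self.prev + (1.0 - self.alpha) * box
        return self.prev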
ap.add_argument("--model", required=True, default='', help="model to eval")
ap.add_argument("--is_show", required=False, default=False, help="show result or not")
ap.add_argument("--data_dir", required=False, default="./FDDB/img", help="dir to img")
ap.add_argument("--split_dir", required=False, default='./FDDB/FDDB-folds', help="dir to FDDB-folds")
ap.add_argument("--result", required=False, default='./result', help="dir to write result")
args = ap.parse_args()

IMAGES_DIR = args.data_dir
ANNOTATIONS_PATH = args.split_dir
RESULT_DIR = args.result
if not os.access(RESULT_DIR, os.F_OK):
    os.mkdir(RESULT_DIR)

face_detector = FaceDetector(args.model)

annotations = [s for s in os.listdir(ANNOTATIONS_PATH) if s.endswith('ellipseList.txt')]
image_lists = [s for s in os.listdir(ANNOTATIONS_PATH) if not s.endswith('ellipseList.txt')]
annotations = sorted(annotations)
image_lists = sorted(image_lists)

images_to_use = []
for n in image_lists:
    with open(os.path.join(ANNOTATIONS_PATH, n)) as f:
        images_to_use.extend(f.readlines())
images_to_use = [s.strip() for s in images_to_use]

with open(os.path.join(RESULT_DIR, 'faceList.txt'), 'w') as f:
    for p in images_to_use:
        f.write(p + '\n')
ap.add_argument("--split_dir", required=False, default='../FDDB/FDDB-folds', help="dir to FDDB-folds") ap.add_argument("--result", required=False, default='./resultFDDB', help="dir to write result") args = ap.parse_args() IMAGES_DIR = args.data_dir ANNOTATIONS_PATH = args.split_dir RESULT_DIR = args.result MODEL_PATH = args.model face_detector = FaceDetector([MODEL_PATH]) annotations = [ s for s in os.listdir(ANNOTATIONS_PATH) if s.endswith('ellipseList.txt') ] image_lists = [ s for s in os.listdir(ANNOTATIONS_PATH) if not s.endswith('ellipseList.txt') ] annotations = sorted(annotations) image_lists = sorted(image_lists) images_to_use = [] for n in image_lists: with open(os.path.join(ANNOTATIONS_PATH, n)) as f: images_to_use.extend(f.readlines())
    # tail of the box-voting routine: fuse a cluster of overlapping detections
    # into a single score-weighted box that keeps the cluster's best score
    max_score = np.max(det_accu[:, 4])
    det_accu_sum = np.zeros((1, 5))
    det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4], axis=0) / np.sum(det_accu[:, -1:])
    det_accu_sum[:, 4] = max_score
    try:
        dets = np.row_stack((dets, det_accu_sum))
    except NameError:  # first cluster: `dets` does not exist yet
        dets = det_accu_sum
    dets = dets[0:750, :]
    return dets


# face_detector = FaceDetector([MODEL_META_PATH, MODEL_PATH])
face_detector = FaceDetector(['./model/detector.pb'])

predictions = []
for n in tqdm(images_to_use):
    image_array = cv2.imread(os.path.join(IMAGES_DIR, n) + '.jpg')
    image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
    # the score threshold must be set low for FDDB evaluation
    boxes = face_detector(image_array, score_threshold=0.05)
    # # flip the image and detect again; seems unnecessary for FDDB
    # flip_img = np.flip(image_array, 1)
    # boxes_flip_ = face_detector(flip_img, score_threshold=0.05)
    # boxes_flip = np.zeros(boxes_flip_.shape)
    # boxes_flip[:, 0] = flip_img.shape[1] - boxes_flip_[:, 2]
    # boxes_flip[:, 1] = boxes_flip_[:, 1]
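# A hedged sketch of how per-image detections are typically written out for
# the FDDB evaluation tool. Assumptions: `predictions` ends up holding
# (image_name, boxes) pairs, boxes are [x1, y1, x2, y2, score], and the output
# file name 'detections.txt' is hypothetical; the repo's actual writer is not
# shown in this excerpt. FDDB's detection format is one block per image:
# image name, face count, then one `left top width height score` line per face.
with open(os.path.join(RESULT_DIR, 'detections.txt'), 'w') as f:
    for name, boxes in predictions:
        f.write(name + '\n')
        f.write(str(len(boxes)) + '\n')
        for b in boxes:
            w, h = b[2] - b[0], b[3] - b[1]  # FDDB expects width/height, not x2/y2
            f.write('%f %f %f %f %f\n' % (b[0], b[1], w, h, b[4]))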
ap.add_argument("--split_dir", required=False, default='./FDDB/FDDB-folds', help="dir to FDDB-folds") ap.add_argument("--result", required=False, default='./result', help="dir to write result") args = ap.parse_args() IMAGES_DIR = args.data_dir ANNOTATIONS_PATH = args.split_dir RESULT_DIR = args.result MODEL_PATH = args.model INPUT_SHAPE = (args.input_shape, args.input_shape) face_detector = FaceDetector(MODEL_PATH) annotations = [ s for s in os.listdir(ANNOTATIONS_PATH) if s.endswith('ellipseList.txt') ] image_lists = [ s for s in os.listdir(ANNOTATIONS_PATH) if not s.endswith('ellipseList.txt') ] annotations = sorted(annotations) image_lists = sorted(image_lists) images_to_use = [] for n in image_lists: with open(os.path.join(ANNOTATIONS_PATH, n)) as f: images_to_use.extend(f.readlines())
import glob
import os
import time

# hide the GPU before TensorFlow is pulled in, so the setting takes effect
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

import cv2
import matplotlib.pyplot as plt
import tensorflow as tf

from lib.core.api.face_detector import FaceDetector

detector = FaceDetector(['./model/detector_2.pb'])


def GetFileList(dir, fileList):
    # recursively collect every file under `dir` into fileList
    if os.path.isfile(dir):
        fileList.append(dir)
    elif os.path.isdir(dir):
        for s in os.listdir(dir):
            # if s == "pts":
            #     continue
            newDir = os.path.join(dir, s)
            GetFileList(newDir, fileList)
    return fileList


def facedetect():
    count = 0
    # print('t')
    # data_dir = '/Users/andreyrizhiy/face_detect/ASF/*.jpg'
    # pics = []