Example #1
def evalu_img(imgpath, min_size):
    cv2.namedWindow("test")
    cv2.moveWindow("test", 1400, 10)
    threshold = np.array([0.5, 0.5, 0.9])
    detect_model = MTCNNDet(min_size, threshold)
    img = cv2.imread(imgpath)
    rectangles = detect_model.detectFace(img)
    #draw = img.copy()
    if rectangles is not None:
        for rectangle in rectangles:
            score_label = str("{:.2f}".format(rectangle[4]))
            cv2.putText(img, score_label,
                        (int(rectangle[0]), int(rectangle[1])),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
            cv2.rectangle(img, (int(rectangle[0]), int(rectangle[1])),
                          (int(rectangle[2]), int(rectangle[3])), (255, 0, 0),
                          1)
            if len(rectangle) > 5:
                if config.x_y:
                    for i in range(5, 15, 2):
                        cv2.circle(
                            img,
                            (int(rectangle[i + 0]), int(rectangle[i + 1])), 2,
                            (0, 255, 0))
                else:
                    rectangle = rectangle[5:]
                    for i in range(5):
                        cv2.circle(img,
                                   (int(rectangle[i]), int(rectangle[i + 5])),
                                   2, (0, 255, 0))
    else:
        print("No face detected")
    cv2.imshow("test", img)
    cv2.waitKey(0)
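The config.x_y branch above handles the two landmark layouts an MTCNN detector may return: interleaved coordinates (x1, y1, ..., x5, y5) or grouped coordinates (x1..x5 followed by y1..y5). A minimal, self-contained sketch of both drawing loops on dummy data (no detector required):

import cv2
import numpy as np

canvas = np.zeros((200, 200, 3), dtype=np.uint8)
# interleaved layout: box (x1,y1,x2,y2) + score + x1,y1,...,x5,y5
rect_xy = [20, 20, 180, 180, 0.99, 60, 80, 140, 80, 100, 110, 70, 150, 130, 150]
for i in range(5, 15, 2):
    cv2.circle(canvas, (int(rect_xy[i]), int(rect_xy[i + 1])), 2, (0, 255, 0))
# grouped layout: box + score + x1..x5 then y1..y5
rect_grouped = [20, 20, 180, 180, 0.99, 60, 140, 100, 70, 130, 80, 80, 110, 150, 150]
pts = rect_grouped[5:]
for i in range(5):
    cv2.circle(canvas, (int(pts[i]), int(pts[i + 5])), 2, (0, 0, 255))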
Example #2
def evalu_img(args):
    imgpath = args.img_path
    min_size = args.min_size
    model_dir = args.model_dir
    cv2.namedWindow("test")
    cv2.moveWindow("test", 1400, 10)
    base_name = "test_img"
    save_dir = './output'
    crop_size = [112, 112]
    FaceAnti_model_dir = os.path.join(model_dir, cfgs.DATASET_NAME)
    model_path = os.path.join(FaceAnti_model_dir, cfgs.MODEL_PREFIX)
    FaceAnti_Model = Face_Anti_Spoof(model_path,
                                     args.load_epoch,
                                     cfgs.IMG_SIZE,
                                     args.gpu,
                                     layer='fc')
    caffe_mode_dir = os.path.join(model_dir, 'FaceDetect')
    threshold = np.array([0.7, 0.8, 0.95])
    Detect_Model = MTCNNDet(min_size, threshold, caffe_mode_dir)
    img = cv2.imread(imgpath)
    h, w = img.shape[:2]
    if cfgs.img_downsample and h > 1000:
        img = img_ratio(img, 240)
    rectangles = Detect_Model.detectFace(img)
    #draw = img.copy()
    if len(rectangles) > 0:
        rectangles = sort_box(rectangles)
        '''
        points = np.array(rectangles)
        #print('rec shape',points.shape)
        points = points[:,5:]
        points_list = points.tolist()
        crop_imgs = alignImg(img,crop_size,points_list)
        for idx_cnt,img_out in enumerate(crop_imgs):
            savepath = os.path.join(save_dir,base_name+'_'+str(idx_cnt)+".jpg")
            #img_out = cv2.resize(img_out,(96,112))
            #cv2.imshow("test",img_out)
            cv2.imwrite(savepath,img_out)
        '''
        img_verify = img_crop(img, rectangles[0], img.shape[1], img.shape[0])
        tmp, pred_id = FaceAnti_Model.inference(img_verify)
        label_show(img, rectangles, pred_id)
    else:
        print("No face detected")
    cv2.imshow("test", img)
    cv2.waitKey(0)
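evalu_img reads img_path, min_size, model_dir, load_epoch and gpu from its args object. A hypothetical argparse front end matching those attribute names (the defaults below are assumptions, not values from the original project):

import argparse

def parse_args():
    # hypothetical parser: only the attribute names are taken from evalu_img,
    # every default here is an assumption
    parser = argparse.ArgumentParser(description="single-image face anti-spoofing demo")
    parser.add_argument('--img-path', dest='img_path', type=str, default='test.jpg')
    parser.add_argument('--min-size', dest='min_size', type=int, default=24)
    parser.add_argument('--model-dir', dest='model_dir', type=str, default='../models')
    parser.add_argument('--load-epoch', dest='load_epoch', type=int, default=0)
    parser.add_argument('--gpu', type=int, default=0)
    return parser.parse_args()

#evalu_img(parse_args())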
Example #3
def main():
    cv2.namedWindow("test")
    cv2.moveWindow("test", 1400, 10)
    threshold = [0.5, 0.9, 0.9]
    imgpath = "test2.jpg"
    parm = args()
    min_size = parm.min_size
    file_in = parm.file_in
    detect_model = MTCNNDet(min_size, threshold)
    if file_in == 'None':
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(file_in)
    if not cap.isOpened():
        print("failed open camera")
        return 0
    else:
        while True:
            _, frame = cap.read()
            rectangles = detect_model.detectFace(frame)
            draw = frame.copy()
            if len(rectangles) > 0:
                for rectangle in rectangles:
                    score_label = str("{:.2f}".format(rectangle[4]))
                    cv2.putText(draw, score_label,
                                (int(rectangle[0]), int(rectangle[1])),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
                    cv2.rectangle(draw, (int(rectangle[0]), int(rectangle[1])),
                                  (int(rectangle[2]), int(rectangle[3])),
                                  (255, 0, 0), 1)
                    if len(rectangle) > 5:
                        for i in range(5, 15, 2):
                            cv2.circle(
                                draw,
                                (int(rectangle[i + 0]), int(rectangle[i + 1])),
                                2, (0, 255, 0))
            cv2.imshow("test", draw)
            q = cv2.waitKey(10) & 0xFF
            if q == 27 or q == ord('q'):
                break
        cap.release()
        cv2.destroyAllWindows()
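main compares file_in against the string 'None' (not Python None), so a string-typed argparse default is presumably used as the "no input file" sentinel that falls back to the webcam. A self-contained sketch of that source selection with a hypothetical helper name:

import cv2

def open_capture(file_in):
    # hypothetical helper: the string sentinel 'None' (or a real None)
    # selects the default webcam, anything else is treated as a file path
    if file_in is None or file_in == 'None':
        return cv2.VideoCapture(0)
    return cv2.VideoCapture(file_in)

cap = open_capture('None')
print("camera opened:", cap.isOpened())
cap.release()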
Example #4
def main(file_in,db_file,id_dir,save_dir,frame_num,out_file):
    if file_in is None:
        v_cap = cv2.VideoCapture(0)
    else:
        v_cap = cv2.VideoCapture(file_in)
    min_size = 24
    base_name = "test"
    threshold = np.array([0.8,0.9,0.95])
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if os.path.isfile(out_file):
        record_w = open(out_file,'w')
    else:
        print("the output file does not exist, creating it: ",out_file)
        record_w = open(out_file,'w')
    detect_model = MTCNNDet(min_size,threshold) 
    facereg_model = DB_Reg(db_file,id_dir,save_dir)
    #model_path = "../models/haarcascade_frontalface_default.xml"
    #detect_model = FaceDetector_Opencv(model_path)
    if faceconfig.mx_:
        crop_size = [112,112]
    else:
        crop_size = [112,96]
    idx_cnt = 0
    record_w.write("crop size is: %s \n" % ("\t".join([str(x) for x in crop_size])))
    person_name_dict = dict()
    cv2.namedWindow("gallery")
    cv2.namedWindow("querry")
    cv2.namedWindow("video")
    cv2.namedWindow("src")
    cv2.moveWindow("gallery",100,10)
    cv2.moveWindow("querry",500,10)
    cv2.moveWindow("video",1400,10)
    cv2.moveWindow("src",650,10)
    show_h = crop_size[0]
    show_w = crop_size[1]
    id_prob_show = np.zeros([6*show_h,3*show_w,3],dtype=np.uint8)
    if not v_cap.isOpened():
        print("field to open video")
    else:
        if file_in is not None:
            print("video frame num: ",v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
            total_num = v_cap.get(cv2.CAP_PROP_FRAME_COUNT)
            if total_num > 100000:
                total_num = 30000
        else:
            total_num = 100000
        record_w.write("the video has total num: %d \n" % total_num)
        if frame_num is not None:
            v_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
        fram_w = v_cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        fram_h = v_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        fram_cnt = 0
        while v_cap.isOpened():
            ret,frame = v_cap.read()
            fram_cnt+=1
            sys.stdout.write("\r>> deal with %d / %d" % (fram_cnt,total_num))
            sys.stdout.flush()
            if ret: 
                t = time.time()
                rectangles = detect_model.detectFace(frame)
                t_det = time.time() - t
            else:
                continue
            if len(rectangles)> 0:
                #rectangles = sort_box(rectangles)
                if faceconfig.time:
                    print("one frame detect time cost {:.3f} ".format(t_det))
                if detconfig.crop_org:
                    for bbox_one in rectangles:
                        idx_cnt+=1
                        img_out = img_crop(frame,bbox_one,fram_w,fram_h)
                        savepath = os.path.join(save_dir,base_name+'_'+str(idx_cnt)+".jpg")
                        #savepath = os.path.join(save_dir,line_1)
                        img_out = cv2.resize(img_out,(96,112))
                        #cv2.imwrite(savepath,img_out)
                else:
                    points = np.array(rectangles)
                    points = points[:,5:]
                    points_list = points.tolist()
                    #crop_imgs = Align_Image.extract_image_chips(frame,points_list)
                    if videoconfig.det_box_widen:
                        crop_imgs = img_crop2(frame,rectangles,crop_size,fram_w,fram_h)
                    else:
                        crop_imgs = alignImg(frame,crop_size,points_list)
                    if videoconfig.box_widen:
                        crop_widen_imgs = img_crop3(frame,rectangles,fram_w,fram_h)
                        assert len(crop_imgs) == len(crop_widen_imgs), "widened boxes do not match the aligned crops"
                    if len(crop_imgs) == 0:
                        continue
                    for crop_id,img_out in enumerate(crop_imgs):
                        idx_cnt+=1
                        #savepath = os.path.join(save_dir,base_name+'_'+str(idx_cnt)+".jpg")
                        #img_out = cv2.resize(img_out,(96,112))
                        #cv2.imshow("test",img_out)
                        #cv2.imwrite(savepath,img_out)
                        t = time.time()
                        person_name, db_img,img_en = facereg_model.findNearPerson(img_out)
                        t_reg = time.time() - t
                        if person_name is not None:
                            if faceconfig.time:
                                print("a face recognize time cost {:.3f} ".format(t_reg))
                            person_cnt = person_name_dict.setdefault(person_name,fram_cnt)
                            if fram_cnt - person_cnt <= faceconfig.frame_interval:
                                if videoconfig.box_widen:
                                    facereg_model.saveperson(crop_widen_imgs[crop_id])
                                else:
                                    facereg_model.saveperson(img_out)
                                #person_name_dict[person_name] = person_cnt+1
                            person_name_dict[person_name] = person_cnt+1
                            #db_img = cv2.cvtColor(db_img,cv2.COLOR_RGB2BGR)
                            if faceconfig.debug:
                                print("crop img",img_out.shape,img_out[0,0:10,0])
                            id_prob_show[:show_h,:show_w,:] = db_img
                            #id_prob_show[:112,192:,:] = img_out
                            id_prob_show[:show_h,2*show_w:,:] = img_en
                            #cv2.imshow("querry",id_prob_show[:112,:96,:])
                            if faceconfig.debug:
                                print("show img id and crop ",id_prob_show[10,5:10,0],id_prob_show[10,200:210,0])
                            cv2.imshow("gallery",id_prob_show)
                            cv2.waitKey(50)
                            id_prob_show[show_h:,:,:] = id_prob_show[:-show_h,:,:]
                        cv2.imshow("querry",img_en)
                        #cv2.imshow("src",img_out)
                        if videoconfig.box_widen:
                            cv2.imshow("src",crop_widen_imgs[crop_id])
                        else:
                            cv2.imshow("src",img_out)
                label_show(frame,rectangles)
            else:
                #print("current frame no face ")
                pass
            cv2.imshow("video",frame)
            key_ = cv2.waitKey(10) & 0xFF
            if key_ == 27 or key_ == ord('q'):
                break
            if fram_cnt == total_num:
                break
    person_real = 0
    not_reg_name = []
    reg_img_cnt = 0
    for name in facereg_model.db_names:
        if name in facereg_model.db_cnt_dict.keys():
            if facereg_model.db_cnt_dict[name]:
                person_real+=1
                reg_img_cnt += facereg_model.db_cnt_dict[name]
            else:
                not_reg_name.append(name)
        else:
            not_reg_name.append(name)
    print("person ",person_real)
    print("total faces ",idx_cnt)
    print("reg faces ",reg_img_cnt)
    print("not reg person: ",not_reg_name)
    record_w.write("person recognized num: %d \n" % person_real)
    record_w.write("detected total faces: %d \n" % idx_cnt)
    record_w.write("recognize      faces: %d \n" % reg_img_cnt)
    record_w.write("in db not reg person: %s \n" % (" ".join([str(y) for y in not_reg_name])))
    record_w.close()
    v_cap.release()
    cv2.destroyAllWindows()
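The person_name_dict bookkeeping above saves a recognized face only while the current frame stays within faceconfig.frame_interval of the stored counter for that name, and advances the counter on every sighting. A stripped-down, self-contained sketch of that gating (the interval value is a hypothetical stand-in):

frame_interval = 25          # hypothetical stand-in for faceconfig.frame_interval
person_name_dict = dict()

def gate_and_update(person_name, fram_cnt):
    # first sighting stores the current frame number; later sightings are
    # compared against, and then advance, that stored counter
    person_cnt = person_name_dict.setdefault(person_name, fram_cnt)
    save = fram_cnt - person_cnt <= frame_interval
    person_name_dict[person_name] = person_cnt + 1
    return save

print(gate_and_update("alice", 100))  # True  (first sighting)
print(gate_and_update("alice", 140))  # False (140 - 101 > 25)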
Example #5
def save_cropfromvideo(args):
    '''
    file_in: input video file path
    base_name: prefix for saved image names
    save_dir: directory for saved images
    fun: save detected faces to a directory
    '''
    file_in = args.file_in
    #base_name = args.base_name
    #save_dir = args.save_dir
    #save_dir2 = args.save_dir2
    #crop_size = args.crop_size
    #size_spl = crop_size.strip().split(',')
    #crop_size = [int(size_spl[0]),int(size_spl[1])]
    min_size = args.min_size
    model_dir = args.model_dir
    FaceAnti_model_dir = os.path.join(model_dir, cfgs.DATASET_NAME)
    model_path = os.path.join(FaceAnti_model_dir, cfgs.MODEL_PREFIX)
    FaceAnti_Model = Face_Anti_Spoof(model_path,
                                     args.load_epoch,
                                     cfgs.IMG_SIZE,
                                     args.gpu,
                                     layer='fc')
    caffe_mode_dir = os.path.join(model_dir, 'FaceDetect')
    threshold = np.array([0.7, 0.8, 0.95])
    Detect_Model = MTCNNDet(min_size, threshold, caffe_mode_dir)
    if file_in is None:
        v_cap = cv2.VideoCapture(0)
    else:
        v_cap = cv2.VideoCapture(file_in)
    #model_path = "../models/haarcascade_frontalface_default.xml"
    #detect_model = FaceDetector_Opencv(model_path)
    idx_cnt = 0
    #mk_dirs(save_dir)
    #mk_dirs(save_dir2)
    if not v_cap.isOpened():
        print("field to open video")
    else:
        print("video frame num: ", v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
        total_num = v_cap.get(cv2.CAP_PROP_FRAME_COUNT)
        frame_w = v_cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        frame_h = v_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        frame_cnt = 0
        class_dict = dict()
        pred_id_show = int(0)
        cv2.namedWindow("src")
        cv2.namedWindow("crop")
        cv2.moveWindow("crop", 1400, 10)
        cv2.moveWindow("src", 10, 10)
        while v_cap.isOpened():
            ret, frame = v_cap.read()
            if ret:
                rectangles = Detect_Model.detectFace(frame)
            else:
                continue
            if len(rectangles) > 0:
                rectangles = sort_box(rectangles)
                frame_cnt += 1
                '''
                if cfgs.crop_org:
                    for bbox_one in rectangles:
                        idx_cnt+=1
                        img_out = img_crop(frame,bbox_one,frame_w,frame_h)
                        savepath = os.path.join(save_dir,base_name+'_'+str(idx_cnt)+".jpg")
                        #savepath = os.path.join(save_dir,line_1)
                        img_out = cv2.resize(img_out,(96,112))
                        cv2.imwrite(savepath,img_out)
                else:
                    points = np.array(rectangles)
                    points = points[:,5:]
                    points_list = points.tolist()
                    crop_imgs = alignImg(frame,crop_size,points_list)
                    for box_idx,img_out in enumerate(crop_imgs):
                        idx_cnt+=1
                        savepath = os.path.join(save_dir,base_name+'_'+str(idx_cnt)+".jpg")
                        #img_out = cv2.resize(img_out,(96,112))
                        #cv2.imshow("test",img_out)
                        cv2.imwrite(savepath,img_out)
                        cv2.waitKey(50)
                        if cfgs.box_widen:
                            savepath2 = os.path.join(save_dir2,base_name+'_'+str(idx_cnt)+".jpg")
                            img_widen = img_crop(frame,rectangles[box_idx],frame_w,frame_h)
                            cv2.imwrite(savepath2,img_widen)
                            cv2.waitKey(50)
                        print("crop num,",idx_cnt)
                '''
                img_verify = img_crop(frame, rectangles[0], frame_w, frame_h)
                tmp, pred_id = FaceAnti_Model.inference(img_verify)
                if frame_cnt == 10:
                    frame_cnt = 0
                    for key_name in class_dict.keys():
                        if key_name in cfgs.DATA_NAME[1:]:
                            pred_id_show = int(1)
                            break
                        else:
                            pred_id_show = int(0)
                    class_dict.clear()
                else:
                    cur_cnt = class_dict.setdefault(cfgs.DATA_NAME[pred_id], 0)
                    class_dict[cfgs.DATA_NAME[pred_id]] = cur_cnt + 1
                label_show(frame, rectangles, pred_id_show)
                cv2.imshow('crop', img_verify)
            else:
                #print("failed ")
                pass
            cv2.imshow("src", frame)
            key_ = cv2.waitKey(10) & 0xFF
            if key_ == 27 or key_ == ord('q'):
                break
            #if fram_cnt == total_num:
            #   break
    v_cap.release()
    cv2.destroyAllWindows()
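The frame_cnt == 10 branch above collapses the predictions accumulated in class_dict into a single label for display: if any non-real class from cfgs.DATA_NAME[1:] was seen in the window, the shown id becomes 1, otherwise 0. A self-contained sketch of that windowed vote using a hypothetical DATA_NAME list:

DATA_NAME = ["real", "print_attack", "replay_attack"]  # hypothetical class names

def vote_window(class_dict):
    # any spoof class observed in the window forces the spoof label (1),
    # otherwise the window is labelled real (0)
    for key_name in class_dict.keys():
        if key_name in DATA_NAME[1:]:
            return 1
    return 0

print(vote_window({"real": 7, "replay_attack": 2}))  # 1
print(vote_window({"real": 10}))                     # 0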
Example #6
def save_cropfromtxt(args):
    '''
    file_in: text file listing image paths
    base_dir: directory the listed images are located in
    save_dir: directory detected faces are saved to
    fun: save id images; each saved name matches the input image name
    '''
    file_in = args.file_in
    #base_name = args.base_name
    save_dir = args.save_dir
    base_dir = args.base_dir  # assumed source for base_dir; the listed image paths are joined onto it below
    #crop_size = args.crop_size
    #size_spl = crop_size.strip().split(',')
    #crop_size = [int(size_spl[0]),int(size_spl[1])]
    model_dir = args.model_dir
    min_size = args.min_size
    f_ = open(file_in, 'r')
    failed_w = open('./output/failed_face3.txt', 'w')
    lines_ = f_.readlines()
    FaceAnti_model_dir = os.path.join(model_dir, cfgs.DATASET_NAME)
    model_path = os.path.join(FaceAnti_model_dir, cfgs.MODEL_PREFIX)
    FaceAnti_Model = Face_Anti_Spoof(model_path,
                                     args.load_epoch,
                                     cfgs.IMG_SIZE,
                                     args.gpu,
                                     layer='fc')
    caffe_mode_dir = os.path.join(model_dir, 'FaceDetect')
    threshold = np.array([0.7, 0.8, 0.95])
    Detect_Model = MTCNNDet(min_size, threshold, caffe_mode_dir)
    mk_dirs(save_dir)
    idx_cnt = 0
    if cfgs.show:
        cv2.namedWindow("src")
        cv2.namedWindow("crop")
        cv2.moveWindow("crop", 650, 10)
        cv2.moveWindow("src", 100, 10)
    total_item = len(lines_)
    for i in tqdm(range(total_item)):
        line_1 = lines_[i]
        line_1 = line_1.strip()
        img_path = os.path.join(base_dir, line_1)
        img = cv2.imread(img_path)
        if img is None:
            continue
        h, w = img.shape[:2]
        if cfgs.img_downsample and min(w, h) > 1000:
            img = img_ratio(img, 240)
        line_s = line_1.split("/")
        img_name = line_s[-1]
        new_dir = '/'.join(line_s[:-1])
        rectangles = Detect_Model.detectFace(img)
        if len(rectangles) > 0:
            idx_cnt += 1
            rectangles = sort_box(rectangles)
            '''
            if not cfgs.crop_org:
                points = np.array(rectangles)
                points = points[:,5:]
                points_list = points.tolist()
                points_list = [points_list[0]]
                img_out = alignImg(img,crop_size,points_list)
                img_out = img_out[0]
            else:
                img_out = img_crop(img,rectangles[0],img.shape[1],img.shape[0])
                #savepath = os.path.join(save_dir,str(idx_cnt)+".jpg")
                if cfgs.imgpad:
                    img_out = Img_Pad(img_out,crop_size)
                else:
                    img_out = cv2.resize(img_out,(crop_size[1],crop_size[0]))
            savepath = os.path.join(save_dir,line_1)
            '''
            img_verify = img_crop(img, rectangles[0], img.shape[1],
                                  img.shape[0])
            tmp, pred_id = FaceAnti_Model.inference(img_verify)
            #cv2.imwrite(savepath,img)
            if cfgs.show:
                label_show(img, rectangles, pred_id)
                cv2.imshow("crop", img_verify)
                cv2.waitKey(1000)
        else:
            failed_w.write(img_path)
            failed_w.write('\n')
            print("failed ", img_path)
        if cfgs.show:
            cv2.imshow("src", img)
            cv2.waitKey(10)
    failed_w.close()
    f_.close()
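The list file read above is expected to contain one image path per line, relative to base_dir; each line is stripped, joined onto base_dir, and split to recover the file name. A tiny self-contained illustration of that path handling with hypothetical paths:

import os

base_dir = "/data/faces"                       # hypothetical root directory
lines_ = ["idA/0001.jpg\n", "idB/0042.jpg\n"]  # what f_.readlines() might return

for line_1 in lines_:
    line_1 = line_1.strip()
    img_path = os.path.join(base_dir, line_1)
    img_name = line_1.split("/")[-1]
    print(img_path, img_name)
# /data/faces/idA/0001.jpg 0001.jpg
# /data/faces/idB/0042.jpg 0042.jpg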
Example #7
def save_cropfromvideo(file_in, base_name, save_dir, save_dir2, crop_size):
    '''
    file_in: input video file path
    base_name: prefix for saved image names
    save_dir: directory for saved images
    fun: save detected faces to a directory
    '''
    if file_in is None:
        v_cap = cv2.VideoCapture(0)
    else:
        v_cap = cv2.VideoCapture(file_in)
    min_size = 24
    threshold = np.array([0.7, 0.8, 0.95])
    detect_model = MTCNNDet(min_size, threshold)
    #model_path = "../models/haarcascade_frontalface_default.xml"
    #detect_model = FaceDetector_Opencv(model_path)
    #crop_size = [112,96]
    Align_Image = Align_img(crop_size)

    def mk_dirs(path):
        if not os.path.exists(path):
            os.makedirs(path)

    def img_crop(img, bbox, w, h):
        x1 = int(max(bbox[0], 0))
        y1 = int(max(bbox[1], 0))
        x2 = int(min(bbox[2], w))
        y2 = int(min(bbox[3], h))
        cropimg = img[y1:y2, x1:x2, :]
        return cropimg

    def img_crop2(img, bbox, imgw, imgh):
        x1 = int(bbox[0])
        y1 = int(bbox[1])
        x2 = int(bbox[2])
        y2 = int(bbox[3])
        if config.box_widen:
            boxw = x2 - x1
            boxh = y2 - y1
            x1 = int(max(0, int(x1 - 0.2 * boxw)))
            y1 = int(max(0, int(y1 - 0.1 * boxh)))
            x2 = int(min(imgw, int(x2 + 0.2 * boxw)))
            y2 = int(min(imgh, int(y2 + 0.1 * boxh)))
        cropimg = img[y1:y2, x1:x2, :]
        return cropimg

    idx_cnt = 0

    def label_show(img, rectangles):
        for rectangle in rectangles:
            #print(map(int,rectangle[5:]))
            score_label = str("{:.2f}".format(rectangle[4]))
            #score_label = str(1.0)
            cv2.putText(img, score_label,
                        (int(rectangle[0]), int(rectangle[1])),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
            cv2.rectangle(img, (int(rectangle[0]), int(rectangle[1])),
                          (int(rectangle[2]), int(rectangle[3])), (255, 0, 0),
                          1)
            if len(rectangle) > 5:
                if config.x_y:
                    for i in range(5, 15, 2):
                        cv2.circle(
                            img,
                            (int(rectangle[i + 0]), int(rectangle[i + 1])), 2,
                            (0, 255, 0))
                else:
                    rectangle = rectangle[5:]
                    for i in range(5):
                        cv2.circle(img,
                                   (int(rectangle[i]), int(rectangle[i + 5])),
                                   2, (0, 255, 0))

    def sort_box(boxes_or):
        boxes = np.array(boxes_or)
        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]
        area = np.multiply(x2 - x1 + 1, y2 - y1 + 1)
        I = area.argsort()[::-1]
        #print(I)
        #print(boxes_or[0])
        return boxes[I].tolist()

    mk_dirs(save_dir)
    mk_dirs(save_dir2)
    if not v_cap.isOpened():
        print("field to open video")
    else:
        print("video frame num: ", v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
        total_num = v_cap.get(cv2.CAP_PROP_FRAME_COUNT)
        fram_w = v_cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        fram_h = v_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        fram_cnt = 0
        while v_cap.isOpened():
            ret, frame = v_cap.read()
            fram_cnt += 1
            if ret:
                rectangles = detect_model.detectFace(frame)
            else:
                continue
            if len(rectangles) > 0:
                #rectangles = sort_box(rectangles)
                if config.crop_org:
                    for bbox_one in rectangles:
                        idx_cnt += 1
                        img_out = img_crop(frame, bbox_one, fram_w, fram_h)
                        savepath = os.path.join(
                            save_dir, base_name + '_' + str(idx_cnt) + ".jpg")
                        #savepath = os.path.join(save_dir,line_1)
                        img_out = cv2.resize(img_out, (96, 112))
                        cv2.imwrite(savepath, img_out)
                else:
                    points = np.array(rectangles)
                    points = points[:, 5:]
                    points_list = points.tolist()
                    #crop_imgs = Align_Image.extract_image_chips(frame,points_list)
                    crop_imgs = alignImg(frame, crop_size, points_list)
                    for box_idx, img_out in enumerate(crop_imgs):
                        idx_cnt += 1
                        savepath = os.path.join(
                            save_dir, base_name + '_' + str(idx_cnt) + ".jpg")
                        #img_out = cv2.resize(img_out,(96,112))
                        #cv2.imshow("test",img_out)
                        cv2.imwrite(savepath, img_out)
                        cv2.waitKey(50)
                        if config.box_widen:
                            savepath2 = os.path.join(
                                save_dir2,
                                base_name + '_' + str(idx_cnt) + ".jpg")
                            img_widen = img_crop2(frame, rectangles[box_idx],
                                                  fram_w, fram_h)
                            cv2.imwrite(savepath2, img_widen)
                            cv2.waitKey(50)
                        print("crop num,", idx_cnt)
                '''
                savedir = os.path.join(save_dir,new_dir)
                if os.path.exists(savedir):
                    savepath = os.path.join(savedir,img_name)
                    shutil.copyfile(img_path,savepath)
                else:
                    os.makedirs(savedir)
                    savepath = os.path.join(savedir,img_name)
                    shutil.copyfile(img_path,savepath)
                '''
                #cv2.imwrite(savepath,img)
                #label_show(frame,rectangles)
            else:
                #print("failed ")
                pass
            #cv2.imshow("test",frame)
            key_ = cv2.waitKey(10) & 0xFF
            if key_ == 27 or key_ == ord('q'):
                break
            if fram_cnt == total_num:
                break
    print("total ", idx_cnt)
    v_cap.release()
    cv2.destroyAllWindows()
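img_crop2 above widens each detection box by 20% of its width on the left/right and 10% of its height on the top/bottom before cropping, clamped to the image borders. The same arithmetic as a self-contained helper (the ratios are exposed as parameters here for clarity):

def widen_box(bbox, imgw, imgh, wx=0.2, wy=0.1):
    # expand (x1, y1, x2, y2) by wx * box width horizontally and
    # wy * box height vertically, clamping to the image borders
    x1, y1, x2, y2 = [int(v) for v in bbox[:4]]
    boxw, boxh = x2 - x1, y2 - y1
    x1 = max(0, int(x1 - wx * boxw))
    y1 = max(0, int(y1 - wy * boxh))
    x2 = min(int(imgw), int(x2 + wx * boxw))
    y2 = min(int(imgh), int(y2 + wy * boxh))
    return x1, y1, x2, y2

print(widen_box([50, 60, 150, 200, 0.98], 320, 240))  # (30, 46, 170, 214)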
Example #8
def save_cropfromtxt(file_in, base_dir, save_dir, crop_size):
    '''
    file_in: text file listing image paths
    base_dir: directory the listed images are located in
    save_dir: directory detected faces are saved to
    fun: save id images; each saved name matches the input image name
    '''
    f_ = open(file_in, 'r')
    lines_ = f_.readlines()
    min_size = 24
    threshold = np.array([0.5, 0.8, 0.9])
    detect_model = MTCNNDet(min_size, threshold)
    #model_path = "../models/haarcascade_frontalface_default.xml"
    #detect_model = FaceDetector_Opencv(model_path)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    def img_crop(img, bbox):
        imgh, imgw, imgc = img.shape
        x1 = int(bbox[0])
        y1 = int(bbox[1])
        x2 = int(bbox[2])
        y2 = int(bbox[3])
        if config.id_box_widen:
            boxw = x2 - x1
            boxh = y2 - y1
            x1 = max(0, int(x1 - 0.2 * boxw))
            y1 = max(0, int(y1 - 0.1 * boxh))
            x2 = min(imgw, int(x2 + 0.2 * boxw))
            #y2 = min(imgh,int(y2+0.1*boxh))
        cropimg = img[y1:y2, x1:x2, :]
        return cropimg

    idx_cnt = 0

    def label_show(img, rectangles):
        for rectangle in rectangles:
            score_label = str("{:.2f}".format(rectangle[4]))
            cv2.putText(img, score_label,
                        (int(rectangle[0]), int(rectangle[1])),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
            cv2.rectangle(img, (int(rectangle[0]), int(rectangle[1])),
                          (int(rectangle[2]), int(rectangle[3])), (255, 0, 0),
                          1)
            if len(rectangle) > 5:
                if config.x_y:
                    for i in range(5, 15, 2):
                        cv2.circle(
                            img,
                            (int(rectangle[i + 0]), int(rectangle[i + 1])), 2,
                            (0, 255, 0))
                else:
                    rectangle = rectangle[5:]
                    for i in range(5):
                        cv2.circle(img,
                                   (int(rectangle[i]), int(rectangle[i + 5])),
                                   2, (0, 255, 0))

    def sort_box(boxes_or):
        boxes = np.array(boxes_or)
        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]
        area = np.multiply(x2 - x1 + 1, y2 - y1 + 1)
        I = area.argsort()[::-1]
        #print(I)
        #print(boxes_or[0])
        return boxes[I].tolist()

    def img_ratio(img, img_h):
        h, w, c = img.shape
        ratio_ = float(h) / float(w)
        img_w = img_h / ratio_
        img_out = cv2.resize(img, (int(img_w), int(img_h)))
        return img_out

    for line_1 in lines_:
        line_1 = line_1.strip()
        img_path = os.path.join(base_dir, line_1)
        img = cv2.imread(img_path)
        if img is None:
            continue
        img = img_ratio(img, 320)
        line_s = line_1.split("/")
        img_name = line_s[-1]
        new_dir = '/'.join(line_s[:-1])
        rectangles = detect_model.detectFace(img)
        if len(rectangles) > 0:
            idx_cnt += 1
            rectangles = sort_box(rectangles)
            img_out = img_crop(img, rectangles[0])
            #savepath = os.path.join(save_dir,str(idx_cnt)+".jpg")
            img_out = cv2.resize(img_out, (crop_size[1], crop_size[0]))
            savepath = os.path.join(save_dir, line_1)
            '''
            savedir = os.path.join(save_dir,new_dir)
            if os.path.exists(savedir):
                savepath = os.path.join(savedir,img_name)
                shutil.copyfile(img_path,savepath)
            else:
                os.makedirs(savedir)
                savepath = os.path.join(savedir,img_name)
                shutil.copyfile(img_path,savepath)
            '''
            cv2.imwrite(savepath, img_out)
            cv2.waitKey(1000)
            #cv2.imwrite(savepath,img)
            label_show(img, rectangles)
            cv2.imshow("crop", img_out)
        else:
            print("failed ", img_path)
        cv2.imshow("test", img)
        cv2.waitKey(10)
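img_ratio above resizes an image so its height becomes img_h while keeping the aspect ratio (the width is scaled by the same factor). A quick self-contained check on a dummy image:

import cv2
import numpy as np

def img_ratio(img, img_h):
    h, w = img.shape[:2]
    ratio_ = float(h) / float(w)
    img_w = img_h / ratio_
    return cv2.resize(img, (int(img_w), int(img_h)))

dummy = np.zeros((480, 640, 3), dtype=np.uint8)
print(img_ratio(dummy, 320).shape)  # (320, 426, 3): 640 * 320 / 480 is about 426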