Example #1
def evalu_img(args):
    imgpath = args.img_path
    min_size = args.min_size
    model_dir = args.model_dir
    cv2.namedWindow("test")
    cv2.moveWindow("test",1400,10)
    base_name = "test_img"
    save_dir = './output'
    crop_size = [112,112]
    FaceAnti_model_dir = os.path.join(model_dir,cfgs.DATASET_NAME)
    model_path = os.path.join(FaceAnti_model_dir,cfgs.MODEL_PREFIX)
    FaceAnti_Model = Face_Anti_Spoof(model_path,args.load_epoch,cfgs.IMG_SIZE,args.gpu,layer='fc')
    caffe_model_dir = os.path.join(model_dir,'face_detect_models')
    threshold = np.array([0.7,0.8,0.95])
    #Detect_Model = MTCNNDet(min_size,threshold,caffe_model_dir)
    Detect_Model = MtcnnDetector(min_size,threshold,caffe_model_dir)
    img = cv2.imread(imgpath)
    h,w = img.shape[:2]
    if cfgs.img_downsample and h > 1000:
        img = img_ratio(img,240)
    rectangles = Detect_Model.detectFace(img)
    #draw = img.copy()
    face_attributes = []
    if len(rectangles)>0:
        rectangles = sort_box(rectangles)
        img_verify = img_crop(img,rectangles[0],img.shape[1],img.shape[0])
        face_attributes.append(img_verify)
        tmp,pred_id = FaceAnti_Model.inference(face_attributes)
        label_show(img,rectangles,tmp,pred_id)
    else:
        print("No face detected")
    cv2.imshow("test",img)
    cv2.waitKey(0)
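The helper sort_box used above (and again in Example #4) is not defined in these snippets; it is only relied on to put the primary face first so rectangles[0] can be cropped. A minimal sketch, assuming it orders detections by box area with the largest first; the actual helper might rank by the detector score instead:

# Assumed implementation of sort_box: order detections so that the most
# prominent face (largest box area) comes first. The real helper may rank
# by the detector score in box[4] instead.
def sort_box(rectangles):
    def box_area(box):
        return max(0.0, box[2] - box[0]) * max(0.0, box[3] - box[1])
    return sorted(rectangles, key=box_area, reverse=True)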
Example #2
def __init__(self, args):
    self.Detect_Model = MtcnnDetector(args)
    self.FaceAttributeModel = FaceAttribute(args)
    self.threshold = args.threshold
    self.img_dir = args.img_dir
    self.real_num = 0
    self.save_dir = args.save_dir
    if not os.path.exists(self.save_dir):
        os.makedirs(self.save_dir)
Example #3
def video_demo(args):
    '''
    file_in: input video file path
    base_name: saved images prefix name
    '''
    file_in = args.file_in
    min_size = args.min_size
    model_dir = args.model_dir
    FaceAnti_model_dir = os.path.join(model_dir,cfgs.DATASET_NAME)
    model_path = os.path.join(FaceAnti_model_dir,cfgs.MODEL_PREFIX)
    if args.gpu is None:
        ctx = mx.cpu()
    else:
        ctx = mx.gpu(0)
    FaceAnti_Model = Face_Anti_Spoof(model_path,args.load_epoch,cfgs.IMG_SIZE,ctx=ctx)
    caffe_model_dir = os.path.join(model_dir,'face_detect_models')
    threshold = np.array([0.7,0.8,0.95])
    #Detect_Model = MTCNNDet(min_size,threshold,caffe_model_dir)
    Detect_Model = MtcnnDetector(min_size,threshold,caffe_model_dir)
    if file_in is None:
        v_cap = cv2.VideoCapture(0)
    else:
        v_cap = cv2.VideoCapture(file_in)
    if not v_cap.isOpened():
        print("failed to open video")
    else:
        print("video frame num: ",v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
        #total_num = v_cap.get(cv2.CAP_PROP_FRAME_COUNT)
        frame_w = v_cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        frame_h = v_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        #frame_cnt = 0
        #class_dict = dict()
        #pred_id_show = int(0)
        cv2.namedWindow("src")
        cv2.namedWindow("crop")
        cv2.moveWindow("crop",1400,10)
        cv2.moveWindow("src",10,10)
        while v_cap.isOpened():
            ret,frame = v_cap.read()
            if ret: 
                rectangles = Detect_Model.detectFace(frame)
            else:
                break  # end of stream or read failure
            face_attributes = []
            if len(rectangles)> 0:
                '''
                rectangles = sort_box(rectangles)
                frame_cnt+=1
                if frame_cnt == 10:
                    frame_cnt =0
                    for key_name in class_dict.keys():
                        if key_name in cfgs.DATA_NAME[1:]:
                            pred_id_show = int(1)
                            break
                        else:
                            pred_id_show = int(0)
                    class_dict.clear()
                else:
                    cur_cnt = class_dict.setdefault(cfgs.DATA_NAME[pred_id],0)
                    class_dict[cfgs.DATA_NAME[pred_id]] = cur_cnt+1
                '''
                for box in rectangles:
                    img_verify = img_crop(frame,box,frame_w,frame_h)
                    img_verify = Img_Pad(img_verify,cfgs.IMG_SIZE)
                    cv2.imshow('crop',img_verify)
                    face_attributes.append(img_verify)
                t1 = time.time()
                tmp,pred_id = FaceAnti_Model.inference(face_attributes)
                t2 = time.time()
                print("inference time: ",t2-t1)
                label_show(frame,rectangles,tmp,pred_id)
                #cv2.imshow('crop',img_verify)
            else:
                #print("failed ")
                pass
            cv2.imshow("src",frame)
            key_ = cv2.waitKey(10) & 0xFF
            if key_ == 27 or key_ == ord('q'):
                break
            #if fram_cnt == total_num:
             #   break
    v_cap.release()
    cv2.destroyAllWindows()
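The commented-out block inside the detection branch hints at a temporal smoothing step: per-frame predictions are tallied for ten frames, and the displayed label switches to the spoof class only if any name from cfgs.DATA_NAME[1:] appeared in that window. A standalone sketch of that idea, with the helper name and window size as assumptions based on the commented code:

# Hypothetical helper reconstructing the commented-out voting logic above.
class PredictionSmoother(object):
    def __init__(self, window=10):
        self.window = window       # frames per voting window
        self.frame_cnt = 0
        self.class_dict = dict()   # per-class hit counts inside the window
        self.pred_id_show = 0      # 0: real, 1: a spoof class was seen

    def update(self, pred_name, spoof_names):
        '''pred_name: predicted class name for the current frame
           spoof_names: the spoof class names, e.g. cfgs.DATA_NAME[1:]'''
        self.frame_cnt += 1
        if self.frame_cnt == self.window:
            self.frame_cnt = 0
            self.pred_id_show = int(any(k in spoof_names for k in self.class_dict))
            self.class_dict.clear()
        else:
            self.class_dict[pred_name] = self.class_dict.get(pred_name, 0) + 1
        return self.pred_id_show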
Example #4
def evalue_fromtxt(args):
    '''
    file_in: text file listing the image paths
    base_dir: directory the images are located in
    save_dir: directory where the detected face crops are saved
    function: saves the detected face crops using the same name as the input image
    '''
    file_in = args.file_in
    base_dir = args.base_dir
    #base_name = args.base_name
    #save_dir = args.save_dir
    #crop_size = args.crop_size
    #size_spl = crop_size.strip().split(',')
    #crop_size = [int(size_spl[0]),int(size_spl[1])]
    model_dir = args.model_dir
    min_size = args.min_size
    f_ = open(file_in,'r')
    failed_w = open('./output/failed_face3.txt','w')
    lines_ = f_.readlines()
    FaceAnti_model_dir = os.path.join(model_dir,cfgs.DATASET_NAME)
    model_path = os.path.join(FaceAnti_model_dir,cfgs.MODEL_PREFIX)
    FaceAnti_Model = Face_Anti_Spoof(model_path,args.load_epoch,cfgs.IMG_SIZE,args.gpu,layer='fc')
    caffe_model_dir = os.path.join(model_dir,'face_detect_models')
    threshold = np.array([0.7,0.8,0.95])
    #Detect_Model = MTCNNDet(min_size,threshold,caffe_model_dir)
    Detect_Model = MtcnnDetector(min_size,threshold,caffe_model_dir)
    #mk_dirs(save_dir)
    idx_cnt = 0 
    if cfgs.show:
        cv2.namedWindow("src")
        cv2.namedWindow("crop")
        cv2.moveWindow("crop",650,10)
        cv2.moveWindow("src",100,10)
    total_item = len(lines_)
    for i in tqdm(range(total_item)):
        line_1 = lines_[i]
        line_1 = line_1.strip()
        img_path = os.path.join(base_dir,line_1)
        img = cv2.imread(img_path)
        if img is None:
            continue
        h,w = img.shape[:2]
        if cfgs.img_downsample and min(w,h) > 1000:
            img = img_ratio(img,240)
        line_s = line_1.split("/")  
        img_name = line_s[-1]
        new_dir = '/'.join(line_s[:-1]) 
        rectangles = Detect_Model.detectFace(img)
        face_attributes = []
        if len(rectangles)> 0:
            idx_cnt+=1
            rectangles = sort_box(rectangles)
            img_verify = img_crop(img,rectangles[0],img.shape[1],img.shape[0])
            face_attributes.append(img_verify)
            tmp,pred_id = FaceAnti_Model.inference(face_attributes)
            #cv2.imwrite(savepath,img)
            if cfgs.show:
                label_show(img,rectangles,tmp,pred_id)
                cv2.imshow("crop",img_verify)
                cv2.waitKey(1000)
        else:
            failed_w.write(img_path)
            failed_w.write('\n')
            print("failed ",img_path)
        if cfgs.show:
            cv2.imshow("src",img)
            cv2.waitKey(10)
    failed_w.close()
    f_.close()
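img_ratio is used in Examples #1 and #4 to downsample large inputs before detection, but its implementation is not shown. A plausible sketch, assuming it rescales the image so that its height equals the given value while keeping the aspect ratio; the real helper may use a different rule (for example scaling the shorter side):

# Assumed implementation of img_ratio: scale the image so its height becomes
# new_h, preserving the aspect ratio. The actual helper may differ.
import cv2

def img_ratio(img, new_h):
    h, w = img.shape[:2]
    scale = float(new_h) / h
    new_w = int(round(w * scale))
    return cv2.resize(img, (new_w, new_h))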
Example #5
def __init__(self,args):
    self.Detect_Model = MtcnnDetector(args)
    self.BreathMaskModel = BreathMask(args)
    self.threshold = args.threshold
    self.img_dir = args.img_dir
    self.real_num = 0
Example #6
class BreathMastTest(object):
    def __init__(self,args):
        self.Detect_Model = MtcnnDetector(args)
        self.BreathMaskModel = BreathMask(args)
        self.threshold = args.threshold
        self.img_dir = args.img_dir
        self.real_num = 0
    
    def img_crop(self,img,bbox,imgw,imgh):
        x1 = int(bbox[0])
        y1 = int(bbox[1])
        x2 = int(bbox[2])
        y2 = int(bbox[3])
        boxw = x2-x1
        boxh = y2-y1
        x1 = int(max(0,int(x1-0.3*boxw)))
        y1 = int(max(0,int(y1-0.3*boxh)))
        x2 = int(min(imgw,int(x2+0.3*boxw)))
        y2 = int(min(imgh,int(y2+0.3*boxh)))
        cropimg = img[y1:y2,x1:x2,:]
        return cropimg
    
    def label_show(self,img,rectangles,scores,pred_id):
        '''
        scores: shape-[batch,cls_nums]
        pred_id: shape-[batch,cls_nums]
        rectangles: shape-[batch,15]
            0-4: x1,y1,x2,y2,score
        '''
        show_labels = ['no_wear','wear_mask']
        colors = [(0,0,255),(255,0,0)]
        for idx,box in enumerate(rectangles):
            tmp_pred = pred_id[idx]
            tmp_score = '%.2f' % scores[idx]
            show_name = show_labels[int(tmp_pred)] +'_'+tmp_score
            color = colors[int(tmp_pred)]
            cv2.rectangle(img,(int(box[0]),int(box[1])),(int(box[2]),int(box[3])),color,1)
            font=cv2.FONT_HERSHEY_COMPLEX_SMALL
            font_scale = max(1, int((box[3]-box[1])*0.01))
            points = (int(box[0]),int(box[1]))
            cv2.putText(img, show_name, points, font, font_scale, color, 2)
        return img
        
    def inference_img(self,img):
        t1 = time.time()
        imgorg = img.copy()
        orgreg = 0
        img_out = img
        pred_ids = []
        if not orgreg:
            rectangles = self.Detect_Model.detectFace(imgorg)
            face_breathmasks = []
            frame_h,frame_w = imgorg.shape[:2]
            img_out = img.copy()
            pred_ids = []
            if len(rectangles)> 0:
                for box in rectangles:
                    img_verify = self.img_crop(imgorg,box,frame_w,frame_h)
                    img_verify = cv2.resize(img_verify,(112,112))
                    # img_verify = Img_Pad(img_verify,[cfg.InputSize_h,cfg.InputSize_w])
                    cv2.imshow('crop',img_verify)
                    face_breathmasks.append(img_verify)
                scores,pred_ids = self.BreathMaskModel.inference(face_breathmasks)
                img_out = self.label_show(img,rectangles,scores,pred_ids)
        else:
            scores,pred_ids = self.BreathMaskModel.inference([img])
            img_out = self.label_show(img,[[10,10,119,119]],scores,pred_ids)
        t2 = time.time()
        #print('consuming:',t2-t1)
        return img_out,pred_ids

    def __call__(self,imgpath):
        if os.path.isdir(imgpath):
            cnts = os.listdir(imgpath)
            for tmp in cnts:
                tmppath = os.path.join(imgpath,tmp.strip())
                img = cv2.imread(tmppath)
                if img is None:
                    continue
                frame,cnt_head = self.inference_img(img)
                # print('heads >> ',cnt_head)
                cv2.imshow('result',frame)
                #savepath = os.path.join(self.save_dir,save_name)
                # cv2.imwrite('test.jpg',frame)
                cv2.waitKey(0) 
        elif os.path.isfile(imgpath) and imgpath.endswith('txt'):
            # if not os.path.exists(self.save_dir):
            #     os.makedirs(self.save_dir)
            f_r = open(imgpath,'r')
            file_cnts = f_r.readlines()
            for j in tqdm(range(len(file_cnts))):
                tmp_file = file_cnts[j].strip()
                tmp_file_s = tmp_file.split('\t')
                if len(tmp_file_s)>1:
                    tmp_file = tmp_file_s[0]
                    self.real_num = int(tmp_file_s[1])
                if not tmp_file.endswith('jpg'):
                    tmp_file = tmp_file +'.jpg'
                # tmp_path = os.path.join(self.img_dir,tmp_file) 
                tmp_path = tmp_file
                if not os.path.exists(tmp_path):
                    print(tmp_path)
                    continue
                img = cv2.imread(tmp_path) 
                if img is None:
                    print('None',tmp_path)
                    continue
                frame,cnt_head = self.inference_img(img)
                cv2.imshow('result',frame)
                #savepath = os.path.join(self.save_dir,save_name)
                #cv2.imwrite('test.jpg',frame)
                cv2.waitKey(0) 
        elif os.path.isfile(imgpath) and imgpath.endswith(('.mp4','.avi')) :
            cap = cv2.VideoCapture(imgpath)
            if not cap.isOpened():
                print("failed to open video")
                return 0
            else: 
                while cap.isOpened():
                    ret,img = cap.read()
                    if not ret:  # end of stream or read failure
                        break
                    frame,cnt_head = self.inference_img(img)
                    cv2.imshow('result',frame)
                    q = cv2.waitKey(10) & 0xFF
                    if q == 27 or q == ord('q'):
                        break
            cap.release()
            cv2.destroyAllWindows()
        elif os.path.isfile(imgpath):
            img = cv2.imread(imgpath)
            if img is not None:
                # grab next frame
                # update FPS counter
                frame,cnt_head = self.inference_img(img)
                print(cnt_head)
                cv2.imshow('result',frame)
                # cv2.imwrite('test_a1.jpg',frame)
                key = cv2.waitKey(0) 
        elif imgpath=='video':
            cap = cv2.VideoCapture(0)
            if not cap.isOpened():
                print("failed to open camera")
                return 0
            else: 
                while cap.isOpened():
                    ret,img = cap.read()
                    if not ret:  # read failure
                        break
                    frame,cnt_head = self.inference_img(img)
                    cv2.imshow('result',frame)
                    q = cv2.waitKey(10) & 0xFF
                    if q == 27 or q == ord('q'):
                        break
            cap.release()
            cv2.destroyAllWindows()
        else:
            print('please provide a valid image path, directory, txt list, video file, or "video"')
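BreathMastTest is driven entirely through __call__, which accepts a single image, a directory, a .txt list, a video file, or the literal string 'video' for the webcam. A minimal driver is sketched below; the flag names are assumptions, and MtcnnDetector/BreathMask will typically read additional fields from args that are not listed here:

# Hypothetical driver for BreathMastTest; the flag names are assumptions and
# the detector/classifier wrappers may require further fields on args.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='breath-mask demo')
    parser.add_argument('--img-dir', dest='img_dir', type=str,
                        help='image file, directory, txt list, video file, or "video"')
    parser.add_argument('--threshold', type=float, default=0.5)
    args = parser.parse_args()
    tester = BreathMastTest(args)
    tester(args.img_dir)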
Example #7
class FaceAttrTest(object):
    def __init__(self, args):
        self.Detect_Model = MtcnnDetector(args)
        self.FaceAttributeModel = FaceAttribute(args)
        self.threshold = args.threshold
        self.img_dir = args.img_dir
        self.real_num = 0
        self.save_dir = args.save_dir
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)

    def img_crop(self, img, bbox, imgw, imgh):
        x1 = int(bbox[0])
        y1 = int(bbox[1])
        x2 = int(bbox[2])
        y2 = int(bbox[3])
        boxw = x2 - x1
        boxh = y2 - y1
        x1 = int(max(0, int(x1 - 0.3 * boxw)))
        y1 = int(max(0, int(y1 - 0.3 * boxh)))
        x2 = int(min(imgw, int(x2 + 0.3 * boxw)))
        y2 = int(min(imgh, int(y2 + 0.3 * boxh)))
        cropimg = img[y1:y2, x1:x2, :]
        return cropimg

    def label_show(self, img, rectangles, scores, pred_id):
        '''
        scores: shape-[batch,cls_nums]
        pred_id: shape-[batch,cls_nums]
        rectangles: shape-[batch,15]
            0-4: x1,y1,x2,y2,score
        '''
        for idx, rectangle in enumerate(rectangles):
            tmp_pred = pred_id[idx]
            tmp_scores = scores[idx]
            labels = np.zeros(6, dtype=np.int32)
            show_scores = np.zeros(6)
            labels = [
                tmp_pred[0], tmp_pred[8], tmp_pred[10], tmp_pred[11],
                tmp_pred[15], tmp_pred[17]
            ]
            show_scores = [
                tmp_scores[0], tmp_scores[8], tmp_scores[10], tmp_scores[11],
                tmp_scores[15], tmp_scores[17]
            ]
            p_name = ['no_beard', 'bangs', 'male', 'hat', 'glass', 'smile']
            n_name = [
                'beard', 'no_bangs', 'female', 'no_hat', 'no_glass', 'no_smile'
            ]
            #p_color = [[255,0,0],[255,0,0],[255,0,0],[255,0,0],[255,0,0],[255,0,0],[255,0,0],[255,0,0]]
            #n_color = [[0,0,255],[0,0,255],[0,0,255],[0,0,255],[0,0,255],[0,0,255],[0,0,255],[0,0,255]]
            show_name = np.where(labels, p_name, n_name)
            # show_scores = np.where(labels,show_scores,1-show_scores)
            #show_color = np.where(labels,p_color,n_color)
            for t_id, score in enumerate(show_name):
                if labels[t_id]:
                    colors = (255, 0, 0)
                    tmp_s = show_scores[t_id]
                else:
                    colors = (0, 0, 255)
                    tmp_s = 1.0 - show_scores[t_id]
                score = score + '_' + '%.2f' % tmp_s
                if score.startswith('no_beard'):  # the score suffix was appended above, so compare by prefix
                    colors = (0, 0, 255)
                self.draw_label(img, rectangle, score, t_id, colors)
            if labels[3]:
                color_box = (255, 0, 0)
            else:
                color_box = (0, 0, 255)
            self.draw_box(img, rectangle, color_box)
        return img

    def draw_label(self,
                   image,
                   point,
                   label,
                   mode=0,
                   color=(255, 255, 255),
                   font=cv2.FONT_HERSHEY_COMPLEX_SMALL,
                   font_scale=2,
                   thickness=2):
        '''
        mode: 0~7
        '''
        size = cv2.getTextSize(label, font, font_scale, thickness)[0]
        x, y = int(point[0]), int(point[1])
        w, h = int(point[2]), int(point[3])
        lb_w = int(size[0])
        lb_h = int(size[1])
        unit = int(mode)
        unit2 = int(mode + 1)
        if y - int(unit2 * lb_h) <= 0:
            cv2.rectangle(image, (x, y + h + unit * lb_h),
                          (x + lb_w, y + h + unit2 * lb_h), color)
            cv2.putText(image, label, (x, y + h + unit2 * lb_h), font,
                        font_scale, color, thickness)
        else:
            cv2.rectangle(image, (x, y - unit2 * lb_h),
                          (x + lb_w, y - unit * lb_h), color)
            cv2.putText(image, label, (x, y - unit * lb_h), font, font_scale,
                        color, thickness)

    def draw_box(self, img, box, color=(255, 0, 0)):
        cv2.rectangle(img, (int(box[0]), int(box[1])),
                      (int(box[2]), int(box[3])), color, 1)
        '''
        if len(box)>5:
            if cfgs.x_y:
                for i in range(5,15,2):
                    cv2.circle(img,(int(box[i+0]),int(box[i+1])),2,(0,255,0))
            else:
                box = box[5:]
                for i in range(5):
                    cv2.circle(img,(int(box[i]),int(box[i+5])),2,(0,255,0))
        '''

    def inference_img(self, img):
        t1 = time.time()
        imgorg = img.copy()
        orgreg = 0
        img_out = img
        pred_ids = []
        if not orgreg:
            rectangles = self.Detect_Model.detectFace(imgorg)
            face_breathmasks = []
            frame_h, frame_w = imgorg.shape[:2]
            img_out = img.copy()
            pred_ids = []
            if len(rectangles) > 0:
                for box in rectangles:
                    img_verify = self.img_crop(imgorg, box, frame_w, frame_h)
                    # img_verify = cv2.resize(img_verify,(112,112))
                    img_verify = Img_Pad(img_verify,
                                         [cfg.InputSize_h, cfg.InputSize_w])
                    cv2.imshow('crop', img_verify)
                    face_breathmasks.append(img_verify)
                scores, pred_ids = self.FaceAttributeModel.inference(
                    face_breathmasks)
                img_out = self.label_show(img, rectangles, scores, pred_ids)
        else:
            scores, pred_ids = self.FaceAttributeModel.inference([img])
            img_out = self.label_show(img, [[10, 10, 119, 119]], scores,
                                      pred_ids)
        t2 = time.time()
        #print('consuming:',t2-t1)
        return img_out, pred_ids

    def __call__(self, imgpath):
        if os.path.isdir(imgpath):
            cnts = os.listdir(imgpath)
            for tmp in cnts:
                tmppath = os.path.join(imgpath, tmp.strip())
                img = cv2.imread(tmppath)
                if img is None:
                    continue
                frame, cnt_head = self.inference_img(img)
                # print('heads >> ',cnt_head)
                cv2.imshow('result', frame)
                savename = tmp.strip()
                savepath = os.path.join(self.save_dir, savename)
                cv2.imwrite(savepath, frame)
                cv2.waitKey(0)
        elif os.path.isfile(imgpath) and imgpath.endswith('txt'):
            # if not os.path.exists(self.save_dir):
            #     os.makedirs(self.save_dir)
            f_r = open(imgpath, 'r')
            file_cnts = f_r.readlines()
            for j in tqdm(range(len(file_cnts))):
                tmp_file = file_cnts[j].strip()
                tmp_file_s = tmp_file.split('\t')
                if len(tmp_file_s) > 1:
                    tmp_file = tmp_file_s[0]
                    self.real_num = int(tmp_file_s[1])
                if not tmp_file.endswith('jpg'):
                    tmp_file = tmp_file + '.jpg'
                # tmp_path = os.path.join(self.img_dir,tmp_file)
                tmp_path = tmp_file
                if not os.path.exists(tmp_path):
                    print(tmp_path)
                    continue
                img = cv2.imread(tmp_path)
                if img is None:
                    print('None', tmp_path)
                    continue
                frame, cnt_head = self.inference_img(img)
                cv2.imshow('result', frame)
                #savepath = os.path.join(self.save_dir,save_name)
                #cv2.imwrite('test.jpg',frame)
                cv2.waitKey(0)
        elif os.path.isfile(imgpath) and imgpath.endswith(('.mp4', '.avi')):
            cap = cv2.VideoCapture(imgpath)
            if not cap.isOpened():
                print("failed to open video")
                return 0
            else:
                while cap.isOpened():
                    ret, img = cap.read()
                    if not ret:  # end of stream or read failure
                        break
                    frame, cnt_head = self.inference_img(img)
                    cv2.imshow('result', frame)
                    q = cv2.waitKey(10) & 0xFF
                    if q == 27 or q == ord('q'):
                        break
            cap.release()
            cv2.destroyAllWindows()
        elif os.path.isfile(imgpath):
            img = cv2.imread(imgpath)
            if img is not None:
                # grab next frame
                # update FPS counter
                frame, cnt_head = self.inference_img(img)
                print(cnt_head)
                cv2.imshow('result', frame)
                # cv2.imwrite('test_a1.jpg',frame)
                key = cv2.waitKey(0)
        elif imgpath == 'video':
            cap = cv2.VideoCapture(0)
            if not cap.isOpened():
                print("failed to open camera")
                return 0
            else:
                while cap.isOpened():
                    ret, img = cap.read()
                    if not ret:  # read failure
                        break
                    frame, cnt_head = self.inference_img(img)
                    cv2.imshow('result', frame)
                    q = cv2.waitKey(10) & 0xFF
                    if q == 27 or q == ord('q'):
                        break
            cap.release()
            cv2.destroyAllWindows()
        else:
            print('please provide a valid image path, directory, txt list, video file, or "video"')
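Img_Pad is used in Examples #3 and #7 to bring a face crop to the network input size, but it is imported from elsewhere. A plausible sketch, assuming it letterboxes the crop into the target [h, w] (resize with the aspect ratio preserved, then pad with a constant border); the real helper may pad or resize differently:

# Assumed implementation of Img_Pad: letterbox the crop into size [h, w]
# by resizing with the aspect ratio preserved and padding with black.
import cv2

def Img_Pad(img, size):
    target_h, target_w = size
    h, w = img.shape[:2]
    scale = min(float(target_h) / h, float(target_w) / w)
    new_w, new_h = int(round(w * scale)), int(round(h * scale))
    resized = cv2.resize(img, (new_w, new_h))
    top = (target_h - new_h) // 2
    bottom = target_h - new_h - top
    left = (target_w - new_w) // 2
    right = target_w - new_w - left
    return cv2.copyMakeBorder(resized, top, bottom, left, right,
                              cv2.BORDER_CONSTANT, value=(0, 0, 0))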
Example #8
def evalu_img(args):
    cv2.namedWindow("test")
    cv2.moveWindow("test", 1400, 10)
    threshold = np.array([0.5, 0.7, 0.9])
    base_name = "test_img"
    save_dir = './output'
    crop_size = [112, 112]
    #detect_model = MTCNNDet(min_size,threshold)
    detect_model = MtcnnDetector(args)
    #alignface = Align_img(crop_size)
    imgorg = cv2.imread(args.file_in)
    #img = cv2.resize(img,(640,480))
    #img = cv2.cvtColor(imgorg,cv2.COLOR_BGR2RGB)
    img = imgorg
    h, w = img.shape[:2]
    rectangles = detect_model.detectFace(img)
    #draw = img.copy()
    print("num:", len(rectangles))
    if len(rectangles) > 0:
        points = np.array(rectangles)
        #print('rec shape',points.shape)
        points = points[:, 5:]
        #print("landmarks: ",points)
        points_list = points.tolist()
        # crop_imgs = alignImg(img,crop_size,points_list)
        crop_imgs = alignImg_opencv(img, crop_size, points_list)
        # crop_imgs = alignImg_solve(img,crop_size,points_list)
        #crop_imgs = alignface.extract_image_chips(img,points_list)
        # crop_imgs = alignImg_angle(img,crop_size,points_list)
        for idx_cnt, img_out in enumerate(crop_imgs):
            savepath = os.path.join(save_dir,
                                    base_name + '_' + str(idx_cnt) + ".jpg")
            #img_out = cv2.resize(img_out,(112,112))
            cv2.imshow("crop", img_out)
            cv2.waitKey(0)
            cv2.imwrite(savepath, img_out)
        for rectangle in rectangles:
            print('w,h', rectangle[2] - rectangle[0],
                  rectangle[3] - rectangle[1])
            score_label = str("{:.2f}".format(rectangle[4]))
            cv2.putText(imgorg, score_label,
                        (int(rectangle[0]), int(rectangle[1])),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
            cv2.rectangle(imgorg, (int(rectangle[0]), int(rectangle[1])),
                          (int(rectangle[2]), int(rectangle[3])),
                          (239, 185, 0), 2)
            if len(rectangle) > 5:
                if 1:
                    for i in range(5, 15, 2):
                        cv2.circle(
                            imgorg,
                            (int(rectangle[i + 0]), int(rectangle[i + 1])), 2,
                            (0, 255, 0))
                else:
                    rectangle = rectangle[5:]
                    for i in range(5):
                        cv2.circle(imgorg,
                                   (int(rectangle[i]), int(rectangle[i + 5])),
                                   2, (0, 255, 0))
    else:
        print("No face detected")
    cv2.imshow("test", imgorg)
    cv2.waitKey(0)
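In Example #8, alignImg_opencv warps each face using the five MTCNN landmarks stored after the box coordinates in each rectangle, but its implementation is not shown. The sketch below illustrates the general idea with a similarity transform estimated by OpenCV; the 112x112 reference template and the interleaved x,y landmark layout are assumptions, not taken from the original code:

# Illustrative landmark alignment (not the original alignImg_opencv): estimate
# a similarity transform from the 5 detected landmarks to a fixed 112x112
# template and warp the face. Template coordinates and the interleaved x,y
# landmark layout are assumptions.
import cv2
import numpy as np

REF_112 = np.float32([[38.29, 51.70], [73.53, 51.50], [56.03, 71.74],
                      [41.55, 92.37], [70.73, 92.20]])

def align_face(img, landmarks, crop_size=(112, 112)):
    pts = np.float32(landmarks).reshape(5, 2)          # x1,y1,...,x5,y5
    M, _ = cv2.estimateAffinePartial2D(pts, REF_112)   # similarity transform
    return cv2.warpAffine(img, M, crop_size)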