Code Example #1
def net(stage):
    detectors = [None, None, None]
    if stage in ['pnet', 'rnet', 'onet']:
        modelPath = '/home/lxz/project/faceid/main/tmp/model/pnet/'
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('pnet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))  # auto match a max epoch model
        modelPath = os.path.join(modelPath, "pnet-%d" % (maxEpoch))
        logging.info("Use PNet model: %s" % (modelPath))
        detectors[0] = FcnDetector(P_Net, modelPath)
    if stage in ['rnet', 'onet']:
        modelPath = '/home/lxz/project/faceid/main/tmp/model/rnet/'
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('rnet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "rnet-%d" % (maxEpoch))
        logging.info("Use RNet model: %s" % (modelPath))
        detectors[1] = Detector(R_Net, 24, 1, modelPath)
    if stage in ['onet']:
        modelPath = '/home/lxz/project/faceid/main/tmp/model/onet/'
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('onet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "onet-%d" % (maxEpoch))
        logging.info("Use ONet model: %s" % (modelPath))
        detectors[2] = Detector(O_Net, 48, 1, modelPath)
    return detectors
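For reference, a minimal sketch of how the detectors list returned by net() could be wired up, mirroring the MtcnnDetector and TestLoader usage shown in Code Example #2 below; testFolder and the threshold values here are illustrative placeholders, not part of the original snippet.

# Hedged usage sketch (assumes the MtcnnDetector/TestLoader classes used elsewhere in these examples)
detectors = net('onet')
mtcnnDetector = MtcnnDetector(detectors=detectors, min_face_size=24, threshold=[0.9, 0.6, 0.7])
testImages = [os.path.join(testFolder, name) for name in os.listdir(testFolder)]
allBoxes, allLandmarks = mtcnnDetector.detect_face(TestLoader(testImages))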
Code Example #2
def test(stage, testFolder):
    print("Start testing in %s"%(testFolder))
    detectors = [None, None, None]
    if stage in ['pnet', 'rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/pnet/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('pnet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a)) # auto match a max epoch model
        modelPath = os.path.join(modelPath, "pnet-%d"%(maxEpoch))
        print("Use PNet model: %s"%(modelPath))
        detectors[0] = FcnDetector(P_Net,modelPath) 
    if stage in ['rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/rnet/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('rnet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "rnet-%d"%(maxEpoch))
        print("Use RNet model: %s"%(modelPath))
        detectors[1] = Detector(R_Net, 24, 1, modelPath)
    if stage in ['onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/onet/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('onet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "onet-%d"%(maxEpoch))
        print("Use ONet model: %s"%(modelPath))
        detectors[2] = Detector(O_Net, 48, 1, modelPath)
    mtcnnDetector = MtcnnDetector(detectors=detectors, min_face_size = 24, threshold=[0.9, 0.6, 0.7])

    testImages = []
    for name in os.listdir(testFolder):
        testImages.append(os.path.join(testFolder, name))
    testDatas = TestLoader(testImages)
    # Now to detect
    allBoxes, allLandmarks = mtcnnDetector.detect_face(testDatas)
    print("\n")
    # Save it
    for idx, imagePath in enumerate(testImages):
        savePath = os.path.join(rootPath, 'testing', 'results_%s'%(stage))
        if not os.path.isdir(savePath):
            os.makedirs(savePath)

        image = cv2.imread(imagePath)

        save_bboxes(savePath,idx,image,allBoxes[idx])


        for bbox in allBoxes[idx]:
            cv2.putText(image, str(np.round(bbox[4], 2)), (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_TRIPLEX, 1, color=(255, 0, 255))
            cv2.rectangle(image, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 0, 255))
        allLandmark = allLandmarks[idx]
        if allLandmark is not None:  # pnet and rnet stages do not return landmarks
            for landmark in allLandmark:
                for i in range(len(landmark) // 2):
                    cv2.circle(image, (int(landmark[2 * i]), int(landmark[2 * i + 1])), 3, (0, 0, 255))

        cv2.imwrite(os.path.join(savePath, "result_%d.jpg" %(idx)), image)
        print("Save image to %s"%(savePath))
Code Example #3
def main(args):
    '''Generate the input data for the next network from PNet or RNet detections.'''
    size = args.input_size
    batch_size = config.batches
    min_face_size = config.min_face
    stride = config.stride
    thresh = config.thresh
    # model paths
    model_path = ['../model/PNet/', '../model/RNet/', '../model/ONet']
    if size == 12:
        net = 'PNet'
        save_size = 24
    elif size == 24:
        net = 'RNet'
        save_size = 48

    # image data directory
    base_dir = 'g:/mtcnn-dataset/data/WIDER_train/'
    # directory for the processed crops
    data_dir = 'g:/mtcnn-dataset/data/%d' % (save_size)

    neg_dir = os.path.join(data_dir, 'negative')
    pos_dir = os.path.join(data_dir, 'positive')
    part_dir = os.path.join(data_dir, 'part')
    for dir_path in [neg_dir, pos_dir, part_dir]:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
    detectors = [None, None, None]
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    if net == 'RNet':
        RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
        detectors[1] = RNet

    filename = '../data/wider_face_train_bbx_gt.txt'
    # read the image/box pairs from the file (helper defined in utils)
    data = read_annotation(base_dir, filename)
    mtcnn_detector = MtcnnDetector(detectors,
                                   min_face_size=min_face_size,
                                   stride=stride,
                                   threshold=thresh)
    save_path = data_dir
    save_file = os.path.join(save_path, 'detections.pkl')
    if not os.path.exists(save_file):
        # wrap the data in an iterator
        print('loading data')
        test_data = TestLoader(data['images'])
        detections, _ = mtcnn_detector.detect_face(test_data)
        print('detection done')

        with open(save_file, 'wb') as f:
            pickle.dump(detections, f, 1)
    print('start generating images')
    save_hard_example(save_size, data, neg_dir, pos_dir, part_dir, save_path)
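A hedged sketch of how main() might be invoked; the argparse wiring below is an assumption for illustration, since only args.input_size is actually read by the function.

# Illustrative entry point (assumed, not part of the original source)
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_size', type=int, default=12, help='12 to generate RNet data, 24 for ONet data')
    main(parser.parse_args())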
Code Example #4
def test(stage, testFolder):
    print("Start testing in %s"%(testFolder))
    detectors = [None, None, None]
    if stage in ['pnet', 'rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/pnet/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('pnet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a)) # auto match a max epoch model
        modelPath = os.path.join(modelPath, "pnet-%d"%(maxEpoch))
        print("Use PNet model: %s"%(modelPath))
        detectors[0] = FcnDetector(P_Net,modelPath)
    if stage in ['rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/rnet/model/middle/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('rnet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "rnet-%d"%(maxEpoch))
        print("Use RNet model: %s"%(modelPath))
        detectors[1] = Detector(R_Net, 24, 1, modelPath)
    if stage in ['onet']:
        modelPath = os.path.join(rootPath, 'tmp/onet/model/small/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('onet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "onet-%d"%(maxEpoch))
        print("Use ONet model: %s"%(modelPath))
        detectors[2] = Detector(O_Net, 48, 1, modelPath)
    mtcnnDetector = MtcnnDetector(detectors=detectors, min_face_size=12, threshold=[0.6, 0.6, 0.7], scale_factor=0.7)

    testImages = []
    for name in os.listdir(testFolder):
        testImages.append(os.path.join(testFolder, name))

    print("\n")
    right_num = 0
    miss_num = 0
    FN = 0
    # Evaluate detections
    for idx, imagePath in enumerate(testImages):
        if idx <= 6000:
            image = cv2.imread(imagePath)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR
            print(right_num, FN, miss_num)
            try:
                allBoxes, allLandmarks = mtcnnDetector.detect_face([image])
                if len(allBoxes) == 1:
                    right_num += 1
                else:
                    FN += len(allBoxes) - 1
            except Exception:
                miss_num += 1
        else:
            break
Code Example #5
def test(stage):
    detectors = [None, None, None]
    if stage in ['pnet', 'rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/pnet/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('pnet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a)) # auto match a max epoch model
        modelPath = os.path.join(modelPath, "pnet-%d"%(maxEpoch))
        print("Use PNet model: %s"%(modelPath))
        detectors[0] = FcnDetector(P_Net,modelPath)
    if stage in ['rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/rnet/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('rnet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "rnet-%d"%(maxEpoch))
        print("Use RNet model: %s"%(modelPath))
        detectors[1] = Detector(R_Net, 24, 1, modelPath)
    if stage in ['onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/onet/')
        a = [b[5:-6] for b in os.listdir(modelPath) if b.startswith('onet-') and b.endswith('.index')]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "onet-%d"%(maxEpoch))
        print("Use ONet model: %s"%(modelPath))
        detectors[2] = Detector(O_Net, 48, 1, modelPath)
    mtcnnDetector = MtcnnDetector(detectors=detectors, min_face_size=50, threshold=[0.8, 0.8, 0.9], scale_factor=0.4)

    cap = cv2.VideoCapture(0)
    while True:
        testImages = []
        ret, image = cap.read()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # camera frames are BGR
        testImages.append(image)
        # Now to detect
        starttime=time.time()
        allBoxes, allLandmarks = mtcnnDetector.detect_face(testImages)
        # print("\n")
        # Save it
        # print(time.time()-starttime)
        for bbox in allBoxes[0]:
            cv2.putText(image,str(np.round(bbox[4],2)),(int(bbox[0]),int(bbox[1])),cv2.FONT_HERSHEY_TRIPLEX,1,color=(255,0,255))
            cv2.rectangle(image, (int(bbox[0]),int(bbox[1])),(int(bbox[2]),int(bbox[3])),(0,0,255))
        allLandmark = allLandmarks[0]
        if allLandmark is not None:  # pnet and rnet stages do not return landmarks
            for landmark in allLandmark:
                for i in range(len(landmark) // 2):
                    cv2.circle(image, (int(landmark[2 * i]), int(landmark[2 * i + 1])), 3, (0, 0, 255))
        cv2.imshow("test", image)
        c = cv2.waitKey(1) & 0xFF
        if c == 27 or c == ord('q'):
            break
Code Example #6
def test_net(batch_size, stage, thresh, min_face_size, stride):
    print(">>>>>> Detect bbox for %s..." % (stage))
    detectors = [None, None, None]
    if stage in ["rnet", "onet"]:
        modelPath = os.path.join(rootPath, 'tmp/model/pnet/')
        # pick which checkpoint (epoch) to load
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('pnet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "pnet-%d" % (maxEpoch))
        print("Use PNet model: %s" % (modelPath))
        PNet = FcnDetector(P_Net, modelPath)
        detectors[0] = PNet
    if stage in ["onet"]:
        modelPath = os.path.join(rootPath, 'tmp/model/rnet/')
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('rnet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "rnet-%d" % (maxEpoch))
        print("Use RNet model: %s" % (modelPath))
        RNet = Detector(R_Net, 24, batch_size, modelPath)
        detectors[1] = RNet
    # read annotation (type: dict)
    widerImagesPath = os.path.join(rootPath, "dataset", "WIDER_train",
                                   "images")
    annoTxtPath = os.path.join(rootPath, "dataset",
                               "wider_face_train_bbx_gt.txt")
    # data['images'], data['bboxes']
    data = read_wider_annotation(widerImagesPath, annoTxtPath)
    mtcnn_detector = MtcnnDetector(detectors=detectors,
                                   min_face_size=min_face_size,
                                   stride=stride,
                                   threshold=thresh)
    test_data = TestLoader(data['images'])
    # do detect
    detections, _ = mtcnn_detector.detect_face(test_data)
    # save detect result
    save_path = os.path.join(rootPath, "tmp/data", stage)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    save_file = os.path.join(save_path, "detections.pkl")
    with open(save_file, 'wb') as f:
        pickle.dump(detections, f, 1)
    print("\nDone! Start to do OHEM...")
    __save_data(stage, data, save_path)
Code Example #7
def load_model(self):
    thresh = [0.6, 0.7, 0.7]
    min_face_size = 20
    stride = 2
    slide_window = False
    detectors = [None, None, None]
    prefix = [
        './weight/PNet_landmark/PNet', './weight/RNet_landmark/RNet',
        './weight/ONet_landmark/ONet'
    ]
    epoch = [18, 14, 16]
    model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
    PNet, RNet, ONet = FcnDetector(P_Net, model_path[0]), Detector(R_Net, 24, 1, model_path[1]), \
                       Detector(O_Net, 48, 1, model_path[2])
    detectors[0], detectors[1], detectors[2] = PNet, RNet, ONet
    self.mtcnn_detector = MtcnnDetector(detectors=detectors,
                                        min_face_size=min_face_size,
                                        stride=stride,
                                        threshold=thresh,
                                        slide_window=slide_window)
Code Example #8
def load_mtcnn():
    MODEL_PATH = config.MTCNN_MODEL_PATH
    MIN_FACE_SIZE = int(config.MIN_FACE_SIZE)
    STEPS_THRESHOLD = [float(i) for i in config.STEPS_THRESHOLD.split(",")]

    detectors = [None, None, None]
    prefix = [MODEL_PATH + "/PNet_landmark/PNet",
              MODEL_PATH + "/RNet_landmark/RNet",
              MODEL_PATH + "/ONet_landmark/ONet"]
    epoch = [18, 14, 16]
    model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
    PNet = FcnDetector(P_Net, model_path[0])
    detectors[0] = PNet
    RNet = Detector(R_Net, 24, 1, model_path[1])
    detectors[1] = RNet
    ONet = Detector(O_Net, 48, 1, model_path[2])
    detectors[2] = ONet
    mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=MIN_FACE_SIZE, threshold=STEPS_THRESHOLD)

    return mtcnn_detector
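A possible call site for load_mtcnn(), sketched under the assumption that detect_face accepts a list of images as in the other examples here; the image path is a placeholder.

# Hedged usage sketch; 'face.jpg' is a placeholder path
mtcnn_detector = load_mtcnn()
image = cv2.imread("face.jpg")
boxes, landmarks = mtcnn_detector.detect_face([image])
print("detected %d face(s)" % len(boxes[0]))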
Code Example #9
def test(stage, profiling):
    print("Start Detecting")
    detectors = [None, None, None]
    if stage in ['pnet', 'rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/pnet/')
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('pnet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))  # auto match a max epoch model
        modelPath = os.path.join(modelPath, "pnet-%d" % (maxEpoch))
        print("Use PNet model: %s" % (modelPath))
        detectors[0] = FcnDetector(P_Net, modelPath, profiling)
    if stage in ['rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/rnet/')
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('rnet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "rnet-%d" % (maxEpoch))
        print("Use RNet model: %s" % (modelPath))
        detectors[1] = Detector(R_Net, 24, 1, modelPath, profiling)
    if stage in ['onet']:
        modelPath = os.path.join(rootPath, 'tmp/model/onet/')
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('onet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "onet-%d" % (maxEpoch))
        print("Use ONet model: %s" % (modelPath))
        detectors[2] = Detector(O_Net, 48, 1, modelPath, profiling)
    mtcnnDetector = MtcnnDetector(detectors=detectors,
                                  min_face_size=24,
                                  threshold=[0.9, 0.6, 0.7])

    # Now to detect
    camID = 0
    cap = cv2.VideoCapture(camID)
    while True:
        ret, image = cap.read()
        if not ret:
            break
        [h, w] = image.shape[:2]
        print(h, w)
        #image_data = cv2.flip(image, 1)
        #image_data = cv2.flip(image, 1)
        image_data = image
        start_time = time.time()
        testImages = []
        testImages.append(image_data)
        allBoxes, allLandmarks = mtcnnDetector.detect_face(testImages)
        inf_time = time.time() - start_time
        print("inference time(s): {}".format(inf_time))
        del testImages[0]
        #print("allBoxes: {}".format(allBoxes))
        #print("allLandmarks: {}".format(allLandmarks))
        #print("\n")

        # Draw detection results on the frame
        if len(allBoxes) >= 1:
            for bbox in allBoxes[0]:
                cv2.putText(image_data,
                            str(np.round(bbox[4], 2)),
                            (int(bbox[0]), int(bbox[1])),
                            cv2.FONT_HERSHEY_TRIPLEX,
                            1,
                            color=(255, 0, 255))
                cv2.rectangle(image_data,
                              (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])),
                              (0, 0, 255))
            allLandmark = allLandmarks[0]
            if allLandmark is not None:  # pnet and rnet stages do not return landmarks
                for landmark in allLandmark:
                    for i in range(len(landmark) // 2):
                        cv2.circle(image_data,
                                   (int(landmark[2 * i]), int(landmark[2 * i + 1])),
                                   3, (255, 255, 255))
        cv2.imshow('Face/Landmark Detection', image_data)
        k = cv2.waitKey(1) & 0xff
        if k == ord('q') or k == 27:
            break
    cap.release()
Code Example #10
def test(stage, testFolder):
    print("Start testing in %s" % (testFolder))
    detectors = [None, None, None]
    if stage in ['pnet', 'rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/origin/model/pnet')
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('pnet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))  # auto match a max epoch model
        modelPath = os.path.join(modelPath, "pnet-%d" % (maxEpoch))
        print("Use PNet model: %s" % (modelPath))
        detectors[0] = FcnDetector(P_Net, modelPath)
    if stage in ['rnet', 'onet']:
        modelPath = os.path.join(rootPath, 'tmp/origin/model/rnet')
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('rnet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "rnet-%d" % (maxEpoch))
        print("Use RNet model: %s" % (modelPath))
        detectors[1] = Detector(R_Net, 24, 1, modelPath)
    if stage in ['onet']:
        modelPath = os.path.join(rootPath, 'tmp/origin/model/onet')
        a = [
            b[5:-6] for b in os.listdir(modelPath)
            if b.startswith('onet-') and b.endswith('.index')
        ]
        maxEpoch = max(map(int, a))
        modelPath = os.path.join(modelPath, "onet-%d" % (maxEpoch))
        print("Use ONet model: %s" % (modelPath))
        detectors[2] = Detector(O_Net, 48, 1, modelPath)
    mtcnnDetector = MtcnnDetector(detectors=detectors,
                                  min_face_size=50,
                                  threshold=[0.8, 0.9, 0.9])

    fileFoldName = "faceListInt.txt"

    outFilename = 'F:/software/yansan/MTCNN-on-FDDB-Dataset-master/FDDB-folds/' + 'predict.txt'  # fileOutName
    foldFilename = 'F:/software/yansan/MTCNN-on-FDDB-Dataset-master/FDDB-folds/' + fileFoldName

    prefixFilename = 'E:/database/FDDB_Face Detection Data Set and Benchmark/'

    fout = open(outFilename, 'a+')

    f = open(foldFilename, 'r')  # FDDB-fold-00.txt, read
    for imgpath in tqdm(f.readlines()):
        testImages = []
        imgpath = imgpath.split('\n')[0]
        # foutOnce.write(imgpath+'\n')
        # foutFold.write(imgpath+'\r')
        img = cv2.imread(prefixFilename + imgpath + '.jpg')
        if img is None:
            continue
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR
        testImages.append(img)
        boundingboxes, points = mtcnnDetector.detect_face(testImages)
        # boundingboxes, points = demo.detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)

        text1 = str(imgpath) + '\n' + str(len(boundingboxes[0])) + '\n'
        fout.write(text1)  # FDDB-fold-%02d-out.txt or predict.txt

        for bbox in boundingboxes[0]:
            # print(bbox,"???")
            text2 = str(int(bbox[0])) + ' ' + str(int(bbox[1])) + ' ' \
                    + str(abs(int(bbox[2] - bbox[0]))) + ' ' \
                    + str(abs(int(bbox[3] - bbox[1]))) + ' ' \
                    + str(bbox[4]) + '\n'

            fout.write(text2)  # FDDB-fold-%02d-out.txt or predict.txt
            # text2 = str(int(boundingboxes[coordinate][0][0]))

            # fout.write(text2)  # FDDB-fold-%02d-out.txt or predict.txt

    # print error
    f.close()  # input the fold list, FDDB-fold-00.txt
    fout.close()  # output the result, predict.txt
Code Example #11
                           (0, 0, 255), 2)

            cv2.imwrite(join('output', sub_labels.image_id.iloc[i]), img)

#%% model initialize
# test_mode = config.test_mode
test_mode = 'ONet'
thresh = [0.6, 0.7, 0.8]
min_face_size = 24
stride = 2
detectors = [None, None, None]

# model locations
model_path = ['model/PNet/', 'model/RNet/', 'model/ONet']
batch_size = [2048, 256, 32]
PNet = FcnDetector(P_Net, model_path[0])  # detector for PNet
detectors[0] = PNet

# in and output path
path = 'picture'
out_path = 'output'

detectors[1] = Detector(R_Net, 24, batch_size[1], model_path[1])
detectors[2] = Detector(O_Net, 48, batch_size[2], model_path[2])

# Use the three detectors to construct an MtcnnDetector
mtcnn_detector = MtcnnDetector(detectors=detectors,
                               min_face_size=min_face_size,
                               stride=stride,
                               threshold=thresh,
                               scale_factor=0.909)
Code Example #12
from detection.fcn_detector import FcnDetector
from train.model import P_Net, R_Net, O_Net
import train.config as config

test_mode = config.test_mode
thresh = config.thresh
min_face_size = config.min_face
stride = config.stride
detectors = [None, None, None]
# model locations
model_path = ['model/PNet/', 'model/RNet/', 'model/ONet']
batch_size = config.batches
PNet = FcnDetector(P_Net, model_path[0])
detectors[0] = PNet


if test_mode in ["RNet", "ONet"]:
    RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
    detectors[1] = RNet


if test_mode == "ONet":
    ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
    detectors[2] = ONet

mtcnn_detector = MtcnnDetector(detectors=detectors, min_face_size=min_face_size,
                               stride=stride, threshold=thresh)
out_path = config.out_path