Example #1
def test(args):
    # extract features for every face image and save them to a pkl file
    model = face_model.FaceModel(args)
    pathss = []
    for root, dirs, files in os.walk("/home/huangju/dataset/face_imgs_detected_larger"):
        path = [os.path.join(root, name) for name in files]
        #print(path)
        pathss.extend(path)
    #print(pathss)
    #print("finish")
    count = 0  # number of face images whose features were extracted
    d = {}  # key: path, value: feature
    for path in tqdm(pathss):
        fname = path.split('/')[-1]
        if fname.split("-")[0] == "train" or fname.split("-")[0] == "query":
            continue
        img = cv2.imread(path)
        img = model.get_input(img)
        if img is not None:
            feature = model.get_feature(img)
            key = "data/msmt17/test/" + fname.split('-')[1] + "/" + fname.split('-')[-1]
            d[key] = feature
            count += 1
            #print("count: {}".format(count))
    

    # with open("msmt_face_q_t-own2.pkl","wb") as f: 
    #     pickle.dump(d,f)
    with open("face_feature-surv_ms1m_msmt.pkl","wb") as f: 
        pickle.dump(d,f)
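These snippets are excerpts from larger scripts and rely on module-level imports that the listing does not show. A plausible shared preamble (a sketch; the exact set varies per example):

# Imports assumed by most examples on this page (not part of the original snippets)
import os
import pickle

import cv2
import numpy as np
from tqdm import tqdm

import face_model  # insightface's deploy/face_model.py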
Example #2
def mkface(imgs_dir, faces_save_dst):
    args = Args()
    model = face_model.FaceModel(args)

    if not os.path.exists(faces_save_dst):
        os.mkdir(faces_save_dst)

    folders = os.listdir(imgs_dir)

    for folder in folders:
        imgs = os.listdir(os.path.join(imgs_dir, folder))
        n = len(imgs)
        cnt = 0
        for img in imgs:
            start = time.time()
            img_root_path = os.path.join(imgs_dir, folder, img)
            try:
                pic = cv2.imread(img_root_path)
                pic = model.get_input(pic)  # 3 * 112 * 112
                if isinstance(pic, np.ndarray):
                    if not os.path.exists(os.path.join(faces_save_dst, folder)):
                        os.mkdir(os.path.join(faces_save_dst, folder))
                    # CHW RGB -> HWC BGR so cv2.imwrite stores the colors correctly
                    cv2.imwrite(os.path.join(faces_save_dst, folder, folder + str(cnt) + '.jpg'),
                                np.transpose(pic, (1, 2, 0))[:, :, ::-1])
                    cnt += 1
                end = time.time()
                interval = end - start
            except Exception:
                continue
Example #3
def test_Faces(args):
    model = face_model.FaceModel(args)
    imgs = os.listdir(args.image_path)
    # set the two thresholds used to decide whether the person is in the gallery
    a = zip([args.threshold1], [args.threshold2])
    for k, v in a:
        for img in imgs:
            flag = False  # reset per image so a previous match does not carry over
            pic = cv2.imread(os.path.join(args.image_path, img))
            pic = model.get_input(pic)

            if pic is None:
                continue
            else:
                f1 = model.get_feature(pic)
                for i in range(faces.shape[0]):
                    cnt = 0
                    for j in range(faces.shape[1]):
                        dist = np.sqrt(np.sum(np.square(f1 - faces[i][j])))
                        # distance to this gallery image is below threshold 1
                        if dist < k:
                            cnt += 1
                    # enough close gallery matches (threshold 2): identify as this person
                    if cnt >= v:
                        name = labels[i]
                        testresult.writelines(
                            os.path.join(args.image_path, img) + ' is ' +
                            name + '\n')
                        flag = True
                if flag is False:
                    print("image: " + os.path.join(args.image_path, img) +
                          ' is not in the gallery, refused!')
    testresult.close()
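test_Faces also relies on globals defined elsewhere in the original script: faces (gallery embeddings with shape [num_people, num_images_per_person, emb_dim]), labels (one name per gallery row), and testresult (an open output file). A minimal sketch of how such a gallery could be built with the same FaceModel API (the directory layout and names are assumptions):

# Hypothetical gallery construction for test_Faces; gallery_dir layout is an assumption
def build_gallery(model, gallery_dir):
    labels, rows = [], []
    for person in sorted(os.listdir(gallery_dir)):
        feats = []
        for fname in os.listdir(os.path.join(gallery_dir, person)):
            img = cv2.imread(os.path.join(gallery_dir, person, fname))
            aligned = model.get_input(img)
            if aligned is not None:
                feats.append(model.get_feature(aligned))
        if feats:
            labels.append(person)
            rows.append(np.stack(feats))
    # np.stack requires the same number of images per person
    return np.stack(rows), labels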
Example #4
def main(args):
    if sys.version_info < (3, 0):
        print("Error: Python2 is slow. Use Python3 for max performance.")
    if args.detector and args.encoder:
        try:
            global g_detector, g_encoder, output_dir, is_save_face, model
            output_dir = join(CURRENT_DIR, 'output_face')
            is_save_face = int(args.is_save_face)
            g_detector = FaceDetectorModels(int(args.detector))
            g_encoder = FaceEncoderModels(int(args.encoder))
            # init insightface model
            model = face_model.FaceModel(args)
            print("Parameters: {} {} {}".format(g_detector, g_encoder,
                                                args.data_path))
            print("reading images...")

            pickle_embs = parallel_extract(args.data_path,
                                           num_worker=1,
                                           min_photo=MIN_PHOTO)

            # with open(face_embeddings_path, 'rb') as handle:
            #     load_check = pickle.load(handle)

            # X, Y = seriallize_face_encodings(face_embeddings_path, 10)
            print('finished...')
        except Exception as ex:
            print("Invalid parameter")
            print(ex)
        return
Example #5
 def load_model(self):
     parser = argparse.ArgumentParser(description='face model test')
     # general
     parser.add_argument('--image-size', default='112,112', help='')
     parser.add_argument('--model',
                         default='../recognition/models/y2-iccv/model, 01',
                         help='path to load model.')
     parser.add_argument('--ga-model',
                         default='',
                         help='path to load model.')
     parser.add_argument('--gpu', default=0, type=int, help='gpu id')
     parser.add_argument(
         '--det',
         default=0,
         type=int,
         help='mtcnn option, 1 means using R+O, 0 means detect from beginning'
     )
     parser.add_argument('--flip',
                         default=0,
                         type=int,
                         help='whether do lr flip aug')
     parser.add_argument('--threshold',
                         default=1.24,
                         type=float,
                         help='ver dist threshold')
     args = parser.parse_args()
     return face_model.FaceModel(args)
Example #6
def main(args):

    # database is used by only one function, which is not called directly from main but during detection inside other functions, so it is declared as a global for cross-function access
    global database
    model = face_model.FaceModel(args)
    print("load model")
    if args.npy:
        database = load_database_by_npy(args)
        print("load npy database")
    else:
        database = load_database(args.database_path, model)
        print("load image database")
    
    # choose the working mode
    if args.pattern == 'camera':
        detect(args, model, camera=True)
    elif args.pattern == 'video':
        detect(args, model)
    elif args.pattern == 'image':
        detect_image(args, model)
    else:
        print("Error: unknown pattern!")
        del database
        exit(0)
    del database
Example #7
def compute_sim():
    model = face_model.FaceModel(args)
    out_txt = open(
        '/media/dhao/系统/05-weiwei/FR/politician_code/result_dist.txt', 'a')
    file = open('/media/dhao/系统/05-weiwei/FR/politician_code/same_pairs.txt',
                'r')
    lines = file.readlines()
    for line in lines:
        name1 = line.strip().split(',')[0]
        name2 = line.strip().split(',')[1]
        try:
            img1 = cv2.imread(name1)
            img1 = model.get_input(img1)
            f1 = model.get_feature(img1)

            img2 = cv2.imread(name2)
            img2 = model.get_input(img2)
            f2 = model.get_feature(img2)

            dist = np.sqrt(np.sum(np.square(f1 - f2)))
            out_txt.writelines(name1 + ',' + name2 + ',' + str(dist) +
                               '\n')
        except Exception:
            continue

    file.close()
    out_txt.close()
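In the reference insightface implementation, FaceModel.get_feature returns L2-normalized embeddings, so the Euclidean distance computed above maps directly onto cosine similarity via ||f1 - f2||^2 = 2 - 2*cos(f1, f2). A small conversion helper (valid only under that unit-norm assumption):

# Valid only for unit-norm embeddings: dist**2 == 2 - 2 * cosine_similarity
def dist_to_cosine(dist):
    return 1.0 - (dist ** 2) / 2.0

# e.g. the common verification threshold dist < 1.24 corresponds to cos_sim > 0.2312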
Example #8
 def __init__(self, shape, model_path):
     args = edict({
         "image_size": "%d,%d" % (shape[0], shape[1]),
         "model": model_path + ",0",
         "gpu": 0,  #1,
         "threshold": 1.24,
     })
     self.model = face_model.FaceModel(args)
Example #9
 def __init__(self, args):
     self.le = args.le
     self.data = pickle.loads(open(args.embeddings, "rb").read())
     self.embeddings = np.array(self.data['embeddings'])
     self.labelEncode = pickle.loads(open(self.le, "rb").read())
     self.labels = self.labelEncode.fit_transform(self.data['names'])
     self.detector = MTCNN()
     self.embedding_model = face_model.FaceModel(args)
     self.classifying_model = load_model(args.model_classify)
Example #10
def video_test_Faces(args):
    start = time.time()

    model = face_model.FaceModel(args)
    # set the two thresholds used to decide whether the person is in the gallery
    a = zip([args.threshold1], [args.threshold2])
    for k, v in a:
        cap = cv2.VideoCapture("/home/lzc274500/nohelmet_face_detect/02.mp4")
        while True:
            ret, frame = cap.read()
            if not ret:  # stop at the end of the stream
                break
            pic, bboxs = face_detecter_align(frame)

            if pic is None:  # skip frames where no face was found
                continue
            else:
                for m in range(1, pic.shape[0] // 3 + 1):
                    # print("m", m)
                    apic = pic[(3 * m - 3):m * 3]
                    abbox = bboxs[4 * m - 4:(4 * m)]
                    #apoints = points[10*m-10:10*m]
                    cv2.rectangle(frame, (int(abbox[0]), int(abbox[1])),
                                  (int(abbox[2]), int(abbox[3])), (0, 255, 0),
                                  2)
                    # for l in range(0, 5):
                    #     cv2.circle(frame, (int(apoints[l]), int(apoints[l + 5])), 2, (0, 255, 0), 2)
                    f1 = model.get_feature(apic)
                    maxl = []
                    for i in range(faces.shape[0]):
                        cnt = 0
                        for j in range(faces.shape[1]):
                            dist = np.sqrt(np.sum(np.square(f1 - faces[i][j])))
                            # distance to this gallery image is below threshold 1
                            if dist < k:
                                cnt += 1
                        z = [i, cnt]
                        maxl = maxl + z
                    e = maxl[1::2].index(max(maxl[1::2]))
                    # identify as the person with the most matches if the count reaches threshold 2
                    if max(maxl[1::2]) >= v:
                        name = labels[e]
                    else:
                        name = "unknown"
                    cv2.putText(frame, name,
                                (int(abbox[0]), int(abbox[1] - 10)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            #out.write(frame)
            cv2.imshow("Demo", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.release()  # release the camera
        #out.release()
        cv2.destroyAllWindows()
        #cv2.imwrite('chip_' + str(img) + '.png', draw)
    testresult.close()
    end = time.time()
    print('Running time: {} Seconds'.format(end - start))
Example #11
 def __init__(self, arguments, mx_context):
     self.args = arguments
     self.ctx = mx_context
     self.model = face_model.FaceModel(self.args)  # args is not defined in this scope
     self.detector = MtcnnDetector(model_folder='mtcnn-model/',
                                   ctx=self.ctx,
                                   num_worker=4,
                                   accurate_landmark=False)
     self.names = None  # Names of the persons in the dataset
     self.dataset = None  # Collection of features of known names
Example #12
def val(input_image):
    model = face_model.FaceModel(args)
    img = model.get_input(input_image)
    f2 = model.get_feature(img)  # feed the aligned face, not the raw input image
    features = {}
    for i in range(len(f2)):
        features['feature' + str(i)] = str(f2[i])
    app_json = json.dumps(features)
    #print(app_json)
    return app_json
Example #13
def img_to_vec(img):
    model = face_model.FaceModel(args)
    img = cv2.imread(img)
    img = model.get_input(img)
    if img is None:
        print('------------------No face detected or Multiple faces detected')
        return None
    else:
        print('----------------feature')
        f1 = model.get_feature(img)
        return f1.tolist()
Example #14
class Recognition(Singleton):
    parser = argparse.ArgumentParser(description='face model test')
    parser.add_argument('--image-size', default='112,112', help='')
    parser.add_argument('--model', default='./insightface/models/model-r34-amf/model,0', help='path to load model.')
    parser.add_argument('--ga-model', default='', help='path to load model.')
    parser.add_argument('--gpu', default=0, type=int, help='gpu id')
    parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from beginning')
    parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
    parser.add_argument('--threshold', default=1.24, type=float, help='ver dist threshold')
    args = parser.parse_args()
    extractor = face_model.FaceModel(args)
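The Singleton base class is not included in this excerpt; a common implementation it might rely on (an assumption, not the original code) is:

# Assumed Singleton base: every instantiation returns the same object
class Singleton(object):
    _instance = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super(Singleton, cls).__new__(cls)
        return cls._instance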
Example #15
 def __init__(self, arguments, mx_context):
     self.args = arguments
     self.ctx = mx_context
     self.model = face_model.FaceModel(self.args)  # args is not defined in this scope
     rtpath, epoch = self.args.rt_model.split(',')
     self.detector = RetinaFace(rtpath, int(epoch), self.args.gpu, 'net3')
     self.dataset = None  # Collection of features of known names
     self.names = {}  # Names of known person
     self.persons = []  # List of person detected
     self.crop_resolution = int(
         self.args.image_size.split(',')[0])  # resolution to crop the person's face
     self.pv = [float(p) for p in self.args.poses.split(',')]
Example #16
def test():
    model = face_model.FaceModel(args)
    for i in range(1):
        cnt = 0
        out_txt = open(
            '/media/dhao/系统/05-weiwei/FR/dataset/test_result/combine_test/1.38/result'
            + str(i + 1) + '.txt', 'a')
        file = open(
            '/media/dhao/系统/05-weiwei/FR/politician_code/same_pairs.txt', 'r')
        lines = file.readlines()
        for line in lines:
            name1 = line.strip().split(',')[0]
            name2 = line.strip().split(',')[1]
            try:

                img1 = cv2.imread(name1)
                img1 = model.get_input(img1)
                f1 = model.get_feature(img1)

                img2 = cv2.imread(name2)
                img2 = model.get_input(img2)
                f2 = model.get_feature(img2)

                dist = np.sqrt(np.sum(np.square(f1 - f2)))

                if dist < args.threshold:
                    # print('they are the same one')
                    out_txt.writelines(name1 + ',' + name2 + ',' + str(dist) +
                                       ',' + str(args.threshold) + '*' * 3 +
                                       str(1) + '\n')

                else:
                    out_txt.writelines(name1 + ',' + name2 + ',' + str(dist) +
                                       ',' + str(args.threshold) + '*' * 3 +
                                       str(0) + '\n')
                    bad_case_folder = '/media/dhao/系统/05-weiwei/FR/dataset/test_result/combine_test/1.38/all' + str(
                        i) + '/' + str(cnt) + '/'
                    if not os.path.exists(bad_case_folder):
                        os.makedirs(bad_case_folder)
                    if os.path.isdir(bad_case_folder):
                        shutil.copy(name1, bad_case_folder)
                        shutil.copy(name2, bad_case_folder)
                        cnt += 1
                    else:
                        sys.exit(0)

            except Exception:
                continue

        file.close()
        out_txt.close()
Example #17
def main(readerd, readern):
    args = parse_arguments()
    skip_num = 0

    model = face_model.FaceModel(args)

    name = []
    for row in readern:
        name.append(row[0])
    
    embedded = []
    for row in readerd:
        embedded.append(np.asarray(row, dtype=np.float32))

    conn, addr = s.accept()
    data = b""
    payload_size = struct.calcsize(">L")

    while True:
        while len(data) < payload_size:
            data += conn.recv(4096)

        packed_msg_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack(">L", packed_msg_size)[0]
       
        while len(data) < msg_size:
            data += conn.recv(4096)
        frame_data = data[:msg_size]
        data = data[msg_size:]

        frame = pickle.loads(frame_data)
        frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)

        # frame skipping: the frame has already been consumed from the socket,
        # so skipped frames are genuinely dropped rather than deferred
        if skip_num % args.skip_frame != 0:
            skip_num += 1
            continue

        if frame is not None:
            ret = face_recognition(frame, model, embedded, name)
            if ret == 1:
                break
            print("RET = ", ret)
            data_string = pickle.dumps(ret)
            conn.send(data_string)

        skip_num += 1 

    # When everything is done, release the capture
    cv2.destroyAllWindows()
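The receive loop expects length-prefixed messages: a 4-byte big-endian size header (struct format ">L") followed by a pickled, cv2.imencode-compressed frame. A matching sender sketch (the socket setup and JPEG encoding are assumptions consistent with the reader side):

# Hypothetical sender for the ">L"-prefixed frame protocol read above
import pickle
import struct

import cv2

def send_frame(sock, frame):
    ok, encoded = cv2.imencode('.jpg', frame)  # the reader calls cv2.imdecode
    if not ok:
        return
    payload = pickle.dumps(encoded)  # the reader calls pickle.loads
    sock.sendall(struct.pack(">L", len(payload)) + payload)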
Example #18
    def __init__(self):
        parser = argparse.ArgumentParser(description='face model test')
        # general
        parser.add_argument('--image-size', default='112,112', help='')
        parser.add_argument('--model', default='insightface/models/model-r100-ii/model,0', help='path to load model.')
        parser.add_argument('--ga-model', default='', help='path to load model.')
        parser.add_argument('--gpu', default=0, type=int, help='gpu id')
        parser.add_argument('--flip', default=0, type=int, help='whether do lr flip aug')
        args = parser.parse_args()
        self.embedder = face_model.FaceModel(args)

        self.face_db = FaceDatabase()
        self.features = []
        self.ids = []
Example #19
 def __init__(self,
              model_path,
              epoch_num='0000',
              image_size=(112, 112),
              no_face_raise=True):
     self.model_path = ','.join([model_path, epoch_num])
     self.no_face_raise = no_face_raise
     args = argparse.Namespace()
     args.model = self.model_path
     args.det = 0
     args.flip = 0
     args.threshold = 1.24
     args.ga_model = ''
     args.image_size = ",".join([str(i) for i in image_size])
     self.model = face_model.FaceModel(args)
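The class name is not visible in this excerpt; assuming it were called ArcFaceEmbedder, usage might look like:

# Hypothetical usage; class name, model prefix, and image path are all assumptions
embedder = ArcFaceEmbedder('models/model-r100-ii/model', epoch_num='0000')
img = cv2.imread('face.jpg')
aligned = embedder.model.get_input(img)  # returns None (or raises) when no face is found
feature = embedder.model.get_feature(aligned)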
Example #20
def compare_image():
    model = face_model.FaceModel(args)
    img1 = cv2.imread('./test_image/1.png')
    img2 = cv2.imread('./test_image/85.jpg')

    time1 = time.time()
    img1, _ = model.get_input(img1)
    img2, _ = model.get_input(img2)
    time2 = time.time()
    #f1 = model.get_feature(img)
    #print(f1[0:10])
    print(img1.shape)
    f1 = model.get_feature(img1).flatten()
    f2 = model.get_feature(img2).flatten()
    sim = sklearn.metrics.pairwise.cosine_similarity([f1, f2])
    print(sim)
    time3 = time.time()
Example #21
def age_model():
    parser = argparse.ArgumentParser(description='face model test')
    # general
    parser.add_argument('--image-size', default='112,112', help='')
    parser.add_argument('--image', default='Tom_Hanks_54745.png', help='')
    parser.add_argument('--model',
                        default='model/model,0',
                        help='path to load model.')
    parser.add_argument('--gpu', default=0, type=int, help='gpu id')
    parser.add_argument(
        '--det',
        default=0,
        type=int,
        help='mtcnn option, 1 means using R+O, 0 means detect from beginning')
    args = parser.parse_args()

    return face_model.FaceModel(args)
Example #22
def gen_feature(args):
    #loading model
    model = face_model.FaceModel(args)
    no_get_feature = 0
    rm_dim_number = 0
    #get feature
    feature_label_id = {"feature": [], "label": [], "id": []}
    output_dir = args.output_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    label_list = os.listdir(args.data_path)
    print("class number", len(label_list))
    for i_id in range(len(label_list)):
        label_path = os.path.join(args.data_path, label_list[i_id])
        if os.path.isdir(label_path):
            print("processing: {}/{}".format(i_id, len(label_list)))
            for img in os.listdir(label_path):
                img_path = os.path.join(label_path, img)
                # print('img_path', img_path)
                try:
                    # img_RGB = cv2.imread(img_path)
                    # dim = rm_dim.rm_main(img_RGB, args.dim_threshold)
                    # if dim == 1:
                    #     rm_dim_number = rm_dim_number + 1
                    #     continue
                    # img_input = model.get_input(img_RGB)
                    img_rgb = misc.imread(img_path)
                    img_input = np.transpose(img_rgb, (2, 0, 1))  # HWC -> CHW
                    feature = model.get_feature(img_input)
                    feature_label_id['feature'].append(feature)
                    feature_label_id['label'].append(label_list[i_id])
                    feature_label_id['id'].append(i_id)
                except Exception:
                    no_get_feature = no_get_feature + 1
                    print("failed to get feature", img_path)
                    continue

    print('dim_images_number', rm_dim_number)
    print('failed to get feature', no_get_feature)
    print('labels successfully extracted', len(feature_label_id['label']),
          len(feature_label_id['feature']))
    np.save(os.path.join(output_dir, "labels_name.npy"),
            feature_label_id['label'])
    np.save(os.path.join(output_dir, "gallery.npy"), feature_label_id['id'])
    np.save(os.path.join(output_dir, "signatures.npy"),
            feature_label_id['feature'])
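The three .npy files written here form a simple gallery. Loading them back for nearest-neighbor matching could look like this (a sketch; the distance threshold is an assumption):

# Hypothetical loader for the gallery saved by gen_feature
signatures = np.load(os.path.join(output_dir, "signatures.npy"))
names = np.load(os.path.join(output_dir, "labels_name.npy"))

def match(feature, threshold=1.24):  # threshold value is an assumption
    dists = np.sqrt(np.sum(np.square(signatures - feature), axis=1))
    best = int(np.argmin(dists))
    return names[best] if dists[best] < threshold else None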
Example #23
def test_all():
    im_path = '/media/dhao/系统/05-weiwei/FR/dataset/测试集'
    file = open('/media/dhao/系统/05-weiwei/FR/dataset/bad_case.txt', 'a')
    # face detection
    folders = os.listdir(im_path)
    cnt = 0
    name_list = []
    model = face_model.FaceModel(args)  # load the model once instead of once per image pair
    for folder in folders:
        random.seed(time.time() * 100000 % 10000)
        sublist = []
        imgs = os.listdir(os.path.join(im_path, folder))
        random.shuffle(imgs)
        for i in range(0, len(imgs), 2):
            try:
                pic_path1 = os.path.join(im_path, folder, imgs[i])
                img1 = cv2.imread(pic_path1)
                img1 = model.get_input(img1)
                f1 = model.get_feature(img1)

                pic_path2 = os.path.join(im_path, folder, imgs[i + 1])
                img2 = cv2.imread(pic_path2)
                img2 = model.get_input(img2)
                f2 = model.get_feature(img2)

                dist = np.sum(np.square(f1 - f2))
                if dist < args.threshold:
                    print('they are the same one')
                else:
                    # pdb.set_trace()
                    bad_case_folder = '/media/dhao/系统/05-weiwei/FR/dataset/test_result/' + str(
                        cnt)
                    os.mkdir(bad_case_folder)
                    shutil.copy(pic_path1, bad_case_folder)
                    shutil.copy(pic_path2, bad_case_folder)
                    cnt += 1
                    file.writelines(pic_path1 + ',' + pic_path2 + ',' +
                                    str(dist) + ',' + str(args.threshold) +
                                    '\n')
                    print('they are different people')
                print(dist)

            except Exception:
                continue
    file.close()
Example #24
def get_emb_array(paths, args):
    insight_model = face_model.FaceModel(args)
    #num_of_images = len(paths)
    emb_array = []
    cnt = 1
    for path in paths:
        if cnt % 50 == 0:
            print(str(cnt) + '     path:' + path)
        cnt += 1
        img = cv2.imread(path)
        img_tem = insight_model.get_input(img)
        if img_tem is not None:
            img = img_tem
        else:
            img = np.transpose(img, (2, 0, 1))

        emb_array.append(insight_model.get_feature(img))

    emb_array = np.array(emb_array)
    return emb_array
Example #25
def main(args):
    arcface_model = face_model.FaceModel(args)
    gender_model = 'gender/face_model.pkl'
    with open(gender_model, 'rb') as f:
        clf, labels = pickle.load(f, encoding='latin1')
    video_capture = cv2.VideoCapture(0)
    classifier_path = "trained_classifier/classifier.pkl"
    with open(classifier_path, 'rb') as f:
        (model, class_names) = pickle.load(f)
        print("Loaded classifier file")

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.7)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            # Bounding box
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, "align")

    # Using two threads: one for recognition, the other for detection, tracking, and showing the results
    p1 = ThreadWithReturnValue(target=detect_align_recognize,
                               args=(
                                   video_capture,
                                   pnet,
                                   rnet,
                                   onet,
                                   model,
                                   class_names,
                                   clf,
                                   labels,
                                   arcface_model,
                               ))
    p2 = ThreadWithReturnValue(target=track_and_show,
                               args=(video_capture, ))  #, clf, labels

    p1.start()
    p2.start()

    p1.join()
    p2.join()
Example #26
    def load_model(self):
        self.db = DBManagement()
        # Initialize our lists of extracted facial embeddings and corresponding people names
        self.features = []
        self.ids = []
        #self.genders = []
        #self.ages = []
        #self.clusters = []
        #self.fileNames = []

        parser = argparse.ArgumentParser()
        parser.add_argument('--image-size', default='112,112', help='')
        parser.add_argument(
            '--model',
            default='insightface/models/model-y1-test2/model,0',
            help='path to load model.')
        parser.add_argument('--ga-model',
                            default='insightface/models/gamodel-r50/model,0',
                            help='path to load model.')
        parser.add_argument('--gpu', default=0, type=int, help='gpu id')
        parser.add_argument(
            '--det',
            default=0,
            type=int,
            help='mtcnn option, 1 means using R+O, 0 means detect from beginning'
        )
        parser.add_argument('--flip',
                            default=0,
                            type=int,
                            help='whether do lr flip aug')
        parser.add_argument('--threshold',
                            default=1.24,
                            type=float,
                            help='ver dist threshold')
        args = parser.parse_args()

        # Initialize the faces embedder
        self.model = face_model.FaceModel(args)
Example #27
def main(args):
    model = face_model.FaceModel(args)
    video_capture = cv2.VideoCapture(0)

    # init load
    img2 = cv2.imread('unknown.jpg')
    img2 = model.get_input(img2)
    features2 = model.get_feature(img2)

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        bounding_boxes, landmarks, face_img = model.get_bbox_and_landmarks(
            small_frame)
        bounding_boxes = bounding_boxes.astype(int)

        features1 = model.get_feature(face_img)
        # compare_faces
        # TODO

        for b in bounding_boxes:
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            b *= 4
            cv2.rectangle(frame, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 1)

        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()
Example #28
def main(args):
    model = face_model.FaceModel(args)
    # list the identity folders
    dir0 = os.listdir(args.image_path)
    count = len(dir0)
    for id in dir0:
        count -= 1
        if count % 3000 == 0:
            print('last num:', count)
        name_path = os.path.join(args.image_path, id)
        name_files = get_all_path(name_path)
        if len(name_files) < 4:
            shutil.rmtree(name_path)
            # print(args.image_path + id, 'is < 1 size')
            continue
        else:
            feature = []
            img_index = []
            for index in range(len(name_files)):
                file_path = name_files[index][0][1]["path"]
                img = cv2.imread(file_path)
                if img is None or img.ndim < 2:
                    os.remove(file_path)
                    continue
                # print(file_path)
                aligned = model.get_input(img)
                if aligned is None:
                    os.remove(file_path)
                    continue
                else:
                    f1 = model.get_feature(aligned)
                    feature.append(f1)
                    img_index.append(file_path)
            review_img(feature, img_index)
            name_files = os.listdir(name_path)
            if len(name_files) < 2:  # remove the folder if fewer than 2 images remain after cleaning
                shutil.rmtree(name_path)
Example #29
def main():
    args = parse_arguments()

    model = face_model.FaceModel(args)

    path = args.data_img
    name = [x for x in os.listdir(path) if x != '.DS_Store']
    files = [os.path.join(path, f) for f in name]

    with open('Data/data.csv', 'w') as csvData:
        with open('Data/name.csv', 'w') as csvName:

            writerData = csv.writer(csvData)
            writerName = csv.writer(csvName)

            for i in range(len(files)):
                file = files[i]
                img_names = os.listdir(file)
                img_paths = [os.path.join(file, f) for f in img_names]

                for img_path in img_paths:
                    print(img_path)
                    img = cv2.imread(img_path)
                    if img is None:
                        continue
                    try:
                        F1, _ = model.get_input(img)
                    except Exception:
                        continue
                    f1 = model.get_feature(F1[0])

                    writerData.writerow(f1)
                    writerName.writerow([name[i]])

Example #30
parser.add_argument(
    '--det',
    default=0,
    type=int,
    help='mtcnn option, 1 means using R+O, 0 means detect from beginning')
parser.add_argument('--flip',
                    default=0,
                    type=int,
                    help='whether do lr flip aug')
parser.add_argument('--threshold',
                    default=1.24,
                    type=float,
                    help='ver dist threshold')
args = parser.parse_args()

model = face_model.FaceModel(args)
img = cv2.imread('a.png')
img = model.get_input_aligned(img)
# aligned = np.transpose(img, (1, 2, 0))
# aligned = cv2.cvtColor(aligned, cv2.COLOR_RGB2BGR)
# img = cv2.imwrite('a.png', aligned)
# f1 = model.get_ga(img)
# print(f1)
#print(f1[0:10])
gender, age = model.get_ga(img)
print(gender)
print(age)
# sys.exit(0)
# img = cv2.imread('a.png')
# img = model.get_input(img)
# f2 = model.get_gender(img)