Example #1
def main(args):
    train_x = []
    train_y = []
    face_recognition = Face.Recognition()
    data, keys = load_data('./train_data')

    if args.debug:
        print("Debug enabled")
        Face.debug = True

    temp = 0
    for index, name in enumerate(keys):
        # name = keys[index]
        for img in data[name]:
            face = face_recognition.add_identity(img, person_name=name)
            if face is not None:
                # print face.embedding
                # print face.name
                train_x.append(face.embedding)
                train_y.append(keys.index(face.name))
                # train_x = np.concatenate([train_x, face.embedding]) if train_x is not None else face.embedding
                # train_y = np.concatenate([train_y, face.name]) if train_y is not None else face.name
        print('INFO: {} has {} images available'.format(
            name,
            len(train_x) - temp))
        temp = len(train_x)
    train_x = np.array(train_x).reshape(-1, 128)
    train_y = np.array(train_y)
    # print(train_x.shape)
    # print(train_y.shape)
    train_boundary(train_x, 'boundary.model')
    train_classifier(train_x, train_y, keys, '1208.pkl')
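
Example #1 assumes a load_data helper that returns a dict of images keyed by person name together with the list of names. A minimal sketch, assuming one sub-directory per person under ./train_data and cv2-based loading (both are assumptions, not the original helper):

import os
import cv2

def load_data(data_dir):
    # Map each person's directory name to the list of their images.
    data = {}
    for name in sorted(os.listdir(data_dir)):
        person_dir = os.path.join(data_dir, name)
        if not os.path.isdir(person_dir):
            continue
        data[name] = [img for img in
                      (cv2.imread(os.path.join(person_dir, f))
                       for f in os.listdir(person_dir))
                      if img is not None]
    return data, list(data.keys())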
Example #2
def main():
    personList = []
    face_recognition = face.Recognition(min_face_size=20)
    for root, dirs, files in walk("./data/frk/"):
        personitem = Person()
        personitem.name = root
        for file in files:
            cvframe = cv2.imread(root + "/" + file)
            personImage = PersonImage()
            personImage.image = cvframe
            personImage.name = file
            personitem.personImage.append(personImage)
        personList.append(personitem)

    file = open("faruk.txt", "w")
    for personitem in personList:
        print(personitem.name)
        embedding = []
        for personimage in personitem.personImage:
            test_face = face_recognition.add_identity_3(personimage.image)
            embedding.append(test_face)
            # Distance from this image's embedding to the person's first one
            dist2 = np.linalg.norm(test_face - embedding[0])
            print('-----------------')
            print(personitem.name + '#---#' + personimage.name + '#---#' +
                  str(dist2))
            file.write(personitem.name + '#---#' + personimage.name + '#---#' +
                       str(dist2))
            file.write("\n")
            print('-----------------')
    file.close()
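
Examples #2 and #6 assume simple Person/PersonImage containers; a minimal sketch with fields inferred from usage (an assumption, not the original classes):

class PersonImage:
    def __init__(self):
        self.name = None   # image file name
        self.image = None  # the decoded cv2 image

class Person:
    def __init__(self):
        self.name = None        # directory name used as the label
        self.personImage = []   # list of PersonImage instances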
Example #3
def job0():
    input_dir = './save_image'
    output_dir = './data'
    face_recognition = face.Recognition()
    if not os.path.exists(input_dir):
        return False
    saved_list = []
    for root, dirs, files in os.walk(input_dir):
        for dir in dirs:
            # print(file)
            saved_list.append(os.path.join(root, dir))
    print(saved_list)
    if len(saved_list) == 0:
        return False
    for dir in saved_list:
        for root, dirs, files in os.walk(dir):
            saved_imgs_lens = len(files)
            print('file count', saved_imgs_lens)
            # Run the classifier check once and reuse the result.
            is_new_user = folder_vs_classifier(dir, face_recognition)
            print(is_new_user)
            if saved_imgs_lens >= 40 and is_new_user:
                print('new user', dir)
                new_dir = os.path.join(output_dir, dir.split('/')[-1])
                if not os.path.exists(new_dir):
                    os.mkdir(new_dir)
                print(new_dir)
                move_files(dir, new_dir)
                print('folder copied successfully')
                shutil.rmtree(dir)
                print('removed source folder after copying')
                return True
            else:
                print('removing folder directly')
                shutil.rmtree(dir)
                return False
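
job0 relies on a move_files helper. A plausible sketch: since job0 deletes the source directory afterwards, copying the files is enough (the helper's exact behavior is an assumption):

import os
import shutil

def move_files(src_dir, dst_dir):
    # Copy every regular file from src_dir into dst_dir; job0 removes
    # src_dir afterwards, so the net effect is a move.
    for fname in os.listdir(src_dir):
        src = os.path.join(src_dir, fname)
        if os.path.isfile(src):
            shutil.copy2(src, os.path.join(dst_dir, fname))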
Example #4
def catch(name):
    classifier = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
    color = (0, 255, 0)
    recognition = face.Recognition(1)
    cap = cv2.VideoCapture(0)  # open the camera
    while cap.isOpened():
        ret, frame = cap.read()  # read one frame
        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faceRects = classifier.detectMultiScale(
            grey, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))  # face bounding boxes
        if len(faceRects) > 0:
            for faceRect in faceRects:
                x, y, w, h = faceRect
                cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10),
                              color, 2)

        cv2.imshow("detect", frame)
        c = cv2.waitKey(30)
        if c & 0xFF == ord('c'):  # capture
            save_face = recognition.add_identity(frame, name)
            if save_face:
                cv2.imwrite("C:\\Users\\xiaomiao\\Desktop\\facenet\\face_lib\\%s.jpg" % name, frame)
                np.save("C:\\Users\\xiaomiao\\Desktop\\facenet\\face_lib\\%s.npy" % name,
                        save_face.embedding)
                print(save_face.embedding, save_face.embedding.shape)
                print("Capture succeeded!")
            else:
                print("Capture failed, try again")
            break
        if c & 0xFF == ord('q'):  # quit
            break
    cap.release()
    cv2.destroyAllWindows()
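
The .npy embeddings saved by catch() can later be compared by Euclidean distance; a brief sketch (the file names and the 1.0 threshold are illustrative assumptions):

import numpy as np

emb_a = np.load("face_lib/alice.npy")  # hypothetical saved embedding
emb_b = np.load("face_lib/bob.npy")    # hypothetical saved embedding
dist = np.linalg.norm(emb_a - emb_b)
print("distance:", dist, "- same person" if dist < 1.0 else "- different person")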
Example #5
def main():
    image = cv2.imread('../images/1-2.jpg')
    frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    face_recognition = face.Recognition()
    faces = face_recognition.identify(frame)
    add_overlays(image, faces)
    cv2.imwrite('../images/show.jpg', image)
Example #6
def main():
    personList = []
    face_recognition = face.Recognition(min_face_size=20)
    for root, dirs, files in walk("./data/frk"):
        personitem = Person()
        personitem.name = root
        for file in files:
            print(root + "/" + file)
            cvframe = cv2.imread(root + "/" + file)
            personImage = PersonImage()
            personImage.image = cvframe
            personImage.name = file
            personitem.personImage.append(personImage)
        personList.append(personitem)

    for personitem in personList:
        for personimage in personitem.personImage:
            print('-----------------')
            img = face_recognition.getFace(personimage.image)
            if img != -1:
                cv2.imwrite(personimage.name, img[0].image)
                cv2.imshow('ssdf', img[0].image)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            else:
                print(personimage.name)
Example #7
def pretreatment_saving_embedding():
    path_for_all_pic2 = os.path.dirname(__file__) + "/../data/images/data_set/"
    path_for_all_pic = os.listdir(path_for_all_pic2)
    print(path_for_all_pic)
    tool = face.Recognition()
    a = []
    for i in path_for_all_pic:
        img = misc.imread(path_for_all_pic2 + i, mode='RGB')
        tmp = tool.identify2(
            img)  # identify2 runs detect, align, and reshape to 160x160
        a.append(tmp[0].embedding)
    tmp = a
    tmp2 = path_for_all_pic

    # Pickle the embeddings and the matching file names.

    output = open('data.pkl', 'wb')

    # Dump the embedding list first...
    pickle.dump(tmp, output)

    # ...then the file-name list, in the same order.
    pickle.dump(tmp2, output)

    output.close()

    pkl_file = open('data.pkl', 'rb')
    data1 = pickle.load(pkl_file)

    data2 = pickle.load(pkl_file)
    print(data1)
    print(data2)
    pkl_file.close()
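
With data.pkl holding the embedding list and the matching file names, identifying a new face reduces to a nearest-neighbor search; a sketch (the query embedding would come from the same identify2 call as above):

import pickle
import numpy as np

def nearest_face(query_embedding, pkl_path='data.pkl'):
    # Load the two objects pickled by the function above, in order.
    with open(pkl_path, 'rb') as f:
        embeddings = pickle.load(f)
        names = pickle.load(f)
    dists = [np.linalg.norm(query_embedding - e) for e in embeddings]
    best = int(np.argmin(dists))
    return names[best], dists[best]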
Example #8
def main(args):
    frame_interval = 30  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    if args.debug:
        print("Debug enabled")
        face.debug = True

    video_capture = cv2.VideoCapture(0)
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    video_capture.set(cv2.CAP_PROP_FPS, 30)

    facenet_model_checkpoint = os.path.dirname(__file__) + args.model
    classifier_model = os.path.dirname(__file__) + args.classifier
    face_recognition = face.Recognition(facenet_model_checkpoint,
                                        classifier_model,
                                        min_face_size=20)
    start_time = time.time()

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        if not ret:
            print("Error: check if webcam is connected.")
            return

        faces = face_recognition.identify(frame)

        if (frame_count % frame_interval) == 0:
            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        new_frame = add_overlays(frame.copy(), faces, frame_rate)

        frame_count += 1
        cv2.imshow(window_name, new_frame)

        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC key
            break
        elif keyPressed == 13:  # ENTER key
            cv2.imwrite(
                window_name + "_" +
                datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg",
                frame)
            print('Screenshot saved!')

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
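
Most of the streaming examples call an add_overlays helper (its arity varies slightly between examples). A sketch of the three-argument variant, assuming the Face objects expose bounding_box and name as in the facenet contributed demo (the drawing style is an assumption):

import cv2

def add_overlays(frame, faces, frame_rate):
    # Draw a box and name label for every detected face.
    if faces is not None:
        for f in faces:
            x1, y1, x2, y2 = f.bounding_box.astype(int)
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            if f.name is not None:
                cv2.putText(frame, f.name, (x1, y2),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                            thickness=2, lineType=2)
    # Show the measured frame rate in the top-left corner.
    cv2.putText(frame, str(frame_rate) + " fps", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0),
                thickness=2, lineType=2)
    return frame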
Example #9
def main(args):
    frame_interval = 1  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0
    
    video_capture = cv2.VideoCapture("test4.mp4")
    face_recognition = face.Recognition()
    start_time = time.time()
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output2.avi', fourcc, 20.0,
                          (int(video_capture.get(3)), int(video_capture.get(4))),
                          True)

    if args.debug:
        print("Debug enabled")  
        face.debug = True

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate)

        frame_count += 1
        out.write(frame)

        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    out.release()
    cv2.destroyAllWindows()
Example #10
def main(args):
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0
    tolerance = .7

    video_capture = cv2.VideoCapture(0)
    face_recognition = face.Recognition()
    img_folder = 'c:/Users/ngminh/Documents/GITHUB/facenet/contributed/images'

    start_time = time.time()

    if args.debug:
        print("Debug enabled")
        face.debug = True

    known_faces = []

    for name in os.listdir(img_folder):
        #print(name)
        person_img = load_image_file(img_folder + '/' + name)
        person_face = face_recognition.add_identity(person_img,
                                                    name.replace('.jpg',
                                                                 ''))[0]
        known_faces.append(person_face)

    # me_img = load_image_file('./images/me1.jpg')
    # duong_img = load_image_file('./images/duong.jpg')
    # me_face = face_recognition.add_identity(me_img, 'Minh Ng')[0]
    # duong_face = face_recognition.add_identity(duong_img, 'Duong Ng')[0]
    # known_faces = [me_face, duong_face]

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame, known_faces, tolerance)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate)
        frame_count += 1
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
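
Example #10 passes known_faces and a tolerance into identify; a sketch of the per-face distance test presumably applied inside (names and signature are assumptions):

import numpy as np

def match_known_face(embedding, known_faces, tolerance=0.7):
    # Return the closest known name, or None when every distance
    # exceeds the tolerance.
    best_name, best_dist = None, tolerance
    for known in known_faces:
        dist = np.linalg.norm(embedding - known.embedding)
        if dist < best_dist:
            best_name, best_dist = known.name, dist
    return best_name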
Example #11
def main(args):
    ###############################
    # import socket
    # Start a socket listening for connections on 0.0.0.0:8000 (0.0.0.0 means
    # all interfaces)
    # server_socket = socket.socket()
    # server_socket.bind(('0.0.0.0', 8000))
    # server_socket.listen(0)
    # connection = server_socket.accept()[0].makefile('rb')
    # print(connection)
    ############################
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 20  # seconds
    # fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(0)  # Original
    print("##video_capture:", video_capture)
    face_recognition = face.Recognition()
    start_time = time.time()
    if args.debug:
        print("Debug enabled")
        face.debug = True

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        # print('While2') # Modified
        # print("frame : ", frame) # Modified
        # print("frame.shape : ", frame.shape) # Modified
        if (frame_count % frame_interval) == 0:

            faces, accuracys = face_recognition.identify(frame)
            print("in def main(args)", faces, accuracys)

            # print("faces : ", faces) # Modified

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0
        add_overlays(frame, faces, accuracys, frame_rate)
        frame_count += 1
        cv2.imshow('Video', frame)
        # break # Modified
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #12
def main(args):
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    # if args.debug:
    #     print("Debug enabled")
    #     face.debug = True

    # load the known faces and embeddings
    print("[INFO] loading encodings...")
    print(args["encodings"])
    data = pickle.loads(open(args["encodings"], "rb").read())
    names = []
    encodings = []
    for name in data['names']:
        names.append(name)
    for encoding in data['encodings']:
        encodings.append(encoding)
    sz = len(encodings)
    mtx_encodings = np.transpose(np.array(encodings))

    video_capture = cv2.VideoCapture(0)
    face_recognition = face.Recognition()
    start_time = time.time()

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.embedding(frame)

            for face1 in faces:
                face_identify(sz, names, mtx_encodings, face1)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate)

        frame_count += 1
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
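
Example #12 defers the actual matching to face_identify. A sketch that exploits the pre-transposed 128 x sz encoding matrix for one vectorized distance computation (the 0.7 threshold and the name assignment are assumptions):

import numpy as np

def face_identify(sz, names, mtx_encodings, face1, tolerance=0.7):
    # Broadcasting the embedding as a column vector against the
    # 128 x sz matrix yields all sz distances at once.
    dists = np.linalg.norm(mtx_encodings - face1.embedding[:, np.newaxis],
                           axis=0)
    best = int(np.argmin(dists))
    face1.name = names[best] if dists[best] < tolerance else 'unknown'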
Example #13
def main(args):

    images, cout_per_image, nrof_samples = load_and_align_data(
        args.image_files, args.image_size, args.margin,
        args.gpu_memory_fraction)
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model)
            face_recognition = face.Recognition()
            frame = cv2.imread(str(args.image_files[0]), 1)
            frame = cv2.resize(frame, (0, 0), fx=0.5,
                               fy=0.5)  #resize frame (optional)
            faces = face_recognition.identify(frame)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {
                images_placeholder: images,
                phase_train_placeholder: False
            }
            emb = sess.run(embeddings, feed_dict=feed_dict)
            classifier_filename_exp = os.path.expanduser(
                args.classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pickle.load(infile)
            #print('Loaded classifier model from file "%s"\n' % classifier_filename_exp)
            predictions = model.predict_proba(emb)
            best_class_indices = np.argmax(predictions, axis=1)
            best_class_probabilities = predictions[
                np.arange(len(best_class_indices)), best_class_indices]
            num_exactly = round(best_class_probabilities[0] * 100, 2)
            k = 0
            #print predictions
            for i in range(nrof_samples):
                print("\npeople in image %s :" % (args.image_files[i]))
                for j in range(cout_per_image[i]):
                    print('%s: %.3f' % (class_names[best_class_indices[k]],
                                        best_class_probabilities[k]))
                    k += 1
            add_overlays(frame, faces)
            cv2.imshow("lkk", frame)
            cv2.imwrite("result.png", frame)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
Example #14
def main(args):

    face_recognition = face.Recognition()

    if args.debug:
        print("Debug enabled")
        face.debug = True
    im = tl.vis.read_image(args.input_image)
    faces = face_recognition.identify(im)
    print(faces[0].align_time, faces[0].net_time, faces[0].class_time)
    print(faces[0].confidence, faces[0].name)
Example #15
def main(args):
    frame_interval = 5  # Number of frames between face-detection runs
    fps_display_interval = 5  # Interval (seconds) at which fps is reported
    frame_rate = 0
    frame_count = 0

    # Open the camera with OpenCV
    video_capture = cv2.VideoCapture(0)
    # Instantiate the face.Recognition() class
    face_recognition = face.Recognition()
    # Record the start time
    start_time = time.time()
    if args.debug:
        print("Debug enabled")
        face.debug = True

    # Start processing the video stream
    while True:
        # Capture frame-by-frame into ret, frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)
            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0
        print('number of face', len(faces))  # Modified
        # Overlay faces and frame_rate onto the frame
        add_overlays(frame, faces, frame_rate)
        frame_count += 1
        # Display the frame
        cv2.imshow('Video', frame)
        tmp = random.randint(1, 10)
        if tmp == 5:
            age, gender = age_gender_estimate.age_gender_estimate()
            # sql = " INSERT {}, {} TO TABLE "
            # cur.excute(sql)
            print("age, gender = ", age, gender)
            return age, gender

        # Break when 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #16
def getfaceembendding():
    content = request.json
    if not content['image']:
        return http_error_result("Image is required.", "1005")
    else:
        base64_image = content['image']
        img = base64.b64decode(base64_image)
        npimg = np.fromstring(img, dtype=np.uint8)
        source = cv2.imdecode(npimg, 1)

        face_recognition = face.Recognition(min_face_size=20)
        test_face = face_recognition.add_identity(source, 'test_1')
        return jsonify(results=test_face.embedding.tolist())
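
A client call against this endpoint might look as follows (host, port, and route are assumptions; the route is taken to mirror the handler name):

import base64
import requests

with open('test.jpg', 'rb') as f:
    payload = {'image': base64.b64encode(f.read()).decode('ascii')}
resp = requests.post('http://localhost:5000/getfaceembendding', json=payload)
print(resp.json()['results'])  # the embedding as a plain list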
Example #17
def main(args):

    face_recognition = face.Recognition()

    correctPrediction = 0
    inCorrectPrediction = 0
    sumConfidence = 0.0

    if args.debug:
        print("Debug enabled")
        face.debug = True

    path_exp = os.path.expanduser(args.input_dir)
    classes = [path for path in os.listdir(path_exp) \
               if os.path.isdir(os.path.join(path_exp, path))]
    classes.sort()
    nrof_classes = len(classes)
    results = {'person_name': [], 'p_person_name': [], 'align_time': [], 'network_time': [], 'predictions_time': [],
               'confidence': []}
    for i in range(nrof_classes):
        class_name = classes[i]
        facedir = os.path.join(path_exp, class_name)
        valid_lr_img_list = sorted(tl.files.load_file_list(path=facedir, regx='.*.png', printable=False))
        valid_lr_imgs = tl.vis.read_images(valid_lr_img_list, path=facedir, n_threads=4)
        for im in valid_lr_imgs:
            faces = face_recognition.identify(im)
            results['align_time'].append(faces[0].align_time)
            results['network_time'].append(faces[0].net_time)
            results['predictions_time'].append(faces[0].class_time)
            results['confidence'].append(faces[0].confidence)
            results['person_name'].append(class_name)
            results['p_person_name'].append(faces[0].name)
            sumConfidence += faces[0].confidence  # accumulate for the average
            if class_name == faces[0].name:
                correctPrediction += 1
            else:
                inCorrectPrediction += 1
    Accuracy = float(correctPrediction) / (correctPrediction + inCorrectPrediction)
    Avg_Confidence = float(sumConfidence) / (correctPrediction + inCorrectPrediction)
    results['align_time'].append('correctPrediction:' + str(correctPrediction))
    results['network_time'].append('inCorrectPrediction:' + str(inCorrectPrediction))
    results['predictions_time'].append('Accuracy:' + str(Accuracy))
    results['confidence'].append('Avg_Confidence:' + str(Avg_Confidence))
    results['person_name'].append('accuracy')
    results['p_person_name'].append('accuracy')
    dataname = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    data_frame = pd.DataFrame(
        data={'person_name': results['person_name'], 'p_person_name': results['p_person_name'],
              'align_time': results['align_time'],
              'network_time': results['network_time'], 'predictions_time': results['predictions_time'],
              'confidence': results['confidence']})
    data_frame.to_csv(args.input_dir + '/results_' + dataname + '.csv')
Example #18
def main(args):
    face_recognition = face.Recognition()
    start_time = time.time()
    for img_file in os.listdir(args.facedir):
        filepath = os.path.abspath(args.facedir + '/' + img_file)
        frame = cv2.imread(filepath)
        face_embedding = face_recognition.faceencoder(frame)
        embedding_file = open(img_file.split('.jpg')[0], 'w')
        if len(face_embedding) > 1:
            raise Exception("Too many faces detected: ", filepath)
        if len(face_embedding) == 1:
            for sumber in face_embedding[0]:
                embedding_file.write(str(sumber) + ' ')
        embedding_file.close()
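
The plain-text embedding files written above can be read back into numpy vectors; a short sketch (the helper name is an assumption):

import numpy as np

def load_embedding(path):
    # Each file holds one embedding as space-separated floats.
    with open(path) as f:
        return np.array([float(v) for v in f.read().split()])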
Example #19
def main(args):
    """Course ID"""
    course_id: int = args.course_id
    """Camera Options"""
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds

    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(0)
    face_recognition = face.Recognition()
    start_time = time.time()

    if args.debug:
        print("Debug enabled")
        face.debug = True

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0
        """ Draw face boundings """
        add_overlays(frame, faces, frame_rate)
        """ Send the face labels to RabbitMQ """
        send_labels(faces, course_id)

        frame_count += 1
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            """ Send quit message"""
            send_quit(course_id)
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #20
def verification():
    content = request.json
    base64_image = content['image']
    embedding = content['emb']
    testarray = ast.literal_eval(embedding)
    arr = np.array(testarray)
    img = base64.b64decode(base64_image)
    npimg = np.fromstring(img, dtype=np.uint8)
    source = cv2.imdecode(npimg, 1)

    face_recognition = face.Recognition(min_face_size=20)
    test_face = face_recognition.identify(source, arr)
    print(test_face)
    print('test face')
    return jsonify(results=test_face[0].dist)
Example #21
def main(args):
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    videoFileName = "video.mp4"
    treatedFileName = "output.mp4"
    face_recognition = face.Recognition()
    start_time = time.time()
    cap = cv2.VideoCapture(videoFileName)

    # Get the FPS to reconstruct the video at the right speed
    fpsValue = cap.get(cv2.CAP_PROP_FPS)
    frameCount = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    frameHeight = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    frameWidth = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    out = cv2.VideoWriter(apiPreference=0,
                          filename=treatedFileName,
                          fourcc=cv2.VideoWriter_fourcc(*'MP4V'),
                          fps=fpsValue,
                          frameSize=(round(frameWidth), round(frameHeight)))

    if args.debug:
        print("Debug enabled")
        face.debug = True

    i = 0
    while (cap.isOpened()):
        # Capture frame-by-frame
        i = i + 1
        ret, frame = cap.read()
        if ret == False:
            break
        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate)

        frame_count += 1
        out.write(frame)

    # When everything is done, release the capture and the writer
    cap.release()
    out.release()
Example #22
def main(args):
    global face_recognition
    global face_det
    frame_interval = 2  # Number of frames after which to run face detection
    fps_display_interval = 3  # seconds
    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(0)
    face_recognition = face.Recognition()
    start_time = time.time()
    video_capture.set(28, 0)  # property 28 is cv2.CAP_PROP_FOCUS
    #if args.debug:

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        #img = cv2.imread(frame)

        if (frame_count % frame_interval) == 0:
            Thread(target=face_reg_wrapper, args=(frame, )).start()
            #print(face_det)
            #faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, face_det, frame_rate)

        frame_count += 1
        cv2.imshow('Video', frame)
        k = cv2.waitKey(1)
        if k == ord('q'):
            break
        elif k == ord('r'):
            Thread(target=retrain_wrapper).start()
            continue
        elif k == ord('d'):
            current_person()
            continue

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
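
Example #22 offloads recognition to a worker thread that publishes results through the global face_det read by add_overlays; a minimal sketch of the assumed wrapper:

def face_reg_wrapper(frame):
    # Run recognition off the UI thread and update the shared result.
    global face_det
    face_det = face_recognition.identify(frame)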
Example #23
def main(args):
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(0)
    video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    face_recognition = face.Recognition()
    object_detection = object.Detection()
    start_time = time.time()

    if args.debug:
        print("Debug enabled")
        face.debug = True

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        # object_detection.find_objects(frame)
        object_detection.track_person(frame)
        if (frame_count % frame_interval) == 0:
            # object_detection.find_objects(frame)
            # faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        # frame = add_overlays(frame, faces, frame_rate)

        frame_count += 1
        cv2.namedWindow('Video', cv2.WINDOW_NORMAL)
        cv2.setWindowProperty('Video', cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #24
def main(args):
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(0)
    face_recognition = face.Recognition()
    start_time = time.time()

    if args.debug:
        print("Debug enabled")
        face.debug = True

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate)

        frame_count += 1
        cv2.namedWindow("Video", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("Video", cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow('Video', frame)

        # Poll the keyboard once and handle both exit keys
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        if k == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #25
def getfaceembendding():
    content = request.json
    if not content['image']:
        return http_error_result("Image is required.", "1005")
    else:
        imageByte = []
        base64_image = content['image']
        for i, imglst in enumerate(base64_image):
            img = base64.b64decode(imglst)
            npimg = np.fromstring(img, dtype=np.uint8)
            source = cv2.imdecode(npimg, 1)
            imageByte.append(source)
        face_recognition = face.Recognition(min_face_size=20)
        test_face = face_recognition.add_identity(imageByte)
        lst = np.array(test_face).tolist()
        return jsonify(results=lst)
Example #26
def main():
    testdata_path = '../images'
    face_recognition = face.Recognition()
    start_time = time.time()
    for images in os.listdir(testdata_path):
        print(images)
        filename = os.path.splitext(os.path.split(images)[1])[0]
        file_path = testdata_path + "/" + images
        image = cv2.imread(file_path)
        frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        faces = face_recognition.identify(frame)
        add_overlays(image, faces)
        cv2.imwrite('../images_result/' + filename + '.jpg', image)
    end_time = time.time()
    spend_time = float('%.2f' % (end_time - start_time))
    print('spend_time:', spend_time)
Example #27
def change_avatar():
    status = 'fail'
    file = request.files['file']
    filename = file.filename
    img = Image.open(file)
    im = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
    if file:
        recognition = face.Recognition(1)
        print(filename)
        img.save(os.path.join('C:\\Users\\xiaomiao\\Desktop\\facenet\\face_lib', '%s.png' % filename))
        save_face = recognition.add_identity(im, filename)
        if save_face:
            cv2.imwrite("C:\\Users\\xiaomiao\\Desktop\\facenet\\face_lib\\%s.jpg" % filename, im)
            np.save("C:\\Users\\xiaomiao\\Desktop\\facenet\\face_lib\\%s.npy" % filename, save_face.embedding)
            status = 'success'
    return render_template('success.html', data=status, people=filename)
Example #28
def main(args):
    frame_interval = 5  # Number of frames after which to run face detection
    fps_display_interval = 20  # seconds
    # fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(
        "http://192.168.0.101:8091/?action=stream")  # Original
    face_recognition = face.Recognition()
    start_time = time.time()
    if args.debug:
        print("Debug enabled")
        face.debug = True

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        # print('While2') # Modified
        # print("frame : ", frame) # Modified
        # print("frame.shape : ", frame.shape) # Modified
        if (frame_count % frame_interval) == 0:

            # faces, accuracys = face_recognition.identify(frame) # Modified
            faces = face_recognition.identify(frame)
            # accuracy have to be Class object
            print("in def main(args)", faces)

            # print("faces : ", faces) # Modified

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0
        add_overlays(frame, faces, frame_rate)
        frame_count += 1
        cv2.imshow('Video', frame)
        # break # Modified
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #29
def main(args):
    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(0)
    face_recognition = face.Recognition()
    start_time = time.time()
    outputFile = "result.avi"
    vid_writer = cv2.VideoWriter(
        outputFile, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
        (round(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
         round(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    if args.debug:
        print("Debug enabled")
        face.debug = True
    COLORS = np.random.randint(0, 255, size=(3, 3), dtype="uint8")
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate, COLORS)

        # Write the frame with the detection boxes
        vid_writer.write(frame.astype(np.uint8))

        frame_count += 1
        cv2.imshow('Realtime Recognition', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #30
def main(args):

    # Create an output movie file (make sure resolution/frame rate matches input video!)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    output_movie = cv2.VideoWriter('output.avi', fourcc, 29.97, (640, 360))

    frame_interval = 3  # Number of frames after which to run face detection
    fps_display_interval = 5  # seconds
    frame_rate = 0
    frame_count = 0

    video_capture = cv2.VideoCapture(
        "The+Putin+Interviews+%7C+Vladimir+Putin+vs.+Oliver+Stone+%7C+SHOWTIME+Documentary.mp4"
    )
    length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
    face_recognition = face.Recognition()
    start_time = time.time()

    if args.debug:
        print("Debug enabled")
        face.debug = True

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()

        if (frame_count % frame_interval) == 0:
            faces = face_recognition.identify(frame)

            # Check our current fps
            end_time = time.time()
            if (end_time - start_time) > fps_display_interval:
                frame_rate = int(frame_count / (end_time - start_time))
                start_time = time.time()
                frame_count = 0

        add_overlays(frame, faces, frame_rate)

        frame_count += 1
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()