Code Example #1
def testfromcamera(chkpoint):

    camera = cv2.VideoCapture(0)
    haar = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    pathlabelpair, indextoname = getfileandlabel('./image/trainfaces')
    output = myconv.cnnLayer(len(pathlabelpair))
    #predict = tf.equal(tf.argmax(output, 1), tf.argmax(y_data, 1))
    predict = output

    saver = tf.train.Saver()
    with tf.Session() as sess:
        #sess.run(tf.global_variables_initializer())
        saver.restore(sess, chkpoint)

        n = 1
        while 1:
            if (n <= 20000):
                print("It's processing image %s." % n)
                # read a frame from the camera
                success, img = camera.read()
                if not success:
                    continue

                gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                faces = haar.detectMultiScale(gray_img, 1.3, 5)
                for f_x, f_y, f_w, f_h in faces:
                    face = img[f_y:f_y + f_h, f_x:f_x + f_w]
                    face = cv2.resize(face, (IMGSIZE, IMGSIZE))
                    # preprocess the detected face the same way as the training data
                    test_x = np.array([face])
                    test_x = test_x.astype(np.float32) / 255.0

                    res = sess.run([predict, tf.argmax(output, 1)],
                                   feed_dict={myconv.x_data: test_x,
                                              myconv.keep_prob_5: 1.0,
                                              myconv.keep_prob_75: 1.0})
                    print(res)

                    cv2.putText(img, indextoname[res[1][0]], (f_x, f_y - 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)  # draw the predicted name
                    img = cv2.rectangle(img, (f_x, f_y),
                                        (f_x + f_w, f_y + f_h), (255, 0, 0), 2)
                    n += 1
                cv2.imshow('img', img)
                key = cv2.waitKey(30) & 0xff
                if key == 27:
                    break
            else:
                break
    camera.release()
    cv2.destroyAllWindows()
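
This snippet is a function-level excerpt: it assumes module-level imports (cv2, numpy as np, tensorflow as tf) and project helpers (myconv, getfileandlabel, IMGSIZE) defined elsewhere in the file. A minimal call sketch, assuming a checkpoint written by the project's training step; the path below is a placeholder, not the project's actual checkpoint name:

if __name__ == '__main__':
    # placeholder path; pass the checkpoint your training run actually saved
    testfromcamera('./checkpoint/face.ckpt')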
Code Example #2
File: tensorflow_face.py  Project: davidchen6/tf_face
def testfromcamera(chkpoint):
    #camera = cv2.VideoCapture(0)
    haar = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    #generateface([['./image/testimages', './image/testfaces']])
    pathlabelpair, indextoname = getfileandlabel('./image/trainfaces')
    print(pathlabelpair)
    print(indextoname)
    output = myconv.cnnLayer(len(pathlabelpair))
    #output = myconv.cnnLayer(4)
    #predict = tf.equal(tf.argmax(output, 1), tf.argmax(y_data, 1))
    predict = output

    saver = tf.train.Saver()
    with tf.Session() as sess:
        #sess.run(tf.global_variables_initializer())
        saver.restore(sess, chkpoint)

        n = 1
        while (n <= 4):
            if (n <= 4):
                print("It's processing image %s." % n)
                # read a test image from disk (the camera capture is commented out)
                #success, img = camera.read()
                #img = cv2.imread(os.path.join('./image/testimages/david/', 'me_' + str(n+200) + '.jpg'))
                #img = cv2.imread(os.path.join('./image/testimages/songpeng/', 'songpeng' + str(n+12) + '.jpg'))
                img = cv2.imread(
                    os.path.join('./image/testimages/Ariel_Sharon/',
                                 'Ariel_Sharon_00' + str(n + 73) + '.jpg'))
                #img = cv2.imread(os.path.join('./image/testimages/Fan_Bingbing/', 'fbb' + str(n+21) + '.jpg'))
                gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                #gray_img=cv2.equalizeHist(gray_img)
                faces = haar.detectMultiScale(gray_img, 1.3, 5)
                print(faces)
                for f_x, f_y, f_w, f_h in faces:
                    face = img[f_y:f_y + f_h, f_x:f_x + f_w]
                    print('inx output0')
                    face = cv2.resize(face, (IMGSIZE, IMGSIZE))
                    # preprocess the detected face the same way as the training data
                    test_x = np.array([face])
                    test_x = test_x.astype(np.float32) / 255.0

                    res = sess.run([predict, tf.argmax(output, 1)],
                                   feed_dict={myconv.x_data: test_x,
                                              myconv.keep_prob_5: 1.0,
                                              myconv.keep_prob_75: 1.0})
                    print('inx output1')
                    print(res)

                    cv2.putText(img, indextoname[res[1][0]], (f_x, f_y - 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 2)  # draw the predicted name
                    img = cv2.rectangle(img, (f_x, f_y),
                                        (f_x + f_w, f_y + f_h), (255, 0, 0), 2)
                n += 1
                cv2.imshow('img', img)
                # use waitKey (not time.sleep) so the window actually renders; show each result for ~5 s
                key = cv2.waitKey(5000) & 0xff
                if key == 27:
                    break
            else:
                break
    #camera.release()
    cv2.destroyAllWindows()
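
Example #2 prints pathlabelpair and indextoname, but the helper that builds them is not shown. A minimal sketch of what getfileandlabel is assumed to return, inferred from how these snippets use it (one sub-directory per person under the training folder; the real implementation in the project may differ):

import os

def getfileandlabel(rootdir):
    # map each person's sub-directory to a class index, and each index back to the name
    names = sorted(d for d in os.listdir(rootdir)
                   if os.path.isdir(os.path.join(rootdir, d)))
    pathlabelpair = {os.path.join(rootdir, name): idx for idx, name in enumerate(names)}
    indextoname = {idx: name for idx, name in enumerate(names)}
    return pathlabelpair, indextoname

len(pathlabelpair) then gives the class count passed to myconv.cnnLayer, and indextoname[predicted_index] is the name drawn onto the frame.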
Code Example #3
def testfromcamera(train_x, train_y, chkpoint):

    # open the default camera
    camera = cv2.VideoCapture(0)
    #haar = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    pathlabelpair, indextoname = getfileandlabel(rootpath +
                                                 '\\image\\trainfaces')

    # build the network and take its output as the prediction
    output = myconv.cnnLayer(len(pathlabelpair), False)

    predict = output

    # get dlib's frontal face detector
    detector = dlib.get_frontal_face_detector()

    # load the trained model
    saver = tf.train.Saver()

    with tf.Session() as sess:
        #sess.run(tf.global_variables_initializer())
        saver.restore(sess, chkpoint)

        n = 1
        while 1:
            if (n <= 20000):
                print("It's processing image %s." % n)
                sys.stdout.flush()

                # wait 0.2 s between frames
                time.sleep(0.2)

                # read a frame from the camera
                success, img = camera.read()
                if not success:
                    continue
                # convert the frame to grayscale
                gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                # detect faces in the grayscale frame with the dlib detector
                faces = detector(gray_img, 1)
                #faces = haar.detectMultiScale(gray_img, 1.3, 5)
                for i, d in enumerate(faces):
                    # x1/y1 are the top/bottom rows and x2/y2 the left/right columns, clamped to >= 0
                    x1 = d.top() if d.top() > 0 else 0
                    y1 = d.bottom() if d.bottom() > 0 else 0
                    x2 = d.left() if d.left() > 0 else 0
                    y2 = d.right() if d.right() > 0 else 0

                    face = img[x1:y1, x2:y2]

                    face = cv2.resize(face, (IMGSIZE, IMGSIZE))
                    # preprocess the detected face the same way as the training data
                    test_x = np.array([face])
                    test_x = test_x.astype(np.float32) / 255.0

                    res = sess.run([predict, tf.argmax(output, 1)],
                                   feed_dict={myconv.x: test_x,
                                              myconv.keep_prob_5: 1.0,
                                              myconv.keep_prob_75: 1.0})
                    print(res, indextoname[res[1][0]], res[0][0][res[1][0]])
                    sys.stdout.flush()

                    # pick a random color for this detection
                    r = random.randint(0, 255)
                    g = random.randint(0, 255)
                    b = random.randint(0, 255)

                    # draw a box around the detected face
                    cv2.rectangle(img, (x2, x1), (y2, y1), (r, g, b), 3)

                    # if res[0][0][res[1][0]] >= 500:
                    #     cv2.putText(img, 'others', (x1, y1 + 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (col, col, col), 2)  # show the name
                    # else:
                    #     cv2.putText(img, indextoname[res[1][0]], (x1, y1 - 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (col, col, col), 2)  # show the name
                    cv2.putText(img, indextoname[res[1][0]],
                                (x2 + 25, y1 + 40), cv2.FONT_HERSHEY_SIMPLEX,
                                1, (r, g, b), 2)  # show the predicted name

                    n += 1
                    cv2.imshow('img', img)
                key = cv2.waitKey(30) & 0xff
                if key == 27:
                    break
            else:
                break
    camera.release()
    cv2.destroyAllWindows()
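
All three examples feed placeholders and a network from a helper module myconv that is not shown here. A minimal sketch of the interface they appear to rely on, with names inferred from the calls above (x_data / x, keep_prob_5, keep_prob_75, cnnLayer); the layer sizes and the meaning of cnnLayer's second argument in example #3 are assumptions, not the project's actual network:

import tensorflow as tf

IMGSIZE = 64  # assumed input size; the project may use a different value

x_data = tf.placeholder(tf.float32, [None, IMGSIZE, IMGSIZE, 3], name='x_data')
x = x_data  # example #3 feeds myconv.x; assumed to be the same placeholder
keep_prob_5 = tf.placeholder(tf.float32, name='keep_prob_5')
keep_prob_75 = tf.placeholder(tf.float32, name='keep_prob_75')

def cnnLayer(classnum, reuse=False):
    # illustrative two conv/pool blocks plus a dense head; returns [batch, classnum] logits
    with tf.variable_scope('cnn', reuse=reuse):
        conv1 = tf.layers.conv2d(x_data, 32, 3, padding='same', activation=tf.nn.relu)
        pool1 = tf.layers.max_pooling2d(conv1, 2, 2)
        drop1 = tf.nn.dropout(pool1, keep_prob_5)
        conv2 = tf.layers.conv2d(drop1, 64, 3, padding='same', activation=tf.nn.relu)
        pool2 = tf.layers.max_pooling2d(conv2, 2, 2)
        drop2 = tf.nn.dropout(pool2, keep_prob_5)
        flat = tf.layers.flatten(drop2)
        dense = tf.layers.dense(flat, 512, activation=tf.nn.relu)
        dropf = tf.nn.dropout(dense, keep_prob_75)
        return tf.layers.dense(dropf, classnum)

Note that a checkpoint restored with tf.train.Saver only loads cleanly if variable names and shapes match the graph that was trained, so this sketch is for reading the examples, not for restoring the project's checkpoint.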