def facial_detact(modelPath, showBox=True):
    face_x = tf.placeholder(tf.float32, [None, 2304])
    y_conv = deepnn(face_x)
    probs = tf.nn.softmax(y_conv)

    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(modelPath)
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(
            'Model restored successfully!\n\nNOTE: Press SPACE on keyboard to capture face.'
        )

    feelings_faces = []
    for index, emotion in enumerate(EMOTIONS):
        feelings_faces.append(
            cv2.imread('./data/emojis/' + emotion + '.png', -1))

    video_captor = cv2.VideoCapture(0)

    emoji_face = []
    result = None

    while True:
        ret, frame = video_captor.read()
        detected_face, face_coor = format_image(frame)
        if showBox:
            if face_coor is not None:
                [x, y, w, h] = face_coor
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        if cv2.waitKey(1) & 0xFF == ord(' '):

            if detected_face is not None:
                cv2.imwrite('a.jpg', detected_face)
                tensor = image_to_tensor(detected_face)
                result = sess.run(probs, feed_dict={face_x: tensor})
                print(EMOTIONS[np.argmax(result[0])])
        if result is not None:
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 20),
                            cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
                cv2.rectangle(frame, (130, index * 20 + 10),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 4), (255, 0, 0), -1)
                emoji_face = feelings_faces[np.argmax(result[0])]

            for c in range(0, 3):
                frame[200:320, 10:130, c] = emoji_face[:, :, c] * (
                    emoji_face[:, :, 3] /
                    255.0) + frame[200:320, 10:130,
                                   c] * (1.0 - emoji_face[:, :, 3] / 255.0)
        cv2.imshow('face', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_captor.release()
    cv2.destroyAllWindows()
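These demos rely on helpers defined elsewhere in the project (deepnn, format_image, image_to_tensor, EMOTIONS). As a rough guide to what the graph expects, the sketch below shows a hypothetical image_to_tensor-style preprocessing step, assuming the detected face crop is turned into a 48x48 grayscale image and flattened into the [None, 2304] tensor fed to face_x (2304 = 48 * 48); the project's real helper may differ.

import cv2
import numpy as np

def image_to_tensor_sketch(face_image):
    # Hypothetical stand-in for the project's image_to_tensor helper.
    # Convert to grayscale if needed, resize to 48x48, and flatten to shape (1, 2304).
    if face_image.ndim == 3:
        face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
    face_image = cv2.resize(face_image, (48, 48), interpolation=cv2.INTER_CUBIC)
    return face_image.reshape(1, 48 * 48).astype(np.float32)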
Example #2
def demo(modelPath, showBox=True):
    face_x = tf.placeholder(tf.float32, [None, 2304])
    y_conv = deepnn(face_x)
    probs = tf.nn.softmax(y_conv)

    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(modelPath)
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)

    feelings_faces = []
    for index, emotion in enumerate(EMOTIONS):
        feelings_faces.append(
            cv2.imread('./data/emojis/' + emotion + '.png', -1))
    emoji_face = []

    img = input("Enter the image file name: ")
    while True:
        frame = cv2.imread(img)
        detected_face, face_coor = format_image(frame)

        if face_coor is not None:
            [x, y, w, h] = face_coor
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        result = None
        if detected_face is not None:
            cv2.imwrite('face.jpg', detected_face)
            tensor = image_to_tensor(detected_face)
            result = sess.run(probs, feed_dict={face_x: tensor})
        if result is not None:
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 20),
                            cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
                cv2.rectangle(frame, (130, index * 20 + 10),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 4), (255, 0, 0), -1)
                emoji_face = feelings_faces[np.argmax(result[0])]
            for c in range(0, 3):
                frame[200:320, 10:130, c] = emoji_face[:, :, c] * (
                    emoji_face[:, :, 3] /
                    255.0) + frame[200:320, 10:130,
                                   c] * (1.0 - emoji_face[:, :, 3] / 255.0)
        else:
            comment = 'We cannot detect a face....Too bad!'
            cv2.putText(frame, comment, (115, 250), cv2.FONT_HERSHEY_PLAIN,
                        1.5, (0, 0, 0), 1)
        cv2.imwrite('result1.jpg', frame)
        cv2.namedWindow('Guess how I feel', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Guess how I feel', 450, 600)
        cv2.imshow('Guess how I feel', frame)
        k = cv2.waitKey(10) & 0xff  # Press 'ESC' for exiting video
        if k == 27:
            break
def demo(modelPath, showBox=False, SAMPLE_IMAGE_PATH=SAMPLE_IMAGE):
  face_x = tf.placeholder(tf.float32, [None, 2304])
  y_conv = deepnn(face_x)
  probs = tf.nn.softmax(y_conv)

  saver = tf.train.Saver()
  ckpt = tf.train.get_checkpoint_state(modelPath)
  sess = tf.Session()
  if ckpt and ckpt.model_checkpoint_path:
    saver.restore(sess, ckpt.model_checkpoint_path)
    
  result = None

  frame = SAMPLE_IMAGE_PATH
  detected_face, face_coor = format_image(frame) #face Coordinates
  
  if showBox:
      if face_coor is not None:
        [x,y,w,h] = face_coor
        cv2.rectangle(frame, (x,y), (x+w,y+h), (255,0,0), 2)

  if detected_face is not None:
        cv2.imwrite('temp.jpg', detected_face)
        tensor = image_to_tensor(detected_face)
        result = sess.run(probs, feed_dict={face_x: tensor})
        print(result)
  jsonString = '{'
  maximum = 0.00
  FINAL_EMOTION = ''
  if result is not None:
      for index, emotion in enumerate(EMOTIONS):
        if index != 0:
          jsonString += ','

        percentage = float("{0:.2f}".format(result[0][index] * 100))
        if percentage == 0.0:
          percentage = 0

        jsonString += '"' + str(emotion) + '":' + str(percentage)
        if maximum < percentage:
            maximum = copy.copy(percentage)
            FINAL_EMOTION = emotion
        print('--------------------------------')
        print('                                ')

        print(str(emotion) + " : " + str(percentage) + "%")
        print('                                ')

  print('=========================================================')
  print('The result for the input image is: ' + str(FINAL_EMOTION) + " (" + str(maximum) + "%)")
  print('=========================================================')
  jsonString += '}'
  return jsonString
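Building the JSON by hand with string concatenation works, but a dict plus json.dumps is less fragile. A minimal alternative sketch, assuming result and EMOTIONS as in the function above (probabilities_to_json is a hypothetical name, not part of the project):

import json

def probabilities_to_json(result, emotions):
    # Map each emotion label to its percentage, rounded to two decimals, and serialize once.
    percentages = {emotion: round(float(result[0][index]) * 100, 2)
                   for index, emotion in enumerate(emotions)}
    best = max(percentages, key=percentages.get)
    print('The result for the input image is: %s (%.2f%%)' % (best, percentages[best]))
    return json.dumps(percentages)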
Example #4
def main(_):
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    with tf.variable_scope('model'):
        x = tf.placeholder(tf.float32, [None, 784], name="x")
        y_ = tf.placeholder(tf.float32, [None, 10], name='y_')

        training = tf.placeholder(bool, (), name='mode')

        ybar, logits = deepnn(x, logits=True, training=training)

        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            labels=y_, logits=logits),
                                       name="cse")

        correct_prediction = tf.equal(tf.argmax(ybar, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                                  name="acc")

    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(FLAGS.train_steps):
            batch = mnist.train.next_batch(50)
            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    training: False
                })
                print('step %d, training accuracy %g' % (i, train_accuracy))

            train_step.run(feed_dict={
                x: batch[0],
                y_: batch[1],
                training: True
            })

        saver = tf.train.Saver()
        saver.save(sess, FLAGS.output_dir + "/model")

        print('test accuracy %g' % accuracy.eval(feed_dict={
            x: mnist.test.images,
            y_: mnist.test.labels,
            training: False
        }))
def demo(modelPath, showBox=False):
  face_x = tf.placeholder(tf.float32, [None, 2304])
  y_conv = deepnn(face_x)
  probs = tf.nn.softmax(y_conv)

  saver = tf.train.Saver()
  ckpt = tf.train.get_checkpoint_state(modelPath)
  sess = tf.Session()
  if ckpt and ckpt.model_checkpoint_path:
    saver.restore(sess, ckpt.model_checkpoint_path)
    print('Model restored successfully!\nNOTE: Press SPACE on keyboard to capture face.')

  feelings_faces = []
  for index, emotion in enumerate(EMOTIONS):
    feelings_faces.append(cv2.imread('./data/emojis/' + emotion + '.png', -1))
  video_captor = cv2.VideoCapture(0)

  emoji_face = []
  result = None

  while True:
    ret, frame = video_captor.read()
    detected_face, face_coor = format_image(frame)
    if showBox:
      if face_coor is not None:
        [x,y,w,h] = face_coor
        cv2.rectangle(frame, (x,y), (x+w,y+h), (255,0,0), 2)

    if cv2.waitKey(1) & 0xFF == ord(' '):

      if detected_face is not None:
        cv2.imwrite('a.jpg', detected_face)
        tensor = image_to_tensor(detected_face)
        result = sess.run(probs, feed_dict={face_x: tensor})
        # print(result)
    if result is not None:
      for index, emotion in enumerate(EMOTIONS):
        cv2.putText(frame, emotion, (10, index * 20 + 20), cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
        cv2.rectangle(frame, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4),
                      (255, 0, 0), -1)
        emoji_face = feelings_faces[np.argmax(result[0])]

      for c in range(0, 3):
        frame[200:320, 10:130, c] = emoji_face[:, :, c] * (emoji_face[:, :, 3] / 255.0) + frame[200:320, 10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)
    cv2.imshow('face', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
      break
Example #6
def demo(modelPath, showBox=False):
    face_x = tf.compat.v1.placeholder(tf.float32, [None, 2304])
    y_conv = deepnn(face_x)
    probs = tf.nn.softmax(y_conv)

    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(modelPath)
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(
            'Model restored successfully!\nNOTE: Press SPACE on keyboard to capture face.'
        )

    video_path = "test.mp4"
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)  # frame rate of the input video
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))  # frame size of the input video

    fourcc = cv2.VideoWriter_fourcc(*'mpeg')  # codec for the saved video
    # Write the processed frames out to a new video file.
    output_video = cv2.VideoWriter()
    # Path of the saved video
    video_save_path = 'trans.mp4'
    output_video.open(video_save_path, fourcc, fps, size, True)
    result = None

    while True:
        ret, frame = cap.read()
        if not ret:  # stop when the input video ends
            break
        detected_face, face_coor = format_image(frame)
        if showBox:
            if face_coor is not None:
                specify(face_coor, frame)
        if detected_face is not None:
            tensor = image_to_tensor(detected_face)
            result = sess.run(probs, feed_dict={face_x: tensor})
            print(result)
        output_video.write(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    output_video.release()
Example #7
def demo(modelPath, showBox=True):
    face_x = tf.placeholder(tf.float32, [None, 2304])
    y_conv = deepnn(face_x)
    probs = tf.nn.softmax(y_conv)

    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(modelPath)
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(
            'Model restored successfully!\nNOTE: Press SPACE on keyboard to capture face.'
        )

    feelings_faces = []
    for index, emotion in enumerate(EMOTIONS):
        feelings_faces.append(
            cv2.imread('./data/emojis/' + emotion + '.png', -1))
    # video_captor = cv2.VideoCapture(0)
    demo_image = cv2.imread('./demo_image.jpg')
    detected_face, face_coor = format_image(demo_image)

    if showBox:
        if face_coor is not None:
            [x, y, w, h] = face_coor
            cv2.rectangle(demo_image, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('demo_image', demo_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    if detected_face is not None:
        cv2.imwrite('a.jpg', detected_face)
        tensor = image_to_tensor(detected_face)
        result = sess.run(y_conv, feed_dict={face_x: tensor})
        print('result.shape: ', result.shape)
        print('result: ', result)
        most_likely_index = np.argmax(result)
        # print(most_likely_index)
        print(EMOTIONS[most_likely_index])
    sess.close()
Example #8
def main(argv):
    imvalue = load_data(argv)
    x = tf.placeholder(tf.float32, [None, 784])
    y_ = tf.placeholder(tf.float32, [None, 10])
    y_conv, keep_prob = model.deepnn(x)

    y_predict = tf.nn.softmax(y_conv)
    init_op = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init_op)
        saver.restore(sess, "./mnist_cnn_model.ckpt")
        #saver=tf.train.import_meta_graph("E:/Project/Python/mnist/MNIST_my/mnist_cnn_model.ckpt.meta")

        prediction = tf.argmax(y_predict, 1)
        predint = prediction.eval(feed_dict={
            x: [imvalue],
            keep_prob: 1.0
        },
                                  session=sess)
        print(predint[0])
Example #9
File: demo.py  Project: ippler/FER
def demo(modelPath, showBox=False):
    face_x = tf.placeholder(tf.float32, [None, 2304])  # define a TensorFlow placeholder
    y_conv = deepnn(face_x)  # produces a result of shape (1, 7)
    probs = tf.nn.softmax(y_conv)  # softmax turns the logits into per-emotion probabilities

    saver = tf.train.Saver()  # create a Saver object
    ckpt = tf.train.get_checkpoint_state(
        modelPath)  # read the checkpoint state from modelPath

    sess = tf.Session()  # create a TensorFlow session in which the graph is run
    if ckpt and ckpt.model_checkpoint_path:
        # If the checkpoint state exists and it records a model path,
        saver.restore(
            sess, ckpt.model_checkpoint_path)  # restore the trained weights into the session
        print(
            'Model restored successfully!\nNOTE: Press SPACE on keyboard to capture face.'
        )

    feelings_faces = []
    for index, emotion in enumerate(EMOTIONS):
        # Iterate over EMOTIONS, getting both the index and the label.
        feelings_faces.append(
            cv2.imread('./data/emojis/' + emotion + '.png', -1))
        # cv2.imread() loads each emotion's emoji image (with its alpha channel)
        # from the emojis folder into feelings_faces.
    video_captor = cv2.VideoCapture(0)  # open the webcam; press SPACE to capture a frame

    emoji_face = []
    result = None

    while True:
        ret, frame = video_captor.read()  # grab a frame from the webcam into frame
        detected_face, face_coor = format_image(
            frame)  # format_image() (defined at line 14 of demo.py) returns the face crop and its coordinates
        if showBox:
            if face_coor is not None:
                [x, y, w, h] = face_coor
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        if cv2.waitKey(1) & 0xFF == ord(' '):  # run the prediction when SPACE is pressed

            if detected_face is not None:
                cv2.imwrite('a.jpg', detected_face)  # save the face crop
                tensor = image_to_tensor(detected_face)
                # Convert the image to a 48x48 grayscale (0-255) np.array.
                result = sess.run(probs, feed_dict={
                    face_x: tensor
                })  # run the model and get the per-emotion probabilities
                # print(result)
        if result is not None:
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 20),
                            cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
                # Draw the emotion label text onto the frame.
                cv2.rectangle(frame, (130, index * 20 + 10),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 4), (255, 0, 0), -1)
                emoji_face = feelings_faces[np.argmax(result[0])]

            for c in range(0, 3):
                frame[200:320, 10:130, c] = emoji_face[:, :, c] * (
                    emoji_face[:, :, 3] /
                    255.0) + frame[200:320, 10:130,
                                   c] * (1.0 - emoji_face[:, :, 3] / 255.0)
        cv2.imshow('face', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #10
def demo(modelPath, showBox=False):
    face_x = tf.placeholder(tf.float32, [None, 2304])
    y_conv = deepnn(face_x)
    probs = tf.nn.softmax(y_conv)

    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(modelPath)
    sess = tf.Session()

    # Get the model
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored successfully!')
        # print('NOTE: Press SPACE on keyboard to capture face.')

    feelings_faces = []
    for index, emotion in enumerate(EMOTIONS):
        feelings_faces.append(
            cv2.imread('./data/emojis/' + emotion + '.png', -1))
    video_captor = cv2.VideoCapture(0)

    emoji_face = []
    result = None

    while True:
        ret, frame = video_captor.read()
        # time.sleep(0.1)
        detected_face, face_coor = format_image(frame)
        if showBox:
            if face_coor is not None:
                [x, y, w, h] = face_coor
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        # if cv2.waitKey(1) & 0xFF == ord(' '):
        # Not using because it calls the model again and again
        if detected_face is not None:
            # cv2.imwrite('a.jpg', detected_face)
            tensor = image_to_tensor(detected_face)
            result = sess.run(probs, feed_dict={face_x: tensor})
            # print(result)

        if result is not None:
            for index, emotion in enumerate(EMOTIONS):
                # Label(emotion) display
                cv2.putText(frame, emotion, (10, index * 20 + 20),
                            cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)

                # Histogram display
                cv2.rectangle(frame, (130, index * 20 + 10),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 4), (255, 0, 0), -1)
                # emoji<-emotion of the highest score
                emoji_face = feelings_faces[np.argmax(result[0])]
            # Emoji(120*120) display
            for c in range(0, 3):
                frame[200:320, 10:130, c] = emoji_face[:, :, c] * (
                    emoji_face[:, :, 3] /
                    255.0) + frame[200:320, 10:130,
                                   c] * (1.0 - emoji_face[:, :, 3] / 255.0)
        # Real-time capture
        cv2.imshow('face', frame)
        if cv2.waitKey(1) & 0xFF == ord(' '):
            cv2.imwrite('fer_result/' + str(uuid.uuid4().hex) + '.png', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
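The per-channel loop that alpha-blends the emoji onto the frame recurs in several of these demos; it can be written as a single vectorized NumPy operation. A minimal sketch, assuming emoji_bgra is a 120x120 BGRA image as loaded above with cv2.imread(..., -1) and the same 120x120 frame region (overlay_emoji is a hypothetical helper name):

import numpy as np

def overlay_emoji(frame, emoji_bgra, y0=200, x0=10):
    # Alpha-blend the BGRA emoji onto the BGR frame in one vectorized step.
    h, w = emoji_bgra.shape[:2]
    alpha = emoji_bgra[:, :, 3:4] / 255.0  # shape (h, w, 1); broadcasts over the BGR channels
    roi = frame[y0:y0 + h, x0:x0 + w]
    frame[y0:y0 + h, x0:x0 + w] = (emoji_bgra[:, :, :3] * alpha
                                   + roi * (1.0 - alpha)).astype(frame.dtype)
    return frame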
caffe.set_mode_gpu()
PNet = caffe.Net(caffe_model_path + "/det1.prototxt", caffe_model_path + "/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path + "/det2.prototxt", caffe_model_path + "/det2.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path + "/det3.prototxt", caffe_model_path + "/det3.caffemodel", caffe.TEST)

owd = os.getcwd()
face_model_path = 'models/recognition/'

face_model = './face_deploy.prototxt'
face_weights = './85_accuracy.caffemodel'
center_facenet = caffe.Net(face_model_path + face_model, face_model_path + face_weights, caffe.TEST)

if EXPRESSION_DETECTION_ENABLED:
    face_x = tf.placeholder(tf.float32, [None, 2304])
    y_conv = deepnn(face_x)
    probs = tf.nn.softmax(y_conv)
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state('models/expression/ckpt')
    EMOTIONS = ['Angry', 'Disgusted', 'Fearful', 'Happy', 'Sad', 'Surprised', 'Neutral']

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model restored successfully!\nNOTE: Press SPACE on keyboard to capture face.')
        
        

def drawBoxes(im, boxes):
Example #12
def demo(modelPath, showBox=True):
    face_x = tf.placeholder(tf.float32, [None, 2304])
    y_conv = deepnn(face_x)
    probs = tf.nn.softmax(y_conv)

    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(modelPath)
    sess = tf.Session()
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print(
            'Model restored successfully!\nNOTE: Press SPACE on keyboard to capture face.'
        )

    # feelings_faces = []
    # for index, emotion in enumerate(EMOTIONS):
    #   num = random.randint(1,3)
    #   feelings_faces.append(cv2.imread('./data/emojis/' + emotion + str(num) + '.jpg', -1))
    # emoji_face = []

    result = None

    video_captor = cv2.VideoCapture(0)

    while True:
        ret, frame = video_captor.read()
        detected_face, face_coor = format_image(frame)

        window_name = 'Face Expression Recognition'
        cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
        cv2.moveWindow(window_name, 0, 0)

        if showBox:
            if face_coor is not None:
                [x, y, w, h] = face_coor
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        ratio = 0.8
        frame = cv2.resize(frame,
                           None,
                           fx=ratio,
                           fy=ratio,
                           interpolation=cv2.INTER_CUBIC)
        move_dx = (1 - ratio) * 1280
        move_dy = (1 - ratio) * 360
        M = np.float32([[1, 0, move_dx], [0, 1, move_dy]])
        new_height, new_width, _ = frame.shape
        frame = cv2.warpAffine(frame, M, (1280, 720))

        cv2.rectangle(frame, (0, 0), (int(move_dx), 720), (255, 255, 255), -1)

        if cv2.waitKey(10) & 0xFF == ord(' '):

            if detected_face is not None:
                cv2.imwrite('a.jpg', detected_face)
                tensor = image_to_tensor(detected_face)
                result = sess.run(probs, feed_dict={face_x: tensor})
                # print(result)
        if result is not None:
            for index, emotion in enumerate(EMOTIONS):
                cv2.putText(frame, emotion, (10, index * 20 + 100),
                            cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), 1)
                cv2.rectangle(frame, (130, index * 20 + 90),
                              (130 + int(result[0][index] * 100),
                               (index + 1) * 20 + 84), (255, 0, 0), -1)
                emotion_type = EMOTIONS[np.argmax(result[0])]
                prob = float(np.max(result[0]) * 100)
                cv2.putText(frame, emotion_type, (20, 490),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 0), 1)
                cv2.putText(frame, str('%.2f' % prob + "%"), (150, 490),
                            cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 0), 1)

            #   emoji_face = feelings_faces[np.argmax(result[0])]
            # print("1")
            # for c in range(0, 3):
            #   frame[200:320, 10:130, c] = emoji_face[:, :, c] * (emoji_face[:, :, 3] / 255.0) + frame[200:320, 10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)

        detected_img = cv2.imread('a.jpg', 0)
        if detected_img is not None:  # 'a.jpg' exists only after SPACE has been pressed once
            detected_img = cv2.resize(detected_img,
                                      None,
                                      fx=2,
                                      fy=2,
                                      interpolation=cv2.INTER_CUBIC)
            for c in range(0, 3):
                frame[340:436,
                      int(move_dx / 2) - 48:int(move_dx / 2) + 48,
                      c] = detected_img

        cv2.imshow(window_name, frame)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
Example #13
import model




mnist = input_data.read_data_sets('MNIST_data', one_hot=True)  # load the data

# Input placeholder; each MNIST image is 28*28 = 784 pixels.
x = tf.placeholder(tf.float32, [None, 784])

# Output placeholder; the labels cover the ten digit classes.
y_ = tf.placeholder(tf.float32, [None, 10])

# Build the network: input -> conv1 -> pool1 -> conv2 -> pool2 -> fc1 -> fc2.
y_conv, keep_prob = model.deepnn(x)

# First apply softmax to the network's last layer, then take the cross entropy
# between the softmax output and the labels; cross_entropy is a vector.
with tf.name_scope('loss'):
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                            logits=y_conv)

# Average the cross_entropy vector to get the scalar loss.
cross_entropy = tf.reduce_mean(cross_entropy)

# AdamOptimizer implements the Adam optimization algorithm, which uses
# second-moment gradient information while searching for a good optimum.
with tf.name_scope('adam_optimizer'):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

# Accuracy on the test set
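The snippet stops at the test-accuracy comment. For reference, a minimal sketch of the standard TF 1.x accuracy computation (the same pattern used in the main() example earlier in this section), assuming tf, y_conv and y_ as defined above:

# Compare the arg-max of the network output with the arg-max of the one-hot labels.
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))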
Example #14
                            maxval=0.03,
                            dtype=tf.float32)
    x_in = tf.add(
        x_in,
        tf.random_normal(shape=tf.shape(x_in),
                         mean=0.0,
                         stddev=std,
                         dtype=tf.float32))

x_in = tf.minimum(x_in, 1.0)
x_in = tf.maximum(x_in, 0.0)
y_ = tf.placeholder(tf.int64, [None])

# Network specification based on hyper-parameters
with tf.variable_scope("model"):
    y, keep_prob = model.deepnn(x_in, fsize, ldepth, lwidth, bn, act, itype,
                                True, True)

for v in tf.global_variables():
    print("Name: ", v.name)

print('Model size = %d weights\n' % model.count_all_vars())
sys.stdout.flush()

# Loss
with tf.name_scope('loss'):
    cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=y)
cross_entropy = tf.reduce_mean(cross_entropy)

# Optimizer specification
with tf.variable_scope("optimizer"):
    extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
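tf.GraphKeys.UPDATE_OPS collects the moving-average update ops created by batch normalization. The snippet cuts off here, but a common way to consume that collection (an assumption about the intent, not the author's actual code) is to run the updates as a control dependency of the optimizer step, assuming tf, extra_update_ops and cross_entropy as defined above:

# Run the batch-norm update ops together with each training step.
with tf.control_dependencies(extra_update_ops):
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)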
def demo(modelPath, showBox=False):
  face_x = tf.placeholder(dtype=tf.float32, name='inputs', shape=[None, 2304])

  #parser = argparse.ArgumentParser()
  #parser.add_argument("--fz_model_fn",default = "./emotion_model_frozen.pb",type=str,help="Frozen model file to import")
  #args = parser.parse_args()
  #graph = load_graph(args.fz_model_fn)
  
  #for op in graph.get_operations():
    #print(op.name,op.values())
  
  #x = graph.get_tensor_by_name('prefix/inputs:0')
  #probs = graph.get_tensor_by_name('prefix/output_node:0')
  
  y_conv = deepnn(face_x)
  probs = tf.nn.softmax(y_conv, name='output_node')
  
  print("probs",probs)
  saver = tf.train.Saver()
  ckpt = tf.train.get_checkpoint_state(modelPath)
  sess = tf.Session()
  if ckpt and ckpt.model_checkpoint_path:
     saver.restore(sess, ckpt.model_checkpoint_path)
     print('Model restored successfully!\nNOTE: Press SPACE on keyboard to capture face.')

  feelings_faces = []
  for index, emotion in enumerate(EMOTIONS):
    feelings_faces.append(cv2.imread('./data/emojis/' + emotion + '.png', -1))
  video_captor = cv2.VideoCapture(0)

  emoji_face = []
  result = None

  while True:
    ret, frame = video_captor.read()
    detected_face, face_coor = format_image(frame)
    if showBox:
      if face_coor is not None:
        [x,y,w,h] = face_coor
        cv2.rectangle(frame, (x,y), (x+w,y+h), (255,0,0), 2)

    if cv2.waitKey(1):

      if detected_face is not None:
        cv2.imwrite('a.jpg', detected_face)
        tensor = image_to_tensor(detected_face)

        result = sess.run(probs, feed_dict={face_x: tensor})

        print(result)

        print("probs",probs)
        tf.train.write_graph(sess.graph_def, './', "nn_model.pbtxt", as_text=True)
        for op in tf.get_default_graph().get_operations():
            print(op.name)

        # print(result)
    if result is not None:
      for index, emotion in enumerate(EMOTIONS):
        cv2.putText(frame, emotion, (10, index * 20 + 20), cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
        cv2.rectangle(frame, (130, index * 20 + 10), (130 + int(result[0][index] * 100), (index + 1) * 20 + 4),
                      (255, 0, 0), -1)
        emoji_face = feelings_faces[np.argmax(result[0])]

      for c in range(0, 3):
        frame[200:320, 10:130, c] = emoji_face[:, :, c] * (emoji_face[:, :, 3] / 255.0) + frame[200:320, 10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)
    cv2.imshow('face', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
      break
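The commented-out lines at the top of this last example hint at serving the same model from a frozen GraphDef instead of a checkpoint. A minimal sketch of that path, assuming TF 1.x, a frozen graph at ./emotion_model_frozen.pb, and input/output tensors named 'inputs:0' and 'output_node:0' to match the placeholder and softmax names above (load_frozen_graph is a hypothetical helper standing in for the load_graph the comments reference):

import tensorflow as tf

def load_frozen_graph(pb_path):
    # Read the serialized GraphDef and import it under the 'prefix' name scope.
    with tf.gfile.GFile(pb_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='prefix')
    return graph

graph = load_frozen_graph('./emotion_model_frozen.pb')
x = graph.get_tensor_by_name('prefix/inputs:0')
probs = graph.get_tensor_by_name('prefix/output_node:0')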