Example #1
    def __init__(self, ip, port, total_steps, emo_model):
        self.ip = ip
        self.port = port

        self.action_dim = 15
        self.epi_num = 1
        self.state_num = 0

        #Emotion recognition module
        self.emo_list = ['angry', 'contemptuous', 'disgusted', 'fearful',
                         'happy', 'neutral', 'sad', 'surprised']
        self.face_det = dlib.get_frontal_face_detector()
        self.lm_det = dlib.shape_predictor('../model/shape_predictor_68_face_landmarks.dat')
        self.frames = 10

        self.lm_ph = tf.placeholder(tf.float32, [None, 51*2])
        self.img_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])
        self.keep_prob = tf.placeholder(tf.float32)

        self.sess = tf.Session()
        dgn = vgg_face.DGN()
        dgn.build(self.lm_ph, self.keep_prob)
        self.pred_dgn = tf.argmax(dgn.prob, 1)

        dan = vgg_face.Vgg_face()
        dan.build(self.img_ph, self.keep_prob)
        self.pred_dan = tf.argmax(dan.prob, 1)

        self.emo_model = emo_model

        # Initialize all variables before optionally restoring the jointly
        # fine-tuned checkpoint; running the initializer after the restore
        # would overwrite the restored weights.
        self.sess.run(tf.global_variables_initializer())

        if self.emo_model == 'weighted-sum':
            prob_sum = tf.nn.softmax(dan.fc8 + dgn.fc3)
            self.pred_sum = tf.argmax(prob_sum, 1)
        elif self.emo_model == 'joint-fine-tune':
            saver = tf.train.Saver()
            saver.restore(self.sess, '../model/dgan.ckpt')
            prob_joint = tf.nn.softmax(dan.fc8 + dgn.fc3)
            self.pred_joint = tf.argmax(prob_joint, 1)

        self.total_steps = total_steps
        random.seed()

        # Open the MJPEG endpoint once as a connectivity check, then close it;
        # continuous capture happens in the reader thread started below.
        ipcam_url = 'http://admin:@' + self.ip + ':' + str(self.port) + '/MJPEG.CGI'
        self.stream = urllib.urlopen(ipcam_url)
        self.stream.close()
        self.ipCamStart = True
        ipCamThread = threading.Thread(target=self._ipCamThread)
        ipCamThread.start()
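Example #1 starts a reader thread `_ipCamThread` whose body is not shown in this snippet. A minimal sketch of what such a thread might look like, reusing the MJPEG byte-stream parsing from Example #4; the method name and the `self.ipCamStart` flag come from the snippet, while the body and the `self.frame` buffer are assumptions:

    def _ipCamThread(self):
        # Hypothetical body: keep pulling JPEG frames out of the MJPEG byte
        # stream and retain only the most recent one in self.frame.
        stream = urllib.urlopen('http://admin:@' + self.ip + ':' +
                                str(self.port) + '/MJPEG.CGI')
        data = ''
        while self.ipCamStart:
            data += stream.read(1024)
            soi = data.find('\xff\xd8')   # JPEG start-of-image marker
            eoi = data.find('\xff\xd9')   # JPEG end-of-image marker
            if soi != -1 and eoi != -1:
                self.frame = cv2.imdecode(
                    np.fromstring(data[soi:eoi + 2], dtype=np.uint8), 1)
                data = data[eoi + 2:]
        stream.close()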
Example #2
    def __init__(self, total_steps):
        #IP cam emotion recognition
        self.lm_ph = tf.placeholder(tf.float32, [None, 51 * 2])
        self.img_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])
        self.keep_prob = tf.placeholder(tf.float32)

        self.sess = tf.Session()
        dgn = vgg_face.DGN()
        dgn.build(self.lm_ph, self.keep_prob)
        self.pred_dgn = tf.argmax(dgn.prob, 1)

        dan = vgg_face.Vgg_face()
        dan.build(self.img_ph, self.keep_prob)
        self.pred_dan = tf.argmax(dan.prob, 1)

        # Initialize all variables before optionally restoring the jointly
        # fine-tuned checkpoint; initializing afterwards would overwrite the
        # restored weights. (`model` is a module-level setting in the source.)
        self.sess.run(tf.global_variables_initializer())

        if model == 'weighted-sum':

            prob_sum = tf.nn.softmax(dan.fc8 + dgn.fc3)
            self.pred_sum = tf.argmax(prob_sum, 1)

        elif model == 'joint-fine-tune':

            saver = tf.train.Saver()
            saver.restore(self.sess, '../model/dgan.ckpt')
            prob_joint = tf.nn.softmax(dan.fc8 + dgn.fc3)
            self.pred_joint = tf.argmax(prob_joint, 1)
        self.reward = []

        #initialize state
        #Edwinn set alpha=0.3, epsilon=0.5, gamma=0.9 for the first suggestion matches;
        #for 4 suggestion matches he set alpha=1, epsilon=0.1, gamma=0.9
        self.ai = irl.sarsa(actions=range(n_actions),
                            epsilon=1.0,
                            alpha=0.3,
                            gamma=0.9)
        #self.p_IFV = random.choice([1, 32, 16])
        self.p_IFV = random.choice(range(256))
        self.p_action1 = self.ai.chooseAction(self.p_IFV)
        self.p_action2 = np.random.choice(range(len(pref)), p=pref)
        self.p_action = np.random.choice([self.p_action1, self.p_action2],
                                         p=[0.2, 0.8])
        #self.p_action = self.p_action1
        self.epi_reward = 0
        self.count = 0
        self.total_steps = total_steps
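The action selection above mixes the SARSA agent's epsilon-greedy suggestion with a draw from a fixed preference distribution `pref`, keeping the agent's choice only 20% of the time. A self-contained sketch of just that blending step; the action count and the uniform `pref` are illustrative assumptions (the original reads `n_actions` and `pref` from module-level state):

import numpy as np

n_actions = 15                          # assumed action count
pref = np.ones(n_actions) / n_actions   # illustrative preference prior

def blend_action(agent_action):
    # Draw an action from the preference prior...
    prior_action = np.random.choice(range(n_actions), p=pref)
    # ...and keep the agent's epsilon-greedy suggestion 20% of the time.
    return np.random.choice([agent_action, prior_action], p=[0.2, 0.8])

print(blend_action(3))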
Example #3
def main():

    with open('../data/ex_train_list.pkl', 'rb') as f1:
        train_list = pickle.load(f1)
    with open('../data/ex_landmark.pkl', 'rb') as f2:
        landmark = pickle.load(f2)

    print(len(train_list))

    print('Loading data...')
    x_train, label, shape = load_data(train_list, landmark)
    x_train = np.asarray(x_train)
    shape = np.asarray(shape)
    label = np.asarray(label)

    (im_train, lm_train, gt_train), (x_val, lm_val,
                                     gt_val) = split_data(x_train,
                                                          shape,
                                                          label,
                                                          split_ratio=0.1)

    img_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])
    lm_ph = tf.placeholder(tf.float32, [None, 51 * 2])
    label_ph = tf.placeholder(tf.float32, [None, 8])
    keep_prob = tf.placeholder(tf.float32)
    lr_ph = tf.placeholder(tf.float32)

    with tf.Session() as sess:

        dan = vgg_face.Vgg_face()
        dan.build(img_ph, keep_prob)
        dgn = vgg_face.DGN()
        dgn.build(lm_ph, keep_prob)

        with tf.name_scope('dan'):
            dan_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=dan.fc8, labels=label_ph)
            dan_loss = tf.reduce_mean(dan_cross_entropy)

        with tf.name_scope('dgn'):
            dgn_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=dgn.fc3, labels=label_ph)
            dgn_loss = tf.reduce_mean(dgn_cross_entropy)

        with tf.name_scope('dagn'):
            dagn_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=dan.fc8 + dgn.fc3, labels=label_ph)
            dagn_loss = tf.reduce_mean(dagn_cross_entropy)

        with tf.name_scope('loss'):
            loss = dan_loss + dgn_loss + 0.1 * dagn_loss
            train_step = tf.train.AdamOptimizer(lr_ph).minimize(loss)

        with tf.name_scope('acc'):
            pred = tf.nn.softmax(dan.fc8 + dgn.fc3)
            correct_prediction = tf.equal(tf.argmax(pred, 1),
                                          tf.argmax(label_ph, 1))
            accuracy = tf.reduce_sum(tf.cast(correct_prediction, tf.float32))

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        best_acc = 0.0
        lr = 1e-4
        best_loss = 1000
        for i in range(epoch):

            # Step-decay schedule: decay the learning rate by 2% each epoch
            # and reset it to 1e-4 every 50 epochs.
            if i % 50 == 0 and i != 0:
                lr = 1e-4
                print('\nlearning rate has been reset to', lr)
            lr = 0.98 * lr

            cnt = 0
            for im, lm, gt in gen_batch(im_train, lm_train, gt_train,
                                        batch_size):

                tStart = time.time()
                sess.run(train_step,
                         feed_dict={
                             img_ph: im,
                             lm_ph: lm,
                             label_ph: gt,
                             keep_prob: 1.0,
                             lr_ph: lr
                         })
                tEnd = time.time()
                print_process(cnt, im_train.shape[0] // batch_size,
                              tEnd - tStart)
                if cnt == im_train.shape[0] // batch_size:
                    break
                cnt += 1

            train_acc = 0.0
            train_loss = 0.0
            for im, lm, gt in gen_batch(im_train, lm_train, gt_train,
                                        batch_size):

                acc, l = sess.run((accuracy, loss),
                                  feed_dict={
                                      img_ph: im,
                                      lm_ph: lm,
                                      label_ph: gt,
                                      keep_prob: 1.0
                                  })
                train_acc += acc
                train_loss += l

            val_acc = 0.0
            val_loss = 0.0
            for im, lm, gt in gen_batch(x_val, lm_val, gt_val, batch_size):

                acc, l = sess.run((accuracy, loss),
                                  feed_dict={
                                      img_ph: im,
                                      lm_ph: lm,
                                      label_ph: gt,
                                      keep_prob: 1.0
                                  })
                val_acc += acc
                val_loss += l

            if (best_acc == val_acc / x_val.shape[0] and best_loss > val_loss
                ) or best_acc < val_acc / x_val.shape[0]:

                print("Epoch: %d, training accuracy %.4f, loss: %.4f, val_acc: %.4f, val_loss: %.4f     val improve from %.4f to %.4f, save model." \
                    %(i+1, train_acc/im_train.shape[0], train_loss, val_acc/x_val.shape[0], val_loss, best_acc, val_acc/x_val.shape[0]))
                best_acc = val_acc / x_val.shape[0]
                best_loss = val_loss
                saver.save(sess, '../model/dgan.ckpt')

            else:

                print("Epoch: %d, training accuracy %.4f, loss: %.4f, val_acc: %.4f, val_loss: %.4f     val_acc doesn't improve." \
                 %(i+1, train_acc/im_train.shape[0], train_loss, val_acc/x_val.shape[0], val_loss))
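The loop above compounds the 2%-per-epoch decay with the reset to 1e-4 every 50 epochs. The same schedule as a pure function, handy for sanity-checking the values it produces (a sketch, not part of the original):

def lr_at(epoch, base=1e-4, decay=0.98, reset_every=50):
    # Learning rate in effect during `epoch` (0-indexed), mirroring the
    # training loop: reset to `base` every `reset_every` epochs, then
    # decay by 2% once per epoch.
    return base * decay ** (epoch % reset_every + 1)

for e in (0, 1, 49, 50, 99):
    print(e, lr_at(e))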
Example #4
def main(model):

    #parser = argparse.ArgumentParser(prog='test.py', description='ref_emotion.')
    #parser.add_argument('--model', type=str, default='dgn')
    #args = parser.parse_args()

    lm_ph = tf.placeholder(tf.float32, [None, 51 * 2])
    img_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])
    keep_prob = tf.placeholder(tf.float32)

    with tf.Session() as sess:

        dgn = vgg_face.DGN()
        # dgn.train = False
        dgn.build(lm_ph, keep_prob)
        pred_dgn = tf.argmax(dgn.prob, 1)

        dan = vgg_face.Vgg_face()
        # dan.train = False
        dan.build(img_ph, keep_prob)
        pred_dan = tf.argmax(dan.prob, 1)

        # Initialize all variables before optionally restoring the jointly
        # fine-tuned checkpoint; initializing afterwards would overwrite
        # the restored weights.
        sess.run(tf.global_variables_initializer())

        if model == 'weighted-sum':

            prob_sum = tf.nn.softmax(dan.fc8 + dgn.fc3)
            pred_sum = tf.argmax(prob_sum, 1)

        elif model == 'joint-fine-tune':

            saver = tf.train.Saver()
            saver.restore(sess, '../model/dgan.ckpt')
            prob_joint = tf.nn.softmax(dan.fc8 + dgn.fc3)
            pred_joint = tf.argmax(prob_joint, 1)

        stream = urllib.urlopen('http://admin:@192.168.0.194:3333/MJPEG.CGI')
        #stream = cv2.VideoCapture('../../videos/20180226_154918.mp4')
        #stream = cv2.VideoCapture(clip)
        #if stream.isOpened() == False:
        #print('Error opening video stream of file')

        buf = ''   # accumulated MJPEG byte stream; avoids shadowing `bytes`
        emo_record = (np.ones(frames, dtype=int) * 5).tolist()   # seeded with 'neutral'

        #TODO
        '''
        emo_buffer = collections.deque(maxlen=10)
        state_record = []
        final_states = []
        '''

        import time
        time.sleep(10.0)   # pause before starting to consume the stream

        while True:
            #while stream.isOpened():

            buf += stream.read(1024)
            a = buf.find('\xff\xd8')   # JPEG start-of-image marker
            b = buf.find('\xff\xd9')   # JPEG end-of-image marker
            #ret, frame = stream.read()
            if a != -1 and b != -1:
                #if ret == True:
                frame = cv2.imdecode(
                    np.fromstring(buf[a:b + 2], dtype=np.uint8), 1)
                buf = buf[b + 2:]

                num, face, shape, shape_origin = dlib_detect(
                    frame, 2, face_det, lm_det, 224, 224)
                if num == 1:

                    # Keep the 51 non-jawline landmarks (dlib points 17-67)
                    # and center them on the nose tip (point 30).
                    shape_norm = shape[17:] - shape[30]
                    shape_norm = shape_norm.reshape([1, 51 * 2])
                    if model == 'dan':
                        pred = sess.run(pred_dan,
                                        feed_dict={
                                            img_ph:
                                            face.reshape([1, 224, 224, 3]),
                                            keep_prob: 1.0
                                        })
                    elif model == 'dgn':
                        pred = sess.run(pred_dgn,
                                        feed_dict={
                                            lm_ph: shape_norm,
                                            keep_prob: 1.0
                                        })
                    elif model == 'weighted-sum':
                        pred = sess.run(pred_sum,
                                        feed_dict={
                                            img_ph:
                                            face.reshape([1, 224, 224, 3]),
                                            lm_ph: shape_norm,
                                            keep_prob: 1.0
                                        })
                    elif model == 'joint-fine-tune':
                        pred = sess.run(pred_joint,
                                        feed_dict={
                                            img_ph:
                                            face.reshape([1, 224, 224, 3]),
                                            lm_ph: shape_norm,
                                            keep_prob: 1.0
                                        })

                    # Sliding-window majority vote over the last `frames`
                    # predictions smooths out per-frame jitter.
                    emo_record.append(int(pred))
                    del emo_record[0]
                    ctr = collections.Counter(emo_record)

                    #TODO
                    '''
                    emo_buffer.append(ctr)
                    emo_his = collections.Counter()
                    emo_his_table = np.zeros(len(emo_list))
                    emo_now_table = np.zeros(len(emo_list))
                    for c in emo_buffer:
                        emo_his += c
                    emo_his_avg = [v/float(len(emo_buffer)) for v in emo_his.values()]
                    emo_his = emo_his.items()
                    emo_his = np.array([np.array([list(emo_his[i])[0], emo_his_avg[i]]) for i in range(len(emo_his))])
                    emo_his_table[emo_his[:,0].astype(int)] = emo_his[:,1]
                    emo_now = ctr.items()
                    emo_now = np.array([np.array([list(e)[0], list(e)[1]]) for e in emo_now])
                    emo_now_table[emo_now[:,0].astype(int)] = emo_now[:,1] 
                    state = np.array([(emo_now_table[i]>=3 and (emo_now_table[i]-emo_his_table[i])>=0.0) for i in range(len(emo_list))]).astype(int)

                    state_int = 0
                    for i, j in enumerate(state):
                        state_int += j<<i

                    state_record.append(state_int)
                    if len(state_record) == 10:
                        state_ctr = collections.Counter(state_record)
                        final_state = state_ctr.most_common()[0][0]
                        final_states.append(final_state)
                        del state_record[:]
                    '''

                    emotion = emo_list[ctr.most_common()[0][0]]
                    #print(emotion)
                    im_show = show_detection(frame, shape_origin, 1, emotion)

                    cv2.imshow('frame', im_show)
                else:
                    cv2.imshow('frame', frame)

                if cv2.waitKey(3) & 0xFF == ord('q'):
                    break
            #else:
            #break

        #stream.release()
        cv2.destroyAllWindows()
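This example is written for Python 2 (`urllib.urlopen`, `str` byte buffers, `np.fromstring`). A minimal sketch of the same MJPEG frame extraction ported to Python 3, with the camera URL and chunk size taken from the example above and everything else a straightforward translation:

import urllib.request

import cv2
import numpy as np

stream = urllib.request.urlopen('http://admin:@192.168.0.194:3333/MJPEG.CGI')
buf = b''
while True:
    buf += stream.read(1024)
    soi = buf.find(b'\xff\xd8')   # JPEG start-of-image marker
    eoi = buf.find(b'\xff\xd9')   # JPEG end-of-image marker
    if soi != -1 and eoi != -1:
        # Decode the complete JPEG between the two markers, then drop it
        # from the buffer.
        frame = cv2.imdecode(np.frombuffer(buf[soi:eoi + 2], np.uint8), 1)
        buf = buf[eoi + 2:]
        cv2.imshow('frame', frame)
        if cv2.waitKey(3) & 0xFF == ord('q'):
            break
cv2.destroyAllWindows()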
Example #5
def main():

    parser = argparse.ArgumentParser(prog='test.py',
                                     description='ref_emotion.')
    parser.add_argument('--model', type=str, default='dgn')
    args = parser.parse_args()

    lm_ph = tf.placeholder(tf.float32, [None, 51 * 2])
    img_ph = tf.placeholder(tf.float32, [None, 224, 224, 3])
    keep_prob = tf.placeholder(tf.float32)

    with tf.Session() as sess:

        dgn = vgg_face.DGN()
        # dgn.train = False
        dgn.build(lm_ph, keep_prob)
        pred_dgn = tf.argmax(dgn.prob, 1)

        dan = vgg_face.Vgg_face()
        # dan.train = False
        dan.build(img_ph, keep_prob)
        pred_dan = tf.argmax(dan.prob, 1)

        # Initialize all variables before optionally restoring the jointly
        # fine-tuned checkpoint; initializing afterwards would overwrite
        # the restored weights.
        sess.run(tf.global_variables_initializer())

        if args.model == 'weighted-sum':

            prob_sum = tf.nn.softmax(dan.fc8 + dgn.fc3)
            pred_sum = tf.argmax(prob_sum, 1)

        elif args.model == 'joint-fine-tune':

            saver = tf.train.Saver()
            saver.restore(sess, '../model/dgan.ckpt')
            prob_joint = tf.nn.softmax(dan.fc8 + dgn.fc3)
            pred_joint = tf.argmax(prob_joint, 1)

        cap = cv2.VideoCapture(0)
        # Seed the vote window with 'neutral' (index 5 in emo_list).
        emo_record = (np.ones(frames, dtype=int) * 5).tolist()

        while cap.isOpened():

            ret, frame = cap.read()
            if ret:

                num, face, shape, shape_origin = dlib_detect(
                    frame, 2, face_det, lm_det, 224, 224)
                if num == 1:

                    # Keep the 51 non-jawline landmarks (dlib points 17-67)
                    # and center them on the nose tip (point 30).
                    shape_norm = shape[17:] - shape[30]
                    shape_norm = shape_norm.reshape([1, 51 * 2])
                    if args.model == 'dan':
                        pred = sess.run(pred_dan,
                                        feed_dict={
                                            img_ph:
                                            face.reshape([1, 224, 224, 3]),
                                            keep_prob: 1.0
                                        })
                    elif args.model == 'dgn':
                        pred = sess.run(pred_dgn,
                                        feed_dict={
                                            lm_ph: shape_norm,
                                            keep_prob: 1.0
                                        })
                    elif args.model == 'weighted-sum':
                        pred = sess.run(pred_sum,
                                        feed_dict={
                                            img_ph:
                                            face.reshape([1, 224, 224, 3]),
                                            lm_ph: shape_norm,
                                            keep_prob: 1.0
                                        })
                    elif args.model == 'joint-fine-tune':
                        pred = sess.run(pred_joint,
                                        feed_dict={
                                            img_ph:
                                            face.reshape([1, 224, 224, 3]),
                                            lm_ph: shape_norm,
                                            keep_prob: 1.0
                                        })

                    # Sliding-window majority vote over the last `frames`
                    # predictions smooths out per-frame jitter.
                    emo_record.append(int(pred))
                    del emo_record[0]
                    ctr = collections.Counter(emo_record)
                    emotion = emo_list[ctr.most_common()[0][0]]
                    im_show = show_detection(frame, shape_origin, 1, emotion)

                    cv2.imshow('frame', im_show)
                else:
                    cv2.imshow('frame', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
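Examples #4 and #5 both smooth the per-frame prediction with a fixed-length majority vote. The same idea in isolation, using a deque so the window management is explicit (the window length and label list mirror the examples; this is a sketch, not code from the repository):

import collections

emo_list = ['angry', 'contemptuous', 'disgusted', 'fearful',
            'happy', 'neutral', 'sad', 'surprised']
frames = 10

# Fixed-length window, pre-filled with 'neutral' (index 5).
window = collections.deque([5] * frames, maxlen=frames)

def smooth(pred):
    # Append the newest prediction; the oldest drops out automatically.
    window.append(pred)
    # Report the most common label in the window.
    return emo_list[collections.Counter(window).most_common(1)[0][0]]

for p in [5, 4, 4, 4, 4, 4, 4]:
    print(smooth(p))   # flips to 'happy' once index 4 dominates the window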