# --- Training callbacks -----------------------------------------------------
# Halve-then-halve LR when validation loss plateaus; checkpoint best model.
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
                              patience=int(patience / 4), verbose=1)
trained_models_path = base_path + '_mini_XCEPTION'
# NOTE(review): newer Keras logs the metric as 'val_accuracy', not 'val_acc' —
# confirm against the Keras version in use or the checkpoint filename
# formatting will raise a KeyError.
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,
                                   save_best_only=True)
callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]

# --- Dataset ----------------------------------------------------------------
# Load FER2013 faces/one-hot emotions, normalize pixels, hold out 20% for
# validation.
faces, emotions = load_fer2013()
faces = preprocess_input(faces)
num_samples, num_classes = emotions.shape
xtrain, xtest, ytrain, ytest = train_test_split(faces, emotions,
                                                test_size=0.2, shuffle=True)

# --- Training ---------------------------------------------------------------
# BUGFIX: steps_per_epoch must be an integer. `len(xtrain) / batch_size` is a
# float under Python 3 and is rejected by recent Keras releases; use floor
# division instead.
model.fit_generator(data_generator.flow(xtrain, ytrain, batch_size),
                    steps_per_epoch=len(xtrain) // batch_size,
                    epochs=num_epochs, verbose=1,
                    callbacks=callbacks,
                    validation_data=(xtest, ytest))
model.save("hdf.h5")
def run(self, frame_in, canvas, label_face, label_result):
    """Detect faces in a frame, classify their emotion, and render results.

    Args:
        frame_in: camera frame or image (BGR, as produced by OpenCV).
        canvas: background image the per-class probability bars are drawn on.
        label_face: Qt label widget that displays the annotated frame.
        label_result: Qt label widget that displays the probability canvas.

    Returns:
        The predicted emotion label. Currently always None — the assignment
        is disabled below (kept as a comment for reference).
    """
    # Resize to a fixed width so detection works at a consistent scale.
    frame = imutils.resize(frame_in, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the grayscale frame.
    faces = self.face_detection.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)

    preds = []                                  # classifier output for the last face
    label = None                                # predicted label (assignment disabled)
    (fX, fY, fW, fH) = None, None, None, None   # last face bounding box

    frameClone = frame.copy()
    if len(faces) > 0:
        # detectMultiScale returns (x, y, w, h) tuples, so face area is w * h.
        # BUGFIX: the original key (x[2]-x[0])*(x[3]-x[1]) mixed coordinates
        # with sizes and did not compute the true area.
        faces = sorted(faces, key=lambda f: f[2] * f[3])  # ascending by area
        for (fX, fY, fW, fH) in faces:
            # Extract the face ROI and prepare it for the classifier:
            # resize to the model's input, normalize, add batch dimension.
            roi = gray[fY:fY + fH, fX:fX + fW]
            roi = cv2.resize(roi, self.emotion_classifier.input_shape[1:3])
            roi = preprocess_input(roi)
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            preds = self.emotion_classifier.predict(roi)[0]
            # label = self.EMOTIONS[preds.argmax()]
            # cv2.putText(frameClone, label, (fX, fY - 10),
            #             cv2.FONT_HERSHEY_TRIPLEX, 0.4, (0, 255, 0), 1)
            cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
                          (255, 255, 0), 1)

    # Draw one probability bar + percentage text per emotion class.
    for (i, (emotion, prob)) in enumerate(zip(self.EMOTIONS, preds)):
        text = "{}: {:.2f}%".format(emotion, prob * 100)
        w = int(prob * 300) + 7  # bar length scaled to a 300px-wide canvas
        cv2.rectangle(canvas, (7, (i * 35) + 5), (w, (i * 35) + 35),
                      (224, 200, 130), -1)
        cv2.putText(canvas, text, (10, (i * 35) + 23),
                    cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 0, 0), 1)

    # Show the annotated frame in the face label (Qt expects RGB).
    frameClone = cv2.resize(frameClone, (420, 280))
    show = cv2.cvtColor(frameClone, cv2.COLOR_BGR2RGB)
    showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                             QtGui.QImage.Format_RGB888)
    label_face.setPixmap(QtGui.QPixmap.fromImage(showImage))
    QtWidgets.QApplication.processEvents()

    # Show the probability canvas in the result label.
    show = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
    showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0],
                             QtGui.QImage.Format_RGB888)
    label_result.setPixmap(QtGui.QPixmap.fromImage(showImage))
    return (label)