Example #1
# Keras/NumPy imports for this snippet (assuming the standalone Keras API;
# DataManager, process_img, split_raf_data and plot_progress are helpers
# from the surrounding project).
import os

import numpy as np
from keras.models import load_model


def evaluate(dataset, input_shape, model_path):
    """Evaluate a trained emotion model on the RAF validation split."""
    if os.path.exists(model_path):
        model = load_model(model_path)
    else:
        raise Exception("The model doesn't exist!")

    truth, prediction = [], []
    print("load data ...")
    if dataset == 'RAF':
        # loading dataset
        data_loader = DataManager(dataset, image_size=input_shape[:2])
        faces, emotions, usages = data_loader.get_data()
        faces = process_img(faces)
        train_data, val_data = split_raf_data(faces, emotions, usages)
        data, label = val_data
        count = len(label)
        correct = 0
        # predict each validation face and compare the argmax class
        # against the one-hot ground-truth label
        for i, d in enumerate(data):
            if i % 200 == 0:
                plot_progress(i, count)
            d = np.expand_dims(d, 0)  # add batch dimension
            emotion_values = model.predict(d)
            emotion_label_arg = np.argmax(emotion_values)
            p = int(emotion_label_arg)
            t = int(np.argmax(label[i]))
            if p == t:
                correct += 1
            prediction.append(p)
            truth.append(t)
        accuracy = correct / float(count)
        print(correct, count, accuracy)

    else:
        raise Exception("Only the RAF dataset is supported!")

    return truth, prediction, accuracy
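
A brief usage sketch for the function above; the input shape, the model path and the scikit-learn confusion matrix are illustrative assumptions, not values taken from the original project.

# usage sketch (illustrative values only)
from sklearn.metrics import confusion_matrix

truth, prediction, accuracy = evaluate('RAF', (100, 100, 3),
                                       'trained_models/emotion_model.hdf5')
print('validation accuracy: {:.4f}'.format(accuracy))
print(confusion_matrix(truth, prediction))  # per-class error breakdown
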
Example #2
# Keras / TensorFlow Lite imports for this snippet (assuming the standalone
# Keras API; choose_net, DataManager, process_img, split_raf_data and the
# UPPER_CASE settings come from the surrounding project).
from keras import optimizers
from keras.callbacks import CSVLogger, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow import lite

# build the emotion network and compile it with SGD + Nesterov momentum
emotion_model = choose_net(USE_EMOTION_MODEL, INPUT_SHAPE, EMOTION_NUM_CLS)
sgd = optimizers.SGD(lr=LEARNING_RATE, decay=LEARNING_RATE/BATCH_SIZE, momentum=0.9, nesterov=True)
emotion_model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

# callbacks
csv_logger = CSVLogger(EMOTION_LOG_NAME, append=False)
early_stop = EarlyStopping('val_loss', patience=PATIENCE)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(PATIENCE/4), verbose=1)
# model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(EMOTION_MODEL_NAME, 'val_loss', verbose=1,
                                   save_weights_only=False, save_best_only=True)
callbacks = [model_checkpoint, csv_logger, reduce_lr, early_stop]

# loading dataset
data_loader = DataManager(USE_EMOTION_DATASET, image_size=INPUT_SHAPE[:2])
faces, emotions, usages = data_loader.get_data()
faces = process_img(faces)
num_samples, num_classes = emotions.shape
train_data, val_data = split_raf_data(faces, emotions, usages)
train_faces, train_emotions = train_data
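
# `data_generator` is used by fit_generator below but is not defined in this
# snippet; a plausible Keras ImageDataGenerator with light augmentation is
# sketched here as an assumption (the original project's settings may differ).
from keras.preprocessing.image import ImageDataGenerator

data_generator = ImageDataGenerator(rotation_range=10,
                                    width_shift_range=0.1,
                                    height_shift_range=0.1,
                                    horizontal_flip=True)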

# if os.path.exists(EMOTION_MODEL_NAME):
#     emotion_net = load_model(EMOTION_MODEL_NAME)

emotion_model.fit_generator(data_generator.flow(train_faces, train_emotions, BATCH_SIZE),
                            steps_per_epoch=len(train_faces) // BATCH_SIZE, epochs=EPOCHS,
                            verbose=1, callbacks=callbacks, validation_data=val_data)


if IS_CONVERT2TFLITE:
    converter = lite.TFLiteConverter.from_keras_model_file(EMOTION_MODEL_NAME)
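    # The snippet stops after creating the converter; a minimal completion is
    # sketched here. The .tflite output path is an assumed name derived from
    # EMOTION_MODEL_NAME, not a constant taken from the original project.
    tflite_model = converter.convert()
    with open(EMOTION_MODEL_NAME.rsplit('.', 1)[0] + '.tflite', 'wb') as f:
        f.write(tflite_model)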