import numpy as np
import myVGG  # project module that builds the VGG-16 network
from keras.callbacks import EarlyStopping, LambdaCallback


def main():
    model = myVGG.VGG_16()
    
    X_fname = 'C:/Users/Sai/Desktop/Facial_Expression_Recognition/Data_Generation/data/X_train_train.npy'
    y_fname = 'C:/Users/Sai/Desktop/Facial_Expression_Recognition/Data_Generation/data/y_train_train.npy'
    X_train = np.load(X_fname)
    y_train = np.load(y_fname)
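    # Shape assumption (based on the 48x48 reshapes used elsewhere in this
    # project): X_train is (N, 1, 48, 48) grayscale faces, y_train is one-hot labels.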
    print(X_train.shape)
    print(y_train.shape)
   
    print("Training started")

    callbacks = []
    earlystop_callback = EarlyStopping(monitor='val_loss', patience=5, verbose=0)
    batch_print_callback = LambdaCallback(on_batch_begin=lambda batch, logs: print(batch))
    epoch_print_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: print("epoch:", epoch))
    callbacks.append(earlystop_callback)
    callbacks.append(batch_print_callback)
    callbacks.append(epoch_print_callback)

    batch_size = 512
    model.fit(X_train, y_train,
              epochs=400,  # `nb_epoch` in the original; renamed to `epochs` in Keras 2
              batch_size=batch_size,
              validation_split=0.2,
              shuffle=True, verbose=0,
              callbacks=callbacks)

    model.save_weights('C:/Users/Sai/Desktop/Facial_Expression_Recognition/CNN_Model/my_model_weights.h5')
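    # Note: this evaluates on the training set, so it measures fit to the
    # training data rather than generalization.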
    scores = model.evaluate(X_train, y_train, verbose=0)
    print ("Train loss : %.3f" % scores[0])
    print ("Train accuracy : %.3f" % scores[1])
    print ("Training finished")
Example #2
def main():
    # `args` (parsed command-line flags) and `fu` (the project's
    # feature/preprocessing helper module) are defined elsewhere in this script.
    model = myVGG.VGG_16()

    if args.test is not None:
        print("Test mode")
        img = cv2.imread(args.test)
        img = fu.preprocessing(img)
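        # expand_dims adds the leading batch dimension model.fit expects;
        # the dummy label below gets the same treatment.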
        img = np.expand_dims(img, axis=0)
        y = np.expand_dims(np.asarray([0]), axis=0)
        batch_size = 1
        # Single-sample smoke test of the training loop. `validation_split`
        # is dropped here: with one sample, an 80/20 split leaves no training data.
        model.fit(img, y,
                  epochs=400,  # `nb_epoch` in the original; renamed to `epochs` in Keras 2
                  batch_size=batch_size,
                  shuffle=True, verbose=0)
        return

    #input_path = args.data_path
    #print("training data path : " + input_path)
    #X_train, y_train = fu.extract_features(input_path)
    X_fname = '../data/X_train_train.npy'
    y_fname = '../data/y_train_train.npy'
    X_train = np.load(X_fname)
    y_train = np.load(y_fname)
    print(X_train.shape)
    print(y_train.shape)

    print("Training started")

    callbacks = []
    earlystop_callback = EarlyStopping(monitor='val_loss',
                                       patience=5,
                                       verbose=0)
    batch_print_callback = LambdaCallback(
        on_batch_begin=lambda batch, logs: print(batch))
    epoch_print_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: print("epoch:", epoch))
    callbacks.append(earlystop_callback)
    callbacks.append(batch_print_callback)
    callbacks.append(epoch_print_callback)

    batch_size = 512
    model.fit(X_train, y_train,
              epochs=400,  # `nb_epoch` in the original; renamed to `epochs` in Keras 2
              batch_size=batch_size,
              validation_split=0.2,
              shuffle=True, verbose=0,
              callbacks=callbacks)

    model.save_weights('my_model_weights.h5')
    scores = model.evaluate(X_train, y_train, verbose=0)
    print("Train loss : %.3f" % scores[0])
    print("Train accuracy : %.3f" % scores[1])
    print("Training finished")
Example #3
def main():
    model = myVGG.VGG_16('my_model_weights.h5')

    if args.image is not None:
        print('Image Prediction Mode')
        img = fu.preprocessing(cv2.imread(args.image))
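        # Two expand_dims calls turn the (48, 48) array into a (1, 1, 48, 48)
        # batch: one sample, one channel (channels-first layout).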
        X = np.expand_dims(img, axis=0)
        X = np.expand_dims(X, axis=0)
        result = model.predict(X)
        print(result)
        return
    elif args.dataset is not None:
        print("Directory Prediction Mode")
        X, y = fu.extract_features(args.dataset)
        scores = model.evaluate(X, y, verbose=0)
        print(scores)
        return
Example #4
import os
import sys

import tensorflow as tf

# Let TensorFlow allocate GPU memory on demand instead of claiming it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# This must be set before Keras is first imported for the backend choice to take effect.
os.environ['KERAS_BACKEND'] = 'tensorflow'

sys.path.append('C:/Users/Sai/Desktop/Facial_Expression_Recognition/CNN_Model')
sys.path.append(
    'C:/Users/Sai/Desktop/Facial_Expression_Recognition/Model_Training')

emotion = ['Angry', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
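# The order of these labels must match the one-hot encoding used at training time.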

import myVGG

model = myVGG.VGG_16(
    'C:/Users/Sai/Desktop/Facial_Expression_Recognition/Model_Training/my_model_weights.h5'
)

import cv2
import numpy as np


def preprocessing(img, size=(48, 48)):
    # cv2.imread returns BGR, so BGR2GRAY is the accurate conversion here
    # (the original used COLOR_RGB2GRAY; for grayscale the difference is small).
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, size).astype(np.float32)
    return img


def predict_emotion(gray_face):
    resized_img = cv2.resize(gray_face, (48, 48), interpolation=cv2.INTER_AREA)
    image = resized_img.reshape(1, 1, 48, 48)
    # The original snippet is truncated here; a plausible completion runs the
    # model and maps the highest-scoring class index to its label.
    result = model.predict(image)
    return emotion[np.argmax(result[0])]
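

# Minimal usage sketch (image path is illustrative): load a face image,
# run it through preprocessing(), then classify the grayscale crop.
#
#   face_gray = preprocessing(cv2.imread('face.jpg'))
#   print(predict_emotion(face_gray))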
Example #5

import argparse

import myVGG

import cv2
import numpy as np

parser = argparse.ArgumentParser(description="Testing Prediction")
parser.add_argument('--image',
                    help='Input an image to test model prediction')
parser.add_argument('--dataset',
                    help='Input a directory to test model prediction')
parser.add_argument('--model', help='Input a trained model')

args = parser.parse_args()
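
# Example invocation (script name is illustrative):
#   python test_prediction.py --image face.jpg --model my_model_weights.h5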

if args.model:
    model = myVGG.VGG_16(args.model)


def main():
    # Note: this local `model` shadows the module-level one built from
    # `args.model` above; main() always loads 'my_model_weights.h5'.
    model = myVGG.VGG_16('my_model_weights.h5')

    if args.image is not None:
        print('Image Prediction Mode')
        img = fu.preprocessing(cv2.imread(args.image))
        X = np.expand_dims(img, axis=0)
        X = np.expand_dims(X, axis=0)
        result = model.predict(X)

        print("class distribution: ", result)
        print("The best: ", np.argmax(result[0], 0))
        return