def main():
    """Train the CNN_16 emotion classifier on the prepared training arrays.

    Loads X/y from .npy files, fits the model with early stopping and
    progress-printing callbacks, then saves the weights and reports the
    final loss/accuracy computed over the training arrays.
    """
    model = vc.CNN_16()

    # Pre-split training arrays produced by the data-preparation step.
    X_train = np.load('X_train_train.npy')
    y_train = np.load('y_train_train.npy')
    print(X_train.shape)
    print(y_train.shape)

    print("Training started")

    # Stop when validation loss has not improved for 5 epochs, and print
    # per-batch / per-epoch progress (fit itself runs with verbose=0).
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=5, verbose=0),
        LambdaCallback(on_batch_begin=lambda batch, logs: print(batch)),
        LambdaCallback(on_epoch_end=lambda epoch, logs: print("epoch:", epoch)),
    ]

    batch_size = 512
    # NOTE(review): nb_epoch is the Keras 1.x keyword; rename to epochs=
    # if the project moves to Keras 2+. Backslash continuations removed —
    # the parenthesized call already continues across lines.
    model.fit(X_train, y_train, nb_epoch=400,
              batch_size=batch_size,
              validation_split=0.2,
              shuffle=True, verbose=0,
              callbacks=callbacks)

    model.save_weights('my_model_weights.h5')
    # Final metrics are computed on the full training arrays (including
    # the 20% slice used as validation during fit).
    scores = model.evaluate(X_train, y_train, verbose=0)
    print ("Train loss : %.3f" % scores[0])
    print ("Train accuracy : %.3f" % scores[1])
    print ("Training finished")
# ---- Example 2 ----
def main():
    """Load trained weights and predict the emotion class of one image."""
    model = vgg.CNN_16('final_weights.h5')

    print ('Image Prediction Mode')
    img = preprocessing(cv2.imread('girl2.jpg'))

    # Add batch and channel axes: (H, W) -> (1, 1, H, W).
    X = img[np.newaxis, np.newaxis, ...]
    # predict_classes returns the index of the most probable class.
    result = model.predict_classes(X)
    print(result)
def main(
    weights_path='C:/Users/Harsh/PycharmProjects/EMOTION_classifier_project/Model_Training/final_weights.h5',
    image_path='C:/Users/Harsh/PycharmProjects/EMOTION_classifier_project/girl2.jpg',
):
    """Predict the emotion class of a single image.

    The hard-coded, machine-specific absolute paths are now parameters
    with the original values as defaults, so existing callers keep
    working while other machines can pass their own paths.

    Args:
        weights_path: Path to the trained model weights (.h5 file).
        image_path: Path to the image to classify.
    """
    model = vgg.CNN_16(weights_path)
    print('Image Prediction Mode')
    img = preprocessing(cv2.imread(image_path))
    # Add batch and channel axes: (H, W) -> (1, 1, H, W).
    X = np.expand_dims(img, axis=0)
    X = np.expand_dims(X, axis=0)
    # predict_classes will enable us to select most probable class
    result = model.predict_classes(X)
    print(result)
# ---- Example 4 ----
# Split `training` by index: items 6000 onward become the validation
# partition, the first 6000 the train partition.
valid_data = training[6000:]  # 2000 for validation
train_data = training[:6000]  # 6000 for train

print("test set size:", len(test_data))
print("validation set size:", len(valid_data))
print("train set size:", len(train_data))

# Wrap each split in the project's CustomDataset; ToTensor converts
# samples to torch tensors (scaling to [0, 1] for PIL/uint8 images).
train_set = CustomDataset(train_data, transform=transforms.ToTensor())
valid_set = CustomDataset(valid_data, transform=transforms.ToTensor())
test_set = CustomDataset(test_data, transform=transforms.ToTensor())

# Mini-batches of 100 for training; one sample per batch for evaluation.
train_dataloader = DataLoader(dataset=train_set, batch_size=100, shuffle=True)
test_dataloader = DataLoader(dataset=test_set, batch_size=1, shuffle=True)
valid_dataloader = DataLoader(dataset=valid_set, batch_size=1, shuffle=True)

model = CNN().to(device)


def train(model, train_dataloader, valid_dataloader, device):
    """Train `model` for 15 epochs with Adam and MultiLabelSoftMarginLoss.

    NOTE(review): this definition is truncated in this view — the visible
    body computes the loss but the backward/step (and any validation over
    `valid_dataloader`) are not shown here.
    """
    model.train()
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    for epoch in range(15):
        model.train()  # redundant — already set above, but harmless
        for i, (images, labels) in enumerate(train_dataloader):
            # Move the batch to the target device before the forward pass.
            images = images.to(device)
            labels = labels.to(device)

            predict = model(images)
            optimizer.zero_grad()
            loss = criterion(predict, labels)
# ---- Example 5 ----
    # Tail of the training-set name list (the opening of this list
    # literal is outside the visible portion of the file).
    'training_set_r10',
    'training_set_r11',
]

# Map each dataset's name string to the dataset object bound to that
# variable name. NOTE(review): eval() on these fixed literal names works,
# but a plain dict of the dataset variables would avoid eval entirely —
# never extend this pattern to names from external input.
training_set_dict = {}
for training_set in training_set_list:
    training_set_dict.update({str(training_set): eval(training_set)})

# Concatenate all per-run datasets into one training set.
training_set = torch.utils.data.ConcatDataset(training_set_dict.values())
training_loader = DataLoader(training_set,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=in_workers)

# Define the model
model = CNN(as_gray=as_grayscale, use_convcoord=use_convcoord)
model.double().to(device=device)  # double precision, then move to device
print(model)

# Loss and optimizer
criterion = nn.SmoothL1Loss()  # Huber-style loss; suggests regression target
optimizer = torch.optim.SGD(model.parameters(),
                            lr=learning_rate,
                            weight_decay=1e-3)

#################################
#        Train the model        #
#################################

total_step = len(training_loader)
# NOTE(review): the statement below is cut off in this view
# (unterminated join call).
model_save_name = ''.join([
import sys,cv2
import numpy as np

import CNN_Model.CNN as vc

windowsName = 'Preview Screen'

# Haar cascade for frontal-face detection (machine-specific absolute path).
CASCADE_PATH = "C:/Users/Harsh/PycharmProjects/EMOTION_classifier/Prediction_Video/haarcascade_frontalface_default.xml"
cascade = cv2.CascadeClassifier(CASCADE_PATH)
# Emotion labels; presumably indexed by the model's predicted class —
# TODO confirm this matches the training label order.
emotion = ['Angry', 'Fear', 'Happy','Sad', 'Surprise', 'Neutral']
model = vc.CNN_16('C:/Users/Harsh/PycharmProjects/EMOTION_classifier/Model_Training/final_weights.h5')

# Read frames from a video file; swap to the commented line for the webcam.
capture = cv2.VideoCapture("C:/Users/Harsh/PycharmProjects/EMOTION_classifier/Vid2.mp4")
#capture = cv2.VideoCapture(0)

def grayFace(image):
    """Convert a BGR frame to a histogram-equalized grayscale image."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.equalizeHist(gray)

def getFaceCoordinates(image):
    """Return bounding rectangles of faces detected in `image`.

    Runs the Haar cascade over the equalized grayscale version of the
    frame; detections smaller than 48x48 pixels are discarded.
    """
    return cascade.detectMultiScale(
        grayFace(image),
        scaleFactor=1.3,
        minNeighbors=2,
        minSize=(48, 48),
    )
import sys, cv2
import numpy as np

import CNN_Model.CNN as vc

windowsName = 'Preview Screen'

# Haar cascade for frontal-face detection (machine-specific absolute path).
CASCADE_PATH = "D:/ML/emotiondetector/venv/Prediction_Video/haarcascade_frontalface_default.xml"
cascade = cv2.CascadeClassifier(CASCADE_PATH)
# Emotion labels; presumably indexed by the model's predicted class —
# TODO confirm this matches the training label order.
emotion = ['Angry', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
model = vc.CNN_16('my_model_weights.h5')

# Source video used for prediction.
capture = cv2.VideoCapture("Vid2.mp4")


def grayFace(image):
    """Grayscale and histogram-equalize a BGR frame in one pass."""
    return cv2.equalizeHist(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))


def getFaceCoordinates(image):
    """Detect faces in `image`; returns their bounding rectangles.

    Faces are searched on the equalized grayscale frame; anything
    smaller than 48x48 pixels is ignored.
    """
    gray = grayFace(image)
    detections = cascade.detectMultiScale(gray,
                                          scaleFactor=1.3,
                                          minNeighbors=2,
                                          minSize=(48, 48))
    return detections