type=bool,
    help='Test the model in real time with webcam connect via usb')
args = parser.parse_args()

# Preprocessing: convert the grayscale face image to a tensor and normalize
# the single channel to the [-1, 1] range (mean 0.5, std 0.5).
transformation = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
# Test split: label CSV plus a directory of face images (project loader).
dataset = Plain_Dataset(csv_file=args.data + '/finaltest.csv',
                        img_dir=args.data + '/' + 'finaltest/',
                        datatype='finaltest',
                        transform=transformation)
test_loader = DataLoader(dataset, batch_size=64, num_workers=0)

net = Deep_Emotion()
print("Deep Emotion:-", net)
# map_location lets a checkpoint saved on GPU be loaded on a CPU-only
# machine; without it torch.load raises when CUDA is unavailable.
net.load_state_dict(torch.load(args.model, map_location=device))
net.to(device)
net.eval()
# Model Evaluation on test data
classes = ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral')
total = []
if args.test_acc:
    with torch.no_grad():
        for data, labels in test_loader:
            data, labels = data.to(device), labels.to(device)
            outputs = net(data)
            pred = F.softmax(outputs, dim=1)
            classs = torch.argmax(pred, 1)
            # 1.0 where misclassified, 0.0 where correct.  Derive the mask
            # directly from the comparison instead of the original
            # torch.where(..., torch.tensor([1.]).cuda(), ...) so the
            # evaluation also runs on CPU-only machines (everything else in
            # this loop is already device-aware via .to(device)).
            wrong = (classs != labels).float()
# --- Beispiel #2: second, independent example fragment (webcam/demo variant) ---
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Class index -> emotion label, matching the network's 7 output classes.
mapping = {
    0: 'Angry',
    1: 'Disgust',
    2: 'Fear',
    3: 'Happy',
    4: 'Sad',
    5: 'Surprise',
    6: 'Neutral'
}

net = Deep_Emotion()  # In pytorch we have to first load the model
file_name = 'deep_emotion-100-128-0.005.pt'
# map_location allows a GPU-trained checkpoint to load on a CPU-only machine;
# without it torch.load fails on hosts where CUDA is unavailable.
net.load_state_dict(torch.load(file_name, map_location=device))
net.to(device)

# OpenCV Haar cascade for face detection plus text-drawing settings.
path = "haarcascade_frontalface_default.xml"
font_scale = 1.5
font = cv2.FONT_HERSHEY_PLAIN

# SET THE RECTANGLE BACKGROUND TO WHITE
rectangle_bgr = (255, 255, 255)

# MAKE A BLACK IMAGE
img = np.zeros((500, 500))

# SET SOME TEXT
text = "Some text in a box!"
validation_img_dir = 'data' + '/' + 'val/'

# Same preprocessing as at test time: tensor + normalize channel to [-1, 1].
transformation = transforms.Compose([transforms.ToTensor(),
                                     transforms.Normalize((0.5,), (0.5,))])
# NOTE(review): traincsv_file, train_img_dir, validationcsv_file, batchsize,
# lr, epochs and Train are not defined in this fragment -- presumably set
# earlier in the notebook/script; confirm before running standalone.
train_dataset = Plain_Dataset(csv_file=traincsv_file, img_dir=train_img_dir,
                              datatype='train', transform=transformation)
validation_dataset = Plain_Dataset(csv_file=validationcsv_file,
                                   img_dir=validation_img_dir,
                                   datatype='val', transform=transformation)
train_loader = DataLoader(train_dataset, batch_size=batchsize, shuffle=True,
                          num_workers=0)
val_loader = DataLoader(validation_dataset, batch_size=batchsize, shuffle=True,
                        num_workers=0)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=lr)  # local name: fixed 'optmizer' typo
Train(epochs, train_loader, val_loader, criterion, optimizer, device)

torch.save(net.state_dict(), 'new_Emotion_trained_Lea.pt')

net = Deep_Emotion()
# map_location so the checkpoint also loads on CPU-only hosts even if it was
# written from a CUDA run.
net.load_state_dict(torch.load('new_Emotion_trained_Lea.pt',
                               map_location=device))
net.to(device)

# Test it on a saved image:

import matplotlib.pyplot as plt

# NOTE(review): notebook-style inline pip install via IPython's get_ipython();
# in a plain script these dependencies belong in requirements.txt instead,
# and get_ipython() is undefined outside IPython/Jupyter -- confirm context.
get_ipython().system('pip install opencv-python')

import cv2

# NOTE(review): hard-coded absolute Windows path -- only works on the
# author's machine; parameterize the image path before reuse.
frame = cv2.imread("C:\\Users\\kassa\\11-test\\Deep-Emotion-master\\happy.jpg")

get_ipython().system('pip install deepface')

from deepface import DeepFace