Ejemplo n.º 1
0
#         lr = 0.005
#         batchsize = 128

#     if args.train:
#         net = Deep_Emotion()
#         net.to(device)
#         print("Model archticture: ", net)
#         traincsv_file = args.data+'/'+'train.csv'
#         validationcsv_file = args.data+'/'+'val.csv'
#         train_img_dir = args.data+'/'+'train/'
#         validation_img_dir = args.data+'/'+'val/'

# --- Training configuration -------------------------------------------------
epochs = 100     # number of training epochs
lr = 0.005       # learning rate for the optimizer
batchsize = 128  # mini-batch size

# Build the model and move it to the selected device (GPU if available).
net = Deep_Emotion()
net.to(device)
print("Model architecture: ", net)  # fixed typo: "archticture" -> "architecture"

# Paths to the CSV label files and image directories produced by the
# data-generation step.
# NOTE(review): the 'data' root is hard-coded here; other variants of this
# script take it from args.data — confirm which is intended.
traincsv_file = 'data/train.csv'
validationcsv_file = 'data/val.csv'
train_img_dir = 'data/train/'
validation_img_dir = 'data/val/'

# Single-channel (grayscale) normalization to the [-1, 1] range.
transformation = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
train_dataset = Plain_Dataset(csv_file=traincsv_file,
                              img_dir=train_img_dir,
                              datatype='train',
                              transform=transformation)
validation_dataset = Plain_Dataset(csv_file=validationcsv_file,
Ejemplo n.º 2
0
        # NOTE(review): fragment is cut off above — these calls continue an
        # unseen conditional. They split the raw data into splits and write
        # the image files for each split to disk.
        generate_dataset.split_test()
        generate_dataset.save_images()
        generate_dataset.save_images('finaltest')
        generate_dataset.save_images('val')

    # Hyperparameters come from the command line when --hyperparams is set;
    # otherwise fall back to the built-in defaults.
    if args.hyperparams:
        epochs, lr, batchsize = (args.epochs, args.learning_rate,
                                 args.batch_size)
    else:
        epochs, lr, batchsize = 100, 0.005, 128

    if args.train:
        # Build the model and move it to the selected device.
        net = Deep_Emotion()
        net.to(device)
        print("Model architecture: ", net)  # fixed typo in the message

        # Label CSVs and image folders live under the user-supplied data root.
        traincsv_file = args.data + '/' + 'train.csv'
        validationcsv_file = args.data + '/' + 'val.csv'
        train_img_dir = args.data + '/' + 'train/'
        validation_img_dir = args.data + '/' + 'val/'

        # Single-channel (grayscale) normalization to the [-1, 1] range.
        transformation = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize((0.5, ), (0.5, ))])
        train_dataset = Plain_Dataset(csv_file=traincsv_file,
                                      img_dir=train_img_dir,
                                      datatype='train',
                                      transform=transformation)
        validation_dataset = Plain_Dataset(csv_file=validationcsv_file,
Ejemplo n.º 3
0
from deep_emotion import Deep_Emotion
from generate_data import Generate_data

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Index -> human-readable emotion label (order matches the model's outputs).
mapping = {
    0: 'Angry',
    1: 'Disgust',
    2: 'Fear',
    3: 'Happy',
    4: 'Sad',
    5: 'Surprise',
    6: 'Neutral'
}

# Load the trained weights. map_location=device makes a checkpoint saved on a
# GPU machine load correctly on a CPU-only machine (torch.load without it
# raises when CUDA is unavailable).
net = Deep_Emotion()
file_name = 'deep_emotion-100-128-0.005.pt'
net.load_state_dict(torch.load(file_name, map_location=device))
net.to(device)

# OpenCV face-detection cascade and drawing settings for the webcam demo.
path = "haarcascade_frontalface_default.xml"
font_scale = 1.5
font = cv2.FONT_HERSHEY_PLAIN

# SET THE RECTANGLE BACKGROUND TO WHITE
rectangle_bgr = (255, 255, 255)

# MAKE A BLACK IMAGE
img = np.zeros((500, 500))
# SET SOME TEXT
    # NOTE(review): fragment is cut off above — these are the trailing
    # arguments of a parser.add_argument(...) call.
    # NOTE(review): type=bool is an argparse pitfall — bool('False') is True,
    # so any non-empty value enables the flag; action='store_true' is the
    # usual fix.
    '-c',
    '--cam',
    type=bool,
    help='Test the model in real time with webcam connect via usb')
args = parser.parse_args()

# Normalize test images exactly as during training ([-1, 1], single channel).
transformation = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
dataset = Plain_Dataset(csv_file=args.data + '/finaltest.csv',
                        img_dir=args.data + '/' + 'finaltest/',
                        datatype='finaltest',
                        transform=transformation)
test_loader = DataLoader(dataset, batch_size=64, num_workers=0)

net = Deep_Emotion()
print("Deep Emotion:-", net)
# map_location=device lets a GPU-saved checkpoint load on CPU-only machines.
net.load_state_dict(torch.load(args.model, map_location=device))
net.to(device)
net.eval()  # inference mode: disable dropout / freeze batch-norm statistics
# Model evaluation on the held-out test split.
classes = ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral')
total = []
if args.test_acc:
    # No gradients needed for inference; saves memory and time.
    with torch.no_grad():
        for data, labels in test_loader:
            data, labels = data.to(device), labels.to(device)
            outputs = net(data)
            # Softmax over the class logits; arg-max is the predicted class.
            pred = F.softmax(outputs, dim=1)
            classs = torch.argmax(pred, 1)
            # NOTE(review): the line below is cut off in this fragment.
            wrong = torch.where(classs != labels,
    # NOTE(review): fragment spliced from a training script — saves the
    # trained weights with the hyperparameters encoded in the filename
    # (epochs-batchsize-lr).
    torch.save(net.state_dict(),'deep_emotion-{}-{}-{}.pt'.format(epochs,batchsize,lr))

    print("===================================Training Finished===================================")
    
# Generate the train/val/finaltest splits and write the image files to disk.
generate_dataset = Generate_data("data//")
generate_dataset.split_test()
generate_dataset.save_images()
generate_dataset.save_images('finaltest')
generate_dataset.save_images('val')

# Training hyperparameters.
epochs = 100
lr = 0.005
batchsize = 128


net = Deep_Emotion()
net.to(device)
print("Model architecture: ", net)  # fixed typo in the message

# CSV label files and image directories for the train/val splits.
traincsv_file = 'data' + '/' + 'train.csv'
validationcsv_file = 'data' + '/' + 'val.csv'
train_img_dir = 'data' + '/' + 'train/'
validation_img_dir = 'data' + '/' + 'val/'

# Grayscale normalization to [-1, 1], shared by train and validation.
transformation = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,))])
train_dataset = Plain_Dataset(csv_file=traincsv_file,
                              img_dir=train_img_dir,
                              datatype='train',
                              transform=transformation)
validation_dataset = Plain_Dataset(csv_file=validationcsv_file,
                                   img_dir=validation_img_dir,
                                   datatype='val',
                                   transform=transformation)
train_loader = DataLoader(train_dataset,
                          batch_size=batchsize,
                          shuffle=True,
                          num_workers=0)
val_loader = DataLoader(validation_dataset,
                        batch_size=batchsize,
                        shuffle=True,
                        num_workers=0)

criterion = nn.CrossEntropyLoss()
# NOTE(review): 'optmizer' is misspelled but the name is kept — unseen code
# below this fragment may reference it.
optmizer = optim.Adam(net.parameters(), lr=lr)
Ejemplo n.º 6
0
        # NOTE(review): fragment is cut off above — these calls continue an
        # unseen conditional. They split the raw data into splits and write
        # the image files for each split to disk.
        generate_dataset.split_test()
        generate_dataset.save_images()
        generate_dataset.save_images('finaltest')
        generate_dataset.save_images('val')

    # Hyperparameters from the command line when --hyperparams is given,
    # otherwise the built-in defaults for this variant.
    if args.hyperparams:
        epochs, lr, batchsize = (args.epochs, args.learning_rate,
                                 args.batch_size)
    else:
        epochs, lr, batchsize = 100, 0.0001, 16

    if args.train:
        net = Deep_Emotion()
        net.to(device)
        print("Model archticture: ", net)
        traincsv_file = args.data + '/' + 'train.csv'
        validationcsv_file = args.data + '/' + 'val.csv'
        train_img_dir = args.data + '/' + 'train/'
        validation_img_dir = args.data + '/' + 'val/'

        # NOTE(review): 'transformation2' is built here but never used below;
        # the dataset is given 'train_tf', which is not defined in this
        # fragment. Confirm train_tf exists above — otherwise this raises
        # NameError and 'transformation2' was probably the intended transform.
        transformation2 = tfs.Compose(
            [tfs.ToTensor(), tfs.Normalize((0.5, ), (0.5, ))])
        train_dataset = Plain_Dataset(csv_file=traincsv_file,
                                      img_dir=train_img_dir,
                                      datatype='train',
                                      transform=train_tf)
        # NOTE(review): the call below is cut off in this fragment.
        validation_dataset = Plain_Dataset(csv_file=validationcsv_file,
                                           img_dir=validation_img_dir,
Ejemplo n.º 7
0
        # NOTE(review): fragment cut off above — default hyperparameters from
        # the else-branch of an unseen --hyperparams check.
        epochs = 100
        lr = 0.005
        batchsize = 128

    # Conv-layer channel width: 50 when --channel50 is set, otherwise 10.
    num_channel = 50 if args.channel50 else 10

    # NOTE(review): this can never yield stn == False — a falsy args.stn
    # falls through to True, and a truthy args.stn is truthy by definition.
    # If the flag is meant to let users disable the spatial transformer, the
    # else-branch should probably be False (compare the regularizer flag
    # handling further down, whose fallback is False).
    if args.stn:
        stn = args.stn
    else:
        stn = True

    if args.train:
        # Build the model with the chosen channel width and STN setting.
        net = Deep_Emotion(num_channel, stn)
        net.to(device)
        print("Model architecture: ", net)  # fixed typo in the message

        # Label CSVs and image folders under the user-supplied data root.
        traincsv_file = args.data + '/' + 'train.csv'
        validationcsv_file = args.data + '/' + 'val.csv'
        train_img_dir = args.data + '/' + 'train/'
        validation_img_dir = args.data + '/' + 'val/'

        # Optional weight regularization, disabled by default.
        if args.regularizer:
            regularizer = args.regularizer
        else:
            regularizer = False

        # Single-channel (grayscale) normalization to the [-1, 1] range.
        transformation = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize((0.5, ), (0.5, ))])