# Split the raw data and save image files for each split
generate_dataset.split_test()
generate_dataset.save_images()
generate_dataset.save_images('finaltest')
generate_dataset.save_images('val')

# Hyperparameters: taken from the CLI if provided, otherwise defaults
if args.hyperparams:
    epochs = args.epochs
    lr = args.learning_rate
    batchsize = args.batch_size
else:
    epochs = 100
    lr = 0.005
    batchsize = 128

if args.train:
    net = Deep_Emotion()
    net.to(device)
    print("Model architecture: ", net)
    traincsv_file = args.data + '/' + 'train.csv'
    validationcsv_file = args.data + '/' + 'val.csv'
    train_img_dir = args.data + '/' + 'train/'
    validation_img_dir = args.data + '/' + 'val/'

    transformation = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.5, ), (0.5, ))])
    train_dataset = Plain_Dataset(csv_file=traincsv_file,
                                  img_dir=train_img_dir,
                                  datatype='train',
                                  transform=transformation)
    validation_dataset = Plain_Dataset(csv_file=validationcsv_file,
                                       img_dir=validation_img_dir,
                                       datatype='val',
                                       transform=transformation)
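# For reference, a minimal sketch of what a Plain_Dataset class could look
# like, assuming the FER2013 layout (a CSV with an 'emotion' label column and
# 48x48 grayscale images written by save_images as '<datatype><idx>.jpg').
# The file-naming pattern and column name here are assumptions; the real
# class in the repo may differ.
import os
import pandas as pd
from PIL import Image
from torch.utils.data import Dataset

class Plain_Dataset_Sketch(Dataset):
    def __init__(self, csv_file, img_dir, datatype, transform=None):
        self.labels = pd.read_csv(csv_file)
        self.img_dir = img_dir
        self.datatype = datatype  # 'train', 'val', or 'finaltest'
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        img_path = os.path.join(self.img_dir, self.datatype + str(idx) + '.jpg')
        img = Image.open(img_path)
        label = int(self.labels['emotion'][idx])
        if self.transform:
            img = self.transform(img)
        return img, label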
import cv2
import numpy as np
import torch

from deep_emotion import Deep_Emotion
from generate_data import Generate_data

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# FER2013 label indices mapped to emotion names
mapping = {
    0: 'Angry',
    1: 'Disgust',
    2: 'Fear',
    3: 'Happy',
    4: 'Sad',
    5: 'Surprise',
    6: 'Neutral'
}

net = Deep_Emotion()
# In PyTorch we first build the model, then load the trained weights into it
file_name = 'deep_emotion-100-128-0.005.pt'
net.load_state_dict(torch.load(file_name, map_location=device))
net.to(device)

path = "haarcascade_frontalface_default.xml"
font_scale = 1.5
font = cv2.FONT_HERSHEY_PLAIN
# Set the rectangle background to white
rectangle_bgr = (255, 255, 255)
# Make a black image
img = np.zeros((500, 500))
# Set some text
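# A sketch of the capture-and-predict loop this setup feeds into. The
# preprocessing (resize to 48x48 grayscale, same normalization as training)
# is an assumption based on the FER2013 input size; window names and
# detection parameters are illustrative.
from PIL import Image
from torchvision import transforms

face_cascade = cv2.CascadeClassifier(path)
transformation = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, ))])

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    for (x, y, w, h) in faces:
        # Crop the face, resize to the model's expected input, and classify
        roi = cv2.resize(gray[y:y + h, x:x + w], (48, 48))
        tensor = transformation(Image.fromarray(roi)).unsqueeze(0).to(device)
        with torch.no_grad():
            pred = torch.argmax(net(tensor), dim=1).item()
        cv2.rectangle(frame, (x, y), (x + w, y + h), rectangle_bgr, 2)
        cv2.putText(frame, mapping[pred], (x, y - 10), font, font_scale, (0, 0, 0), 2)
    cv2.imshow('Deep_Emotion', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()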
epochs = 100
lr = 0.005  # learning rate
batchsize = 128

net = Deep_Emotion()  # create the model defined in deep_emotion.py
net.to(device)        # move it to GPU/CPU
print("Model architecture: ", net)

traincsv_file = 'data' + '/' + 'train.csv'
validationcsv_file = 'data' + '/' + 'val.csv'
train_img_dir = 'data' + '/' + 'train/'
validation_img_dir = 'data' + '/' + 'val/'

transformation = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
train_dataset = Plain_Dataset(csv_file=traincsv_file,
                              img_dir=train_img_dir,
                              datatype='train',
                              transform=transformation)
validation_dataset = Plain_Dataset(csv_file=validationcsv_file,
                                   img_dir=validation_img_dir,
                                   datatype='val',
                                   transform=transformation)
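# A minimal training-loop sketch that could consume these datasets. The
# optimizer choice (Adam) and the per-epoch validation pass are assumptions;
# the saved filename follows the 'deep_emotion-<epochs>-<batchsize>-<lr>.pt'
# pattern loaded elsewhere in this walkthrough.
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

train_loader = DataLoader(train_dataset, batch_size=batchsize, shuffle=True)
val_loader = DataLoader(validation_dataset, batch_size=batchsize, shuffle=False)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=lr)

for epoch in range(epochs):
    net.train()
    running_loss = 0.0
    for data, labels in train_loader:
        data, labels = data.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(net(data), labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    # Quick validation accuracy at the end of each epoch
    net.eval()
    correct = 0
    with torch.no_grad():
        for data, labels in val_loader:
            data, labels = data.to(device), labels.to(device)
            correct += (torch.argmax(net(data), dim=1) == labels).sum().item()
    print(f"Epoch {epoch + 1}/{epochs} - "
          f"train loss: {running_loss / len(train_loader):.4f} - "
          f"val acc: {correct / len(validation_dataset):.3f}")

torch.save(net.state_dict(), f'deep_emotion-{epochs}-{batchsize}-{lr}.pt')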
dataset = Plain_Dataset(csv_file=args.file,
                        img_dir=args.data,
                        datatype='finaltest',
                        transform=transformation)

num_channel = 50 if args.channel50 else 10
# Default to using the spatial transformer when the flag is not set
stn = args.stn if args.stn is not None else True

net = Deep_Emotion(num_channel, stn)
net.load_state_dict(torch.load(args.model))
net.to(device)
net.eval()

classes = ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral')

if args.test_acc:
    test_loader = DataLoader(dataset, batch_size=64, num_workers=0)
    total = []
    confusion_matrix = torch.zeros((len(classes), len(classes)), device=device)
    with torch.no_grad():
        for data, labels in test_loader:
            data, labels = data.to(device), labels.to(device)
            outputs = net(data)
            pred = torch.argmax(outputs, dim=1)
            # Record batch accuracy and accumulate the confusion matrix
            total.append((pred == labels).float().mean().item())
            for t, p in zip(labels.view(-1), pred.view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
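# Once the loop above has filled total and confusion_matrix, overall and
# per-class accuracy can be reported; a short sketch:
overall_acc = 100 * sum(total) / len(total)
print(f'Accuracy on the final test set: {overall_acc:.2f}%')

# Per-class accuracy: correct predictions on the diagonal divided by row sums
per_class = confusion_matrix.diag() / confusion_matrix.sum(dim=1)
for name, acc in zip(classes, per_class):
    print(f'{name}: {100 * acc.item():.2f}%')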