def test_multiple_setups(n_epochs, batches, optimizer_types, learning_rates):
    """Run a grid search over batch sizes, optimizers and learning rates.

    For every (batch_size, optimizer, learning_rate) combination, a fresh
    NaimishNet is trained for ``n_epochs`` epochs and the configuration is
    printed so runs can be compared in the log output.

    Args:
        n_epochs: number of training epochs per configuration.
        batches: iterable of batch sizes to try.
        optimizer_types: iterable of optimizer identifiers understood by
            ``train_net``.
        learning_rates: iterable of learning rates to try.
    """
    # Chain transformations using Compose
    data_transform = transforms.Compose(
        [Rescale(250), RandomCrop(224), Normalize(), ToTensor()])

    # FIX(review): the original said FacialKeyPointsDataset, inconsistent
    # with the FacialKeypointsDataset spelling used by train_and_save in
    # this file; unified on the latter.
    transformed_dataset = FacialKeypointsDataset(
        csv_file='data/training_frames_keypoints.csv',
        root_dir='data/training/',
        transform=data_transform)

    # Use a for loop to test and see how the variables influence the model accuracy
    for batch in batches:
        data_loader = DataLoader(transformed_dataset,
                                 batch_size=batch,
                                 shuffle=True,
                                 num_workers=4)
        for optimizer in optimizer_types:
            for lr in learning_rates:
                # FIX(review): re-create the network for every configuration.
                # The original built one net before the loops, so each run
                # kept training the previous run's weights, making the
                # hyperparameter comparison meaningless.
                neural_net = NaimishNet()
                neural_net.to(device)
                print("Batch: ", batch)
                print("Optimizer: ", optimizer)
                print("Learning rate: ", lr)
                print("NNet: ", "NaimishNet")
                train_net(neural_net, n_epochs, nn.MSELoss(),
                          data_loader, optimizer, lr)
def train_and_save(n_epoch, batch_size, optimizer, learning_rate, path):
    """Train a KurbakovNet on the facial-keypoints dataset and save its weights.

    Args:
        n_epoch: number of training epochs.
        batch_size: mini-batch size for the data loader.
        optimizer: optimizer identifier understood by ``train_net``.
        learning_rate: learning rate passed through to ``train_net``.
        path: filesystem path where the trained state dict is written.
    """
    # Preprocessing pipeline: rescale, random-crop, normalize, tensorize.
    preprocessing = transforms.Compose(
        [Rescale(250), RandomCrop(224), Normalize(), ToTensor()])

    dataset = FacialKeypointsDataset(
        csv_file='data/training_frames_keypoints.csv',
        root_dir='data/training/',
        transform=preprocessing)

    loader = DataLoader(dataset,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=4)

    model = KurbakovNet()
    train_net(model, n_epoch, nn.MSELoss(), loader, optimizer, learning_rate)

    # Persist only the learned parameters (state dict), not the whole module.
    torch.save(model.state_dict(), path)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_num # gpus 0, 1 device0 = torch.device('cuda:0') device1 = torch.device('cuda:1') if not torch.cuda.is_available(): raise Exception("No GPU found") else: print("===> GPU on") # Normalization method mean = get_mean(args.norm_value) std = get_std(args.norm_value) norm_method = Normalize(mean, std) # path setting train_path = join(args.root_path, args.train_data) pretrain_path = join(args.root_path, args.pretrain_path) log_path = join(args.root_path, args.log_path) print("===> Loading datasets") train_data = DatasetFromFolder(train_path, args.resize_w, args.resize_h, args.crop_size, args.fps, args.train_frames, args.horizontal_flip, args.norm_value, norm_method, args.num_of_vid, args.train_frames) train_data_loader = DataLoader(dataset=train_data, num_workers=4, batch_size=args.batch_size,