    iaa.Flipud(0.5),
    iaa.Rot90((0, 4)),
    # Blur and Noise
    iaa.Sometimes(0.2, iaa.GaussianBlur(sigma=(0.25, 1.5), name="gaus-blur")),
    iaa.Sometimes(
        0.2,
        iaa.AdditiveLaplaceNoise(scale=(0, 0.1 * 255), per_channel=True, name="gaus-noise")),
])
# Augmenters named in input_only are applied to the RGB input only, not to the
# labels (see the hooks sketch below this block).
input_only = ["gaus-blur", "gaus-noise"]

db_train_list = []
for dataset in config.train.datasetsTrain:
    db = dataloader.SurfaceNormalsDataset(input_dir=dataset.images,
                                          label_dir=dataset.labels,
                                          transform=augs_train,
                                          input_only=input_only)
    # Keep only the configured fraction of each dataset for training
    train_size = int(config.train.percentageDataForTraining * len(db))
    db = torch.utils.data.Subset(db, range(train_size))
    db_train_list.append(db)
db_train = torch.utils.data.ConcatDataset(db_train_list)

# Validation Dataset
augs_test = iaa.Sequential([
    iaa.Resize({
        "height": config.eval.imgHeight,
        "width": config.eval.imgWidth
    }, interpolation='nearest'),
])
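# A minimal sketch (an assumption, not necessarily how SurfaceNormalsDataset
# implements it) of how an "input_only" list can be honored with imgaug: the
# same deterministic augmenter is applied to both image and label, but
# augmenters whose names appear in input_only are deactivated for the label
# via a hooks activator.
import imgaug as ia

def augment_pair(img, label, augs, input_only):
    det = augs.to_deterministic()  # freeze random params so image and label match

    def activator(images, augmenter, parents, default):
        # Disable input-only augmenters (e.g. blur/noise) when augmenting the label
        return False if augmenter.name in input_only else default

    img_aug = det.augment_image(img)
    label_aug = det.augment_image(label, hooks=ia.HooksImages(activator=activator))
    return img_aug, label_aug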
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Enable Multi-GPU training
if torch.cuda.device_count() > 1:
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    net = nn.DataParallel(net)

augs_train = iaa.Sequential([
    iaa.Scale((imsize, imsize), 0),
])
db_train = dataloader.SurfaceNormalsDataset(
    input_dir='data/datasets/train/milk-bottles-train/resized-files/preprocessed-rgb-imgs',
    label_dir='data/datasets/train/milk-bottles-train/resized-files/preprocessed-camera-normals',
    transform=augs_train,
    input_only=None,
)
trainLoader = DataLoader(db_train,
                         batch_size=p['trainBatchSize'],
                         shuffle=True,
                         num_workers=32,
                         drop_last=True)

# %matplotlib inline
# Run the learning-rate range test and plot loss vs. learning rate
lr_finder = LRFinder(net, optimizer, criterion, device="cuda")
lr_finder.range_test(trainLoader, end_lr=1, num_iter=100)
lr_finder.plot()
plt.show()
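# After the range test, a common follow-up (a sketch, assuming the
# torch-lr-finder package's LRFinder, which exposes .history and .reset())
# is to pick the learning rate at the steepest loss descent and then restore
# the model and optimizer state that the range test modified.
import numpy as np

lrs = np.array(lr_finder.history["lr"])
losses = np.array(lr_finder.history["loss"])
grads = np.gradient(losses, np.log(lrs))  # slope of loss vs. log(lr)
suggested_lr = lrs[np.argmin(grads)]      # steepest-descent point
print("Suggested max LR: {:.2e}".format(suggested_lr))

lr_finder.reset()  # restore model and optimizer to their pre-test state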
    print(
        colored('The dir to store results "{}" does not exist. Creating dir'.format(
            DIR_RESULTS_SYNTHETIC), 'red'))
    os.makedirs(DIR_RESULTS_SYNTHETIC)

###################### DataLoader #############################
print(colored('Will Run inference on these Test sets:', 'green'))

# Make new pytorch datasets for each synthetic dataset
db_test_list_synthetic = []
for dataset in config.eval.datasetsSynthetic:
    print('Creating Synthetic Images dataset from: "{}"'.format(dataset.images))
    if dataset.images:
        dataset = dataloader.SurfaceNormalsDataset(input_dir=dataset.images,
                                                   label_dir=dataset.labels,
                                                   transform=None,
                                                   input_only=None)
        db_test_list_synthetic.append(dataset)

# Make new pytorch datasets for each real dataset
db_test_list_real = []
for dataset in config.eval.datasetsReal:
    print('Creating Real Images dataset from: "{}"'.format(dataset.images))
    if dataset.images:
        dataset = dataloader.SurfaceNormalsRealImagesDataset(
            input_dir=dataset.images,
            imgHeight=config_checkpoint.train.imgHeight,
            imgWidth=config_checkpoint.train.imgWidth)
        db_test_list_real.append(dataset)

# Create pytorch dataloaders from datasets
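# A minimal sketch of this step (the batch-size and num_workers config fields
# below are assumptions, not necessarily the names used in this repo):
# concatenate each list of datasets and wrap it in a DataLoader so inference
# can iterate over batches.
from torch.utils.data import DataLoader, ConcatDataset

if db_test_list_synthetic:
    db_test_synthetic = ConcatDataset(db_test_list_synthetic)
    testLoader_synthetic = DataLoader(db_test_synthetic,
                                      batch_size=config.eval.batchSize,
                                      shuffle=False,
                                      num_workers=config.eval.numWorkers,
                                      drop_last=False)

if db_test_list_real:
    db_test_real = ConcatDataset(db_test_list_real)
    testLoader_real = DataLoader(db_test_real,
                                 batch_size=config.eval.batchSize,
                                 shuffle=False,
                                 num_workers=config.eval.numWorkers,
                                 drop_last=False)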