def make_dataset_cuda(path, train_size, valid_size, randaug_params, mixup, cutmix):
    train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
    valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")

    # ImageNet channel statistics
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )

    # geometric augmentations run before mixing; RandAugment, tensor
    # conversion, and normalization are applied by MixDataset after the mix
    train_preprocess = transforms.Compose(
        [
            transforms.RandomResizedCrop(train_size, interpolation=Image.BICUBIC),
            transforms.RandomHorizontalFlip(),
        ]
    )
    train_postprocess = transforms.Compose(
        [RandAugment(**randaug_params), transforms.ToTensor(), normalize]
    )
    train_set = LMDBDataset(train_dir, train_preprocess)
    train_set = MixDataset(train_set, train_postprocess, mixup, cutmix)

    valid_preprocess = transforms.Compose(
        [
            transforms.Resize(valid_size + 32, interpolation=Image.BICUBIC),
            transforms.CenterCrop(valid_size),
            transforms.ToTensor(),
            normalize,
        ]
    )
    valid_set = LMDBDataset(valid_dir, valid_preprocess)

    return train_set, valid_set
def make_augment_dataset(path, train_transform, valid_transform):
    train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
    valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")

    train_set = LMDBDataset(train_dir, train_transform)
    valid_set = LMDBDataset(valid_dir, valid_transform)

    return train_set, valid_set
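# None of the snippets here define LMDBDataset itself. As a rough guide, a
# minimal LMDB-backed dataset usually looks like the sketch below. This is an
# assumption for illustration, not the class used in these excerpts: real
# implementations differ in how they key records and what they store (raw
# image bytes, pickled tuples, latent codes, ...), so the record layout and
# key scheme below are hypothetical.

import pickle

import lmdb
from torch.utils.data import Dataset


class LMDBDatasetSketch(Dataset):  # hypothetical name to avoid a clash
    def __init__(self, path, transform=None):
        # readonly + lock=False lets multiple DataLoader workers read safely
        self.env = lmdb.open(path, readonly=True, lock=False, readahead=False)
        self.transform = transform
        with self.env.begin() as txn:
            # assumes the writer stored the record count under b"length"
            self.length = int(txn.get(b"length").decode("utf-8"))

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        with self.env.begin() as txn:
            # assumes records were pickled under zero-padded integer keys
            record = pickle.loads(txn.get(str(index).zfill(8).encode("utf-8")))
        img, label = record
        if self.transform is not None:
            img = self.transform(img)
        return img, label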
def load_datasets(args, experiment_name, num_workers, dataset):
    db_name = util_funcs.create_checkpoint_name(
        experiment_name, args.ckpt_epoch)[:-3] + '_dataset[{}]'.format(dataset)
    train_dataset = LMDBDataset(
        os.path.join('..', 'codes', 'train_codes', db_name), args.architecture)
    test_dataset = LMDBDataset(
        os.path.join('..', 'codes', 'test_codes', db_name), args.architecture)
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True,
                              num_workers=num_workers, drop_last=True)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True,
                             num_workers=num_workers, drop_last=True)
    return test_loader, train_loader
if opt.manual_seed is None:
    opt.manual_seed = random.randint(1, 10000)
random.seed(opt.manual_seed)
np.random.seed(opt.manual_seed)
torch.manual_seed(opt.manual_seed)

# Use cudnn
cudnn.benchmark = True

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

# Prepare training and testing data
train_datasets = [LMDBDataset(x) for x in opt.lmdb_paths]
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=opt.batch_size,
                                           shuffle=True,
                                           sampler=None,
                                           num_workers=int(opt.workers),
                                           collate_fn=utils.AlignCollate(
                                               im_h=opt.img_h, im_w=opt.img_w))
print('Num of Training Images: %s' % len(train_dataset))

test_dataset = TestDataset(list_file=opt.val_list,
                           alphabet=alphabet,
                           transform=utils.ResizeNormalize(
                               (opt.img_w, opt.img_h)))
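# Note: the seeding above covers the main process only. When DataLoader
# workers (num_workers > 0) draw NumPy or Python randomness inside transforms,
# each worker needs its own derived seed as well. A minimal sketch following
# the worker_init_fn pattern from the PyTorch reproducibility notes; the
# seed_worker name is illustrative:

def seed_worker(worker_id):
    # torch.initial_seed() is already distinct per worker process
    worker_seed = torch.initial_seed() % 2 ** 32
    np.random.seed(worker_seed)
    random.seed(worker_seed)

# then pass worker_init_fn=seed_worker to torch.utils.data.DataLoader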
parser.add_argument('--n_res_channel', type=int, default=256)
parser.add_argument('--n_out_res_block', type=int, default=0)
parser.add_argument('--n_cond_res_block', type=int, default=3)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--amp', type=str, default='O0')
parser.add_argument('--sched', type=str)
parser.add_argument('--ckpt', type=str)
parser.add_argument('path', type=str)

args = parser.parse_args()
print(args)

device = 'cuda'

dataset = LMDBDataset(args.path)
loader = DataLoader(dataset, batch_size=args.batch, shuffle=True,
                    num_workers=4, drop_last=True)

ckpt = {}
if args.ckpt is not None:
    ckpt = torch.load(args.ckpt)
    args = ckpt['args']

if args.hier == 'top':
    model = PixelSNAIL(
        [32, 32],
def make_dataset(
    path, train_size, valid_size, randaug_params, mix_params, erasing, verbose=True
):
    train_dir = os.path.join(nsml.DATASET_PATH, path, "train.lmdb")
    valid_dir = os.path.join(nsml.DATASET_PATH, path, "valid.lmdb")

    # ImageNet channel statistics
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )

    transform_list = [
        transforms.RandomResizedCrop(train_size, interpolation=Image.BICUBIC),
        transforms.RandomHorizontalFlip(),
        RandAugment(**randaug_params),
        transforms.ToTensor(),
        normalize,
    ]

    if erasing > 0:
        transform_list += [
            RandomErasing(
                erasing, mode="pixel", max_count=1, num_splits=0, device="cpu"
            )
        ]

    # if mixing happens before the heavy augmentations, only the two geometric
    # ops run first and the rest of the pipeline is deferred to MixDataset
    if mix_params["mix_before_aug"]:
        preprocess = transform_list[:2]
        postprocess = transform_list[2:]
    else:
        preprocess = transform_list
        postprocess = []

    if verbose:
        logger = get_logger()
        log = f"""Transforms
Transform before Mixes:
{preprocess}
Mixes: mixup={mix_params["mixup"]}, cutmix={mix_params["cutmix"]}"""
        if mix_params["mix_before_aug"]:
            log += f"""
Transform after Mixes:
{postprocess}"""
        logger.info(log)

    train_preprocess = transforms.Compose(preprocess)
    train_postprocess = transforms.Compose(postprocess)

    train_set = LMDBDataset(train_dir, train_preprocess)
    train_set = MixDataset(
        train_set, train_postprocess, mix_params["mixup"], mix_params["cutmix"]
    )

    valid_preprocess = transforms.Compose(
        [
            transforms.Resize(valid_size + 32, interpolation=Image.BICUBIC),
            transforms.CenterCrop(valid_size),
            transforms.ToTensor(),
            normalize,
        ]
    )
    valid_set = LMDBDataset(valid_dir, valid_preprocess)

    return train_set, valid_set
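# A usage sketch for make_dataset: the dataset name, the randaug_params keys,
# and the loader settings below are illustrative assumptions (the RandAugment
# constructor's keywords are not shown in this excerpt); only the DataLoader
# calls are standard PyTorch.

from torch.utils.data import DataLoader

train_set, valid_set = make_dataset(
    "my-dataset",  # hypothetical NSML dataset name
    train_size=224,
    valid_size=224,
    randaug_params={"n": 2, "magnitude": 9},  # assumed keyword names
    mix_params={"mixup": 0.2, "cutmix": 1.0, "mix_before_aug": True},
    erasing=0.25,
)
train_loader = DataLoader(train_set, batch_size=64, shuffle=True,
                          num_workers=4, pin_memory=True, drop_last=True)
valid_loader = DataLoader(valid_set, batch_size=64, shuffle=False,
                          num_workers=4, pin_memory=True)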
# type=int added so command-line overrides parse as integers rather than
# strings (the bare defaults only worked when no flag was passed)
parser.add_argument('--image_w', type=int, default=100,
                    help='the width of the input image to the network')
parser.add_argument('--nh', type=int, default=256,
                    help='size of the lstm hidden state')
parser.add_argument('--batch_size', type=int, default=4, help='batch size')
parser.add_argument('--nepoch', type=int, default=100, help='number of epochs')

option = parser.parse_args()

if not os.path.exists('model'):
    os.mkdir('model')

cudnn.benchmark = True

trainset = LMDBDataset(option.trainset_path,
                       transform=transforms.Compose([
                           transforms.Resize((option.image_h, option.image_w)),
                           transforms.ToTensor()
                       ]))
trainset_dataloader = DataLoader(trainset, batch_size=option.batch_size,
                                 shuffle=True)

validationset = LMDBDataset(option.validationset_path,
                            transform=transforms.Compose([
                                transforms.Resize((option.image_h, option.image_w)),
                                transforms.ToTensor()
                            ]))
validationset_dataloader = DataLoader(validationset, batch_size=option.batch_size,
                                      shuffle=True)
parser.add_argument('--n_out_res_block', type=int, default=0)
parser.add_argument('--n_cond_res_block', type=int, default=3)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--amp', type=str, default='O0')
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--sched', type=str)
parser.add_argument('--ckpt', type=str)
parser.add_argument('path', type=str)

args = parser.parse_args()
print(args)

device = args.device

dataset = LMDBDataset(f"lmdb/{args.path}")
loader = DataLoader(
    dataset, batch_size=args.batch, shuffle=True, num_workers=4, drop_last=True)

ckpt = {}
if args.ckpt is not None:
    ckpt = torch.load(args.ckpt)
    args = ckpt['args']

if args.hier == 'top':
    model = PixelSNAIL(