def get_tusimple(params):
    # Augmentation transforms (stochastic by default)
    flip = Flip()
    translate = Translate()
    rotate = Rotate()
    add_noise = AddGaussianNoise()
    change_intensity = ChangeIntensity()
    resize = Resize(rows=256, cols=512)
    norm_to_1 = NormalizeInstensity()
    hwc_to_chw = TransposeNumpyArray((2, 0, 1))  # HWC -> CHW

    train_dataset = DatasetTusimple(
        root_path=params.train_root_url,
        json_files=params.train_json_file,
        transform=transforms.Compose([
            flip, translate, rotate, add_noise, change_intensity,
            resize, norm_to_1, hwc_to_chw
        ]),
    )
    # Validation skips the stochastic augmentations
    val_dataset = DatasetTusimple(
        params.val_root_url,
        params.val_json_file,
        transform=transforms.Compose([resize, norm_to_1, hwc_to_chw]),
    )
    return train_dataset, val_dataset
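A minimal usage sketch (not part of the original module), wrapping the two returned datasets in torch DataLoaders. The batch_size and num_workers values are illustrative placeholders, and `params` is assumed to carry the four TuSimple path fields read above.

import torch

def demo_tusimple_loaders(params):  # hypothetical helper, for illustration only
    train_dataset, val_dataset = get_tusimple(params)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=8, shuffle=True,
        num_workers=4, pin_memory=True, drop_last=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=1, shuffle=False, num_workers=4)
    return train_loader, val_loader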
def __init__(self, settings):
    super(SRTrainer, self).__init__(settings)
    self.scale = settings.scale
    # Combined loss: content term plus an optional IQA-guided term
    self.criterion = ComLoss(settings.iqa_model_path,
                             settings.__dict__.get('weights'),
                             settings.__dict__.get('feat_names'),
                             settings.alpha,
                             settings.iqa_patch_size,
                             settings.criterion)
    if hasattr(self.criterion, 'iqa_loss'):
        # Freeze the IQA network to save computation
        self.criterion.iqa_loss.freeze()
    self.model = build_model(ARCH, scale=self.scale)
    self.dataset = get_dataset(DATASET)
    if self.phase == 'train':
        self.train_loader = torch.utils.data.DataLoader(
            self.dataset(self.data_dir,
                         'train',
                         self.scale,
                         list_dir=self.list_dir,
                         transform=Compose(MSCrop(self.scale, settings.patch_size),
                                           Flip()),
                         repeats=settings.reproduce),
            batch_size=self.batch_size,  # max(self.batch_size//settings.reproduce, 1)
            shuffle=True,
            num_workers=settings.num_workers,
            pin_memory=True,
            drop_last=True)
    self.val_loader = self.dataset(self.data_dir,
                                   'val',
                                   self.scale,
                                   subset=settings.subset,
                                   list_dir=self.list_dir)
    if not self.val_loader.lr_avai:
        self.logger.warning(
            "warning: the low-resolution sources are not available")
    self.optimizer = torch.optim.Adam(self.model.parameters(),
                                      betas=(0.9, 0.999),
                                      lr=self.lr,
                                      weight_decay=settings.weight_decay)
    # self.optimizer = torch.optim.RMSprop(
    #     self.model.parameters(),
    #     lr=self.lr,
    #     alpha=0.9,
    #     weight_decay=settings.weight_decay
    # )
    self.logger.dump(self.model)  # Log the architecture
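A hypothetical configuration sketch, listing only the fields this constructor reads from `settings`; the values are illustrative, and the base trainer is assumed to populate phase, data_dir, list_dir, batch_size, and lr from the same object.

from types import SimpleNamespace

settings = SimpleNamespace(
    scale=4,                           # super-resolution factor
    iqa_model_path='weights/iqa.pth',  # placeholder path
    weights=None,                      # optional IQA feature weights
    feat_names=None,                   # optional IQA feature layers
    alpha=0.5,
    iqa_patch_size=32,
    criterion='l1',
    patch_size=96,
    reproduce=1,                       # repeats per training sample
    subset='val',
    num_workers=4,
    weight_decay=0.0,
)
trainer = SRTrainer(settings)  # assumes base-class fields are also supplied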
def __init__(self):
    print("usage examples:")
    print("python -m dataset.culane.test sample")
    print("python -m dataset.culane.test batch")
    print("python -m dataset.culane.test batch shuffle=False")
    # Stochastic transforms forced on (p=1.0) so the test exercises all of them
    flip = Flip(1.0)
    translate = Translate(1.0)
    rotate = Rotate(1.0)
    add_noise = AddGaussianNoise(1.0)
    change_intensity = ChangeIntensity(1.0)
    resize = Resize(rows=256, cols=512)
    hwc_to_chw = TransposeNumpyArray((2, 0, 1))
    norm_to_1 = NormalizeInstensity()
    self.train_dataset = DatasetCollections(
        transform=transforms.Compose([
            flip, translate, rotate, add_noise, change_intensity,
            resize, norm_to_1, hwc_to_chw
        ]),
    )
def __init__(self):
    flip = Flip(1.0)
    translate = Translate(1.0)
    rotate = Rotate(1.0)
    add_noise = AddGaussianNoise(1.0)
    change_intensity = ChangeIntensity(1.0)
    resize = Resize(rows=256, cols=512)
    hwc_to_chw = TransposeNumpyArray((2, 0, 1))
    norm_to_1 = NormalizeInstensity()
    json_file = ['label_data_0313.json',
                 'label_data_0531.json',
                 'label_data_0601.json']
    self.train_dataset = DatasetTusimple(
        root_path="/media/zzhou/data-tusimple/lane_detection/train_set/",
        json_files=json_file,
        transform=transforms.Compose([
            flip, translate, rotate, add_noise, change_intensity,
            resize, norm_to_1, hwc_to_chw
        ]),
    )
def __init__(self):
    print("usage examples:")
    print("python -m dataset.bdd100k.test sample")
    print("python -m dataset.bdd100k.test batch")
    print("python -m dataset.bdd100k.test batch shuffle=False")
    flip = Flip(1.0)
    translate = Translate(1.0)
    rotate = Rotate(1.0)
    add_noise = AddGaussianNoise(1.0)
    change_intensity = ChangeIntensity(1.0)
    resize = Resize(rows=256, cols=512)
    hwc_to_chw = TransposeNumpyArray((2, 0, 1))
    norm_to_1 = NormalizeInstensity()
    self.train_dataset = DatasetBDD100K(
        root_path="/media/zzhou/data-BDD100K/bdd100k/",
        json_files="labels/bdd100k_labels_images_train.json",
        transform=transforms.Compose([
            flip, translate, rotate, add_noise, change_intensity,
            resize, norm_to_1, hwc_to_chw
        ]),
    )
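A quick sanity check along these lines could follow any of the test constructors above; `TestBDD100K` is a hypothetical name for the enclosing class, and the `(image, label)` layout returned by `__getitem__` is an assumption.

# Hypothetical sanity check: after Resize(rows=256, cols=512) and
# TransposeNumpyArray((2, 0, 1)) a sample image should come out CHW.
t = TestBDD100K()                    # hypothetical class name
image, label = t.train_dataset[0]    # (image, label) layout is an assumption
print(image.shape)                   # expect (3, 256, 512)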