Exemplo n.º 1
0
def test_random_perspective_save_load(tmpdir):
    """Round-trip save/load check for a scripted RandomPerspective transform."""
    transform = T.RandomPerspective()
    _test_fn_save_load(transform, tmpdir)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import sys

# Checkpoint path for the trained model.
save_path = 'CAN.pth'

# Prefer the first CUDA device when available; otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

print("Training Using", device)

# Multi-class classification loss for the MNIST classifier trained below.
criterion = nn.CrossEntropyLoss()

# Training-time augmentation: mild perspective warp plus a small random affine
# (rotation/translate/scale/shear) before tensor conversion.
# NOTE(review): interpolation=2 is an integer resampling code — presumably PIL
# bilinear; confirm against the pinned torchvision version.
augmentation_transform = transforms.Compose(
    [   transforms.RandomPerspective(distortion_scale=0.1, p=0.5, interpolation=2),
        transforms.RandomAffine(degrees = (-10, 10), translate=(0.1,0.1), scale=(0.9,1.1), shear=(0.1,0.1)),
        transforms.ToTensor()])

# Evaluation transform: tensor conversion only, no augmentation.
transform = transforms.Compose(
    [   transforms.ToTensor()])

trainset = torchvision.datasets.MNIST(root = 'data/', train = True, transform = augmentation_transform, download = True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=8)

testset = torchvision.datasets.MNIST(root = 'data/', train = False, transform = transform, download = True)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False, num_workers=8)

# Model definition (body truncated in this excerpt).
class can(nn.Module):

    def __init__(self):
Exemplo n.º 3
0
def get_datasets(data_dir, cfg, mode="train"):
    """Build train/eval ImageFolder datasets rooted at *data_dir*.

    Augmentations are toggled by boolean flags on ``cfg.transform``; every
    split is resized to ``cfg.transform.transform_resize`` and converted to a
    tensor, while only the training split receives random augmentation.

    Args:
        data_dir: root directory containing ``train``/``val``/``test``/... subdirs.
        cfg: config object exposing ``cfg.transform.*`` settings.
        mode: "train" -> (train, val, val_samples);
              "final_train" or "test" -> (train, test, samples).

    Returns:
        A 3-tuple of ``datasets.ImageFolder`` objects as described above.

    Raises:
        ValueError: if *mode* is not one of the supported values (the original
            code silently returned ``None`` here).
    """
    # Transforms shared by every split (train and eval alike).
    common_transforms = [
        transforms.Resize((cfg.transform.transform_resize, cfg.transform.transform_resize))
    ]
    # Random augmentations applied only to the training split.
    train_transforms = []

    if cfg.transform.transform_random_resized_crop:
        train_transforms.append(transforms.RandomResizedCrop(cfg.transform.transform_resize))
    if cfg.transform.transform_random_horizontal_flip:
        train_transforms.append(torchvision.transforms.RandomHorizontalFlip(p=0.5))
    if cfg.transform.transform_random_rotation:
        train_transforms.append(transforms.RandomRotation(cfg.transform.transform_random_rotation_degrees))
    if cfg.transform.transform_random_shear:
        # NOTE(review): ``fillcolor`` was renamed ``fill`` in torchvision >= 0.9;
        # kept as-is for compatibility with the version this project pins.
        train_transforms.append(torchvision.transforms.RandomAffine(
            0,
            shear=(
                cfg.transform.transform_random_shear_x1,
                cfg.transform.transform_random_shear_x2,
                cfg.transform.transform_random_shear_y1,
                cfg.transform.transform_random_shear_y2,
            ),
            fillcolor=255))
    if cfg.transform.transform_random_perspective:
        # interpolation=3 is an integer resampling code (presumably PIL bicubic).
        train_transforms.append(transforms.RandomPerspective(
            distortion_scale=cfg.transform.transform_perspective_scale,
            p=0.5,
            interpolation=3))
    if cfg.transform.transform_random_affine:
        train_transforms.append(transforms.RandomAffine(
            degrees=(cfg.transform.transform_degrees_min,
                     cfg.transform.transform_degrees_max),
            translate=(cfg.transform.transform_translate_a,
                       cfg.transform.transform_translate_b),
            fillcolor=255))

    data_transforms = {
        'train': transforms.Compose(common_transforms + train_transforms + [transforms.ToTensor()]),
        'test': transforms.Compose(common_transforms + [transforms.ToTensor()]),
    }

    def _image_folder(split, key):
        # Build an ImageFolder for the *split* subdirectory with transform *key*.
        return datasets.ImageFolder(os.path.join(data_dir, split), data_transforms[key])

    train_dataset = _image_folder("train", "train")

    print(mode)
    if mode in ("final_train", "test"):
        # BUG FIX: the original "test" branch returned test_dataset and
        # samples_dataset without ever defining them (NameError). Both modes
        # need the same datasets, so they now share this path.
        test_dataset = _image_folder("test", "test")
        samples_dataset = _image_folder("samples", "test")
        return train_dataset, test_dataset, samples_dataset

    if mode == "train":
        val_dataset = _image_folder("val", "test")
        val_samples_dataset = _image_folder("val_samples", "test")
        return train_dataset, val_dataset, val_samples_dataset

    raise ValueError("unknown mode: %r" % (mode,))
Exemplo n.º 4
0
    }
    # Make the mapping bidirectional: add value->key entries alongside the
    # original key->value pairs, then resolve *key* in the merged dict.
    revd = dict([reversed(i) for i in base_dict.items()])
    base_dict.update(revd)
    return base_dict[key]


# Transform pipelines keyed by split name: 'train' applies heavy random
# augmentation; 'val' uses deterministic resizing only (the 'val' entry is
# truncated in this excerpt).
data_transform = {
    'train':
    transforms.Compose([
        transforms.RandomRotation(360),
        transforms.Resize(256),
        transforms.RandomResizedCrop(224, scale=(0.8, 1.0)),
        # Exactly one geometric augmentation is picked at random per sample.
        transforms.RandomChoice([
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.RandomPerspective(distortion_scale=0.1),
            transforms.RandomAffine(10)
        ]),
        # Exactly one color augmentation is picked at random per sample.
        transforms.RandomChoice([
            transforms.ColorJitter(brightness=0.2,
                                   contrast=0.2,
                                   saturation=0.3,
                                   hue=0.1),
            transforms.RandomGrayscale(p=0.5)
        ]),
        transforms.ToTensor(),
        # ImageNet channel statistics.
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val':
    transforms.Compose([
        transforms.Resize((256, 256)),
Exemplo n.º 5
0
            # Paste the image onto the background at a random offset; the
            # image is passed again as the third argument so it acts as its
            # own alpha mask.
            x = int(random.choice(points))
            y = int(random.choice(points))
            background.paste(img, (x, y), img)
            return background
        else:
            return img


if __name__ == "__main__":

    # Heavy augmentation chain: blur, rotate, flip, perspective-warp,
    # brightness jitter, composite onto a random background, then shrink to
    # 25x25 and re-enlarge to simulate a low-resolution input.
    trans2 = transforms.Compose([
        transforms.Resize((256, 256)),
        GauBlur(0.2),
        transforms.RandomRotation(180),
        transforms.RandomHorizontalFlip(),
        transforms.RandomPerspective(p=0.5),
        transforms.ColorJitter(brightness=0.3),
        BackGround(1,
                   "../../../SwimData/SwimCodes/classification/train/False"),
        GauBlur(0.2),
        transforms.Resize((25, 25)),
        transforms.Resize((256, 256)),
    ])

    # Horizontal-blur-only pipeline for comparison.
    trans3 = transforms.Compose([transforms.Resize((256, 256)), HoriBlur(1)])

    # Show the original image, then the augmented version (truncated below).
    billed = PIL.Image.open(
        "../../../SwimData/GeoCodes/classification2/art/A/A.png")
    plt.imshow(billed)
    plt.show()
    nytbild = trans2(billed)
Exemplo n.º 6
0
 def __init__(self):
     """Create the augmentation transforms used by this pipeline."""
     # TODO: try RandomPerspective and Normalize
     self.flip = transforms.RandomHorizontalFlip(0.5)
     self.perspective = transforms.RandomPerspective()
     self.affine = transforms.RandomAffine(degrees=45, scale=(0.8, 1.6))
Exemplo n.º 7
0
"""# Dataset
在 Pytorch 中,我們可以利用 torch.utils.data 的 Dataset 及 DataLoader 來"包裝" data,使後續的 training 及 testing 更為方便。
Dataset 需要 overload 兩個函數:\_\_len\_\_ 及 \_\_getitem\_\_
\_\_len\_\_ 必須要回傳 dataset 的大小,而 \_\_getitem\_\_ 則定義了當程式利用 [ ] 取值時,dataset 應該要怎麼回傳資料。
實際上我們並不會直接使用到這兩個函數,但是使用 DataLoader 在 enumerate Dataset 時會使用到,沒有實做的話會在程式運行階段出現 error。
"""

#training 時做 data augmentation
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomHorizontalFlip(), #隨機將圖片水平翻轉
    transforms.RandomVerticalFlip(),
    transforms.ColorJitter(),
    transforms.RandomRotation(45), #隨機旋轉圖片
    transforms.RandomPerspective(),
    transforms.ToTensor(), #將圖片轉成 Tensor,並把數值normalize到[0,1](data normalization)
])
#testing 時不需做 data augmentation
test_transform = transforms.Compose([
    transforms.ToPILImage(),                                    
    transforms.ToTensor(),
])
class ImgDataset(Dataset):
    def __init__(self, x, y=None, transform=None):
        self.x = x
        # label is required to be a LongTensor
        self.y = y
        if y is not None:
            self.y = torch.LongTensor(y)
        self.transform = transform
Exemplo n.º 8
0
    # crop_or_pad_to(resolution, resolution)
])

# my compos

# Elastic pipeline: pad, random erase, rotation (the extra 20 zeros bias the
# choice toward no rotation), elastic deformation, jitter, normalize.
transforms_elastic = compose([
    pad(4, mode="constant", constant_value=0.5),
    T.RandomErasing(0.2),
    random_rotate(list(range(-30, 30)) + 20 * [0]),
    random_elastic(),
    jitter(8),
    normalize()
])

# Custom pipeline: same as elastic but without the elastic deformation;
# perspective/erasing variants are kept here commented out for reference.
transforms_custom = compose([
    pad(4, mode="constant", constant_value=0.5),
    # T.RandomPerspective(0.33, 0.2),
    # T.RandomErasing(0.2),
    random_rotate(list(range(-30, 30)) + 20 * [0]),
    jitter(8),
    normalize()
])

# Fast pipeline: no padding/jitter, uses the faster rotation implementation.
transforms_fast = compose([
    T.RandomPerspective(0.33, 0.2),
    T.RandomErasing(0.2),
    random_rotate_fast(list(range(-30, 30)) + 20 * [0]),
    normalize()
])

Exemplo n.º 9
0
def get_transforms():
    """Return ten transform pipelines: identity plus nine single augmentations.

    Every pipeline ends with the same ToTensor + Normalize tail; the dataset
    channel statistics previously appeared ten times and now live in one
    place, removing the risk of the copies drifting apart.

    Returns:
        Tuple ``(t0, ..., t9)`` of ``transforms.Compose`` pipelines:
        t0 identity, t1 brightness, t2 saturation, t3 contrast, t4 hue,
        t5 horizontal flip, t6 shear, t7 translation, t8 perspective,
        t9 rotation.
    """
    # Dataset channel statistics shared by every pipeline.
    mean = (0.4054, 0.3780, 0.3547)
    std = (0.2221, 0.2151, 0.2112)

    def _pipeline(*augmentations):
        # Compose *augmentations* followed by the common ToTensor/Normalize tail.
        return transforms.Compose([
            *augmentations,
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])

    # Keep the image as-is.
    t0 = _pipeline()
    # Random brightness jitter (factor up to 2.5).
    t1 = _pipeline(transforms.ColorJitter(brightness=2.5))
    # Random saturation jitter (factor up to 2).
    t2 = _pipeline(transforms.ColorJitter(saturation=2))
    # Random contrast jitter (factor up to 1.5).
    t3 = _pipeline(transforms.ColorJitter(contrast=1.5))
    # Random hue shift.
    t4 = _pipeline(transforms.ColorJitter(hue=0.2))
    # Random horizontal flips.
    t5 = _pipeline(transforms.RandomHorizontalFlip())
    # Random shearing (with mild rotation).
    t6 = _pipeline(transforms.RandomAffine(degrees=20, shear=3))
    # Random translation (with mild rotation).
    t7 = _pipeline(transforms.RandomAffine(degrees=10, translate=(0.2, 0.2)))
    # Random perspective change.
    t8 = _pipeline(transforms.RandomPerspective())
    # Random rotation.
    t9 = _pipeline(transforms.RandomRotation(20))

    return t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
Exemplo n.º 10
0
        transforms.ToTensor(),
    ])
    # Each pipeline below applies one augmentation before tensor conversion.
    transform2 = transforms.Compose([
        transforms.RandomGrayscale(),
        transforms.ToTensor(),
    ])
    transform3 = transforms.Compose([
        transforms.RandomResizedCrop(size=(28, 28)),
        transforms.ToTensor(),
    ])
    transform4 = transforms.Compose([
        transforms.RandomAffine(degrees=15),
        transforms.ToTensor(),
    ])
    transform5 = transforms.Compose([
        transforms.RandomPerspective(),
        transforms.ToTensor(),
    ])
    # Combined pipeline: all four augmentations applied together, or none,
    # with RandomApply's default probability.
    transform6 = transforms.Compose([
        transforms.RandomApply([transforms.RandomPerspective(),
            transforms.RandomAffine(degrees=15),
            transforms.RandomResizedCrop(size=(28, 28)),
            transforms.RandomGrayscale()]),
        transforms.ToTensor(),
    ])

    # NOTE(review): transform6 is defined but never added to transform_list —
    # confirm whether it was intentionally excluded from the sweep below.
    transform_list = [transform1, transform2, transform3, transform4, transform5]

    for i, transform in enumerate(transform_list):
        print("\r -----%d----- \r" % (i + 1))
        print(FC())
Exemplo n.º 11
0
    def __init__(
        self,
        datadir,
        crop_size=(512, 512),
        target_transform=None,
        common_transforms=None,
        transform=None,
        val=False,
        band_norm=True,
    ):
        """Dataset over the ``.npy`` image cubes found in *datadir*.

        All files are loaded into memory up front. Training mode
        (``val=False``) installs a randomized augmentation pipeline;
        validation mode uses a deterministic center crop instead.

        Args:
            datadir: directory containing ``.npy`` image files.
            crop_size: spatial (height, width) of the crops produced.
            target_transform: optional transform for targets (stored only).
            common_transforms: optional transform shared by data/targets
                (stored only).
            transform: optional additional input transform (stored only).
            val: if True, use CenterCrop instead of random augmentation.
            band_norm: stored flag for per-band normalization; the
                normalization call itself is currently commented out below.
        """
        super(ICVLDataset, self).__init__()
        datadir = Path(datadir)
        self.files = [datadir / f for f in os.listdir(datadir) if f.endswith(".npy")]
        # When running under torch.distributed, shuffle so ranks do not all
        # visit the files in the same order.
        if dist.is_initialized():
            random.shuffle(self.files)

        # load all the data at the top
        self.loadfrom = []  # np.zeros(first, dtype=np.float32)
        self.band_norm = band_norm
        for c, f in enumerate(self.files):
            # the images are already in [bands, height, width]
            # loaded, _ = utils.normalize(
            #     torch.tensor(np.load(f), dtype=torch.float32), by_band=band_norm, band_dim=0
            # )
            loaded = torch.tensor(np.load(f), dtype=torch.float32)
            self.loadfrom.append(loaded)

        # Freeze the cache as a tuple so it cannot be mutated accidentally.
        self.loadfrom = tuple(self.loadfrom)

        if not val:
            # Training: random resized crop, band permutation, then a random
            # choice (possibly combos) of geometric augmentations.
            self.base_transforms = transforms.Compose(
                [
                    # transforms.CenterCrop(crop_size),
                    # transforms.RandomCrop(crop_size),
                    transforms.RandomResizedCrop(
                        crop_size, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333)
                    ),
                    hyde_transforms.RandomBandPerm(10),
                    hyde_transforms.RandChoice(
                        [
                            hyde_transforms.RandRot90Transform(),
                            transforms.RandomVerticalFlip(p=0.9),
                            transforms.RandomAffine(
                                degrees=180,
                                # scale=(0.1, 10), # old (0.1, 3)
                                shear=20,
                            ),
                            transforms.RandomHorizontalFlip(p=0.9),
                            transforms.RandomPerspective(p=0.88),
                        ],
                        p=None,  # 0.5,
                        combos=True,
                    ),
                ]
            )
        else:
            # Validation: deterministic crop only.
            self.base_transforms = transforms.CenterCrop(crop_size)  # RandomCrop(crop_size)

        self.target_transform = target_transform
        self.common_transforms = common_transforms
        self.length = len(self.files)

        self.transform = transform
Exemplo n.º 12
0
def test_random_perspective_save(tmpdir):
    """Ensure a scripted RandomPerspective can be serialized to disk."""
    scripted = torch.jit.script(T.RandomPerspective())
    scripted.save(os.path.join(tmpdir, "t_perspective.pt"))
from torch.utils.data import DataLoader
import time
from torchvision import models
import os
from torch.utils.tensorboard import SummaryWriter


## TENSORBOARD
# Run-specific log directory; the directory name encodes the experiment setup.
logdir = "./Tensorboard/Experiment1_MobilenetV2_PretrainedFalse_Augmentation_LR0.01/"
writer = SummaryWriter(logdir)

## TRANSFORM
# Training pipeline: light flip / color-jitter / perspective augmentation,
# then ImageNet-style channel normalization.
transform_ori = transforms.Compose([
                                    transforms.RandomHorizontalFlip(p=0.5),
                                    transforms.ColorJitter(brightness=0.2, contrast=0.25, saturation=0.2, hue=0.05),
                                    transforms.RandomPerspective(distortion_scale=0.04, p=0.4),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                    ])

# Test pipeline: normalization only, no augmentation.
transform_test = transforms.Compose([
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                    ])

## LOAD DATASET
# ImageFolder infers class labels from the subdirectory names.
train_dataset = datasets.ImageFolder(root = './Dataset/train/',
                                     transform = transform_ori)

test_dataset = datasets.ImageFolder(root = './Dataset/test/',
                                    transform = transform_test)
Exemplo n.º 14
0
# Load preprocessed training images (blosc-packed numpy array) and labels.
imgs = bp.unpack_ndarray_from_file(
    '../features/train_images_size128_pad10_max_noclean.bloscpack')
# Columns 1..3 of train.csv hold the three per-image target labels.
lbls = pd.read_csv('../input/train.csv').iloc[:, 1:4].values

# Split into train/validation using precomputed index arrays.
trn_imgs = imgs[trn_ndx]
trn_lbls = lbls[trn_ndx]
vld_imgs = imgs[vld_ndx]
vld_lbls = lbls[vld_ndx]

# =========================================================================================================================

# Augmentation: each sample gets either an affine jitter or a strong
# perspective warp, chosen at random.
# NOTE(review): resample=3 is an integer resampling code — presumably PIL
# bicubic; confirm against the pinned Pillow/torchvision versions.
tsfm = pttf.Compose([
    pttf.ToPILImage(),
    pttf.RandomChoice([
        pttf.RandomAffine(degrees=15, scale=(.9, 1.1), shear=15, resample=3),
        pttf.RandomPerspective(distortion_scale=0.5, p=1.),
    ], ),
    pttf.ToTensor(),
    #pttf.RandomErasing(p=0.5, scale=(.05, .1), ratio=(0.3, 3.3), value=0, inplace=False),
])

# =========================================================================================================================

batch_size = 64  # 64 is important as the fit_one_cycle arguments are probably tuned for this batch size

# Augmentation is applied only to the training set, not validation.
training_set = Bengaliai_DS(trn_imgs, trn_lbls, transform=tsfm)
validation_set = Bengaliai_DS(vld_imgs, vld_lbls)

training_loader = DataLoader(training_set,
                             batch_size=batch_size,
                             num_workers=6,
Exemplo n.º 15
0
# Gallery-style demo: each section applies one torchvision transform to
# ``orig_img`` and plots the result.

####################################
# Grayscale
# ---------
# The :class:`~torchvision.transforms.Grayscale` transform
# (see also :func:`~torchvision.transforms.functional.to_grayscale`)
# converts an image to grayscale
gray_img = T.Grayscale()(orig_img)
plot(gray_img, "Grayscale image", cmap='gray')

####################################
# RandomPerspective
# -----------------
# The :class:`~torchvision.transforms.RandomPerspective` transform
# (see also :func:`~torchvision.transforms.functional.perspective`)
# performs random perspective transform on an image.
# p=1.0 forces the warp to be applied every time for the demo.
perspectived_img = T.RandomPerspective(distortion_scale=0.6, p=1.0)(orig_img)
plot(perspectived_img, "Perspective transformed image")

####################################
# RandomRotation
# --------------
# The :class:`~torchvision.transforms.RandomRotation` transform
# (see also :func:`~torchvision.transforms.functional.rotate`)
# rotates an image with random angle.
rotated_img = T.RandomRotation(degrees=(30, 70))(orig_img)
plot(rotated_img, "Rotated image")

####################################
# RandomAffine
# ------------
# The :class:`~torchvision.transforms.RandomAffine` transform