Example #1
def get_dataset(opts):
    """ Dataset And Augmentation
    """
    if opts.crop_val:
        val_transform = et.ExtCompose([
            et.ExtResize(opts.crop_size),
            et.ExtCenterCrop(opts.crop_size),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])
    else:
        val_transform = et.ExtCompose([
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])

    val_dst = VOCSegmentation(root=opts.data_root,
                              year=opts.year,
                              image_set='val',
                              download=False,
                              transform=val_transform,
                              ret_fname=True)

    return val_dst
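A minimal usage sketch for the function above (opts is assumed to carry the fields the function reads; the (image, label, fname) sample layout implied by ret_fname=True is also an assumption). Un-cropped validation images vary in size, so batch size 1 is the safe choice:

from torch.utils import data

val_dst = get_dataset(opts)
# batch_size=1 because un-cropped VOC images differ in size and cannot be stacked.
val_loader = data.DataLoader(val_dst, batch_size=1, shuffle=False, num_workers=2)

for image, label, fname in val_loader:  # ret_fname=True appends the filename
    print(fname[0], image.shape, label.shape)
    break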
Example #2
def get_dataset(opts):
    """Build training/validation datasets with augmentation."""
    if opts.dataset == 'voc':
        train_transform = et.ExtCompose([
            # et.ExtResize(size=opts.crop_size),
            et.ExtRandomScale((0.5, 2.0)),
            et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size), pad_if_needed=True),
            et.ExtRandomHorizontalFlip(),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])
        if opts.crop_val:
            val_transform = et.ExtCompose([
                et.ExtResize(opts.crop_size),
                et.ExtCenterCrop(opts.crop_size),
                et.ExtToTensor(),
                et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225]),
            ])
        else:
            val_transform = et.ExtCompose([
                et.ExtToTensor(),
                et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225]),
            ])
        train_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
                                    image_set='train', download=opts.download, transform=train_transform)
        val_dst = VOCSegmentation(root=opts.data_root, year=opts.year,
                                  image_set='val', download=False, transform=val_transform)

    elif opts.dataset == 'cityscapes':
        train_transform = et.ExtCompose([
            # et.ExtResize(512),
            et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
            et.ExtColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
            et.ExtRandomHorizontalFlip(),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])

        val_transform = et.ExtCompose([
            et.ExtResize(256),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])

        train_dst = Cityscapes(root=opts.data_root,
                               split='train', transform=train_transform)
        val_dst = Cityscapes(root=opts.data_root,
                             split='val', transform=val_transform)
    else:
        raise ValueError(f'Unknown dataset: {opts.dataset}')
    return train_dst, val_dst
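Wiring both splits into loaders, a sketch assuming an opts namespace with batch_size and crop_val. When crop_val is off, VOC validation images keep their native sizes, so the validation loader falls back to batch size 1:

from torch.utils import data

train_dst, val_dst = get_dataset(opts)
train_loader = data.DataLoader(train_dst, batch_size=opts.batch_size,
                               shuffle=True, num_workers=2, drop_last=True)
val_loader = data.DataLoader(val_dst,
                             batch_size=opts.batch_size if opts.crop_val else 1,
                             shuffle=False, num_workers=2)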
Example #3
def get_test_dataset(opts):
    """ Dataset And Augmentation
    """
    if opts.crop_val:
        val_transform = et.ExtCompose([
            et.ExtResize(opts.crop_size),
            et.ExtCenterCrop(opts.crop_size),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])
    else:
        val_transform = et.ExtCompose([
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])
    # Build the test dataset from test_dir
    test_dst = DataSegmentationTest(transform=val_transform,
                                    test_dir=opts.test_dir,
                                    png_dir=opts.png_dir)
    return test_dst
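A hedged inference sketch over the returned test set. The model, device, and the (image, fname) sample layout are assumptions; the source does not show them:

import os

import torch
from torch.utils import data
from PIL import Image

# model: a trained segmentation network; device: e.g. torch.device('cuda')
test_dst = get_test_dataset(opts)
test_loader = data.DataLoader(test_dst, batch_size=1, shuffle=False)

model.eval()
with torch.no_grad():
    for image, fname in test_loader:
        logits = model(image.to(device))        # (1, C, H, W) class scores
        pred = logits.argmax(dim=1).squeeze(0)  # (H, W) class indices
        out = Image.fromarray(pred.cpu().numpy().astype('uint8'))
        out.save(os.path.join(opts.png_dir, fname[0] + '.png'))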
Example #4
def get_dataset(opts):
    """ Dataset And Augmentation
    """
    train_transform = et.ExtCompose([
        # et.ExtResize(size=opts.crop_size),
        et.ExtRandomScale((0.5, 2.0)),
        et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size),
                         pad_if_needed=True),
        et.ExtRandomHorizontalFlip(),
        et.ExtToTensor(),
        et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    if opts.crop_val:
        val_transform = et.ExtCompose([
            et.ExtResize(opts.crop_size),
            et.ExtCenterCrop(opts.crop_size),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])
    else:
        val_transform = et.ExtCompose([
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])
    train_dst = DataSegmentation(image_set='train',
                                 transform=train_transform,
                                 jpg_dir=opts.jpg_dir,
                                 png_dir=opts.png_dir,
                                 list_dir=opts.list_dir)
    val_dst = DataSegmentation(image_set='val',
                               transform=val_transform,
                               jpg_dir=opts.jpg_dir,
                               png_dir=opts.png_dir,
                               list_dir=opts.list_dir)
    return train_dst, val_dst
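All of these pipelines normalize with ImageNet statistics, so a transformed sample cannot be displayed directly. A small helper sketch that undoes ExtNormalize (the defaults mirror the mean/std used above):

import torch

def denormalize(tensor, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Undo channel-wise normalization so a transformed image can be displayed."""
    mean = torch.tensor(mean).view(-1, 1, 1)
    std = torch.tensor(std).view(-1, 1, 1)
    return (tensor * std + mean).clamp(0, 1)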
Example #5
def get_dataloader(args):
    if args.dataset.lower() == 'mnist':
        train_loader = torch.utils.data.DataLoader( 
            datasets.MNIST(args.data_root, train=True, download=True,
                       transform=transforms.Compose([
                           transforms.Resize((32, 32)),
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader( 
            datasets.MNIST(args.data_root, train=False, download=True,
                      transform=transforms.Compose([
                          transforms.Resize((32, 32)),
                          transforms.ToTensor(),
                          transforms.Normalize((0.1307,), (0.3081,))
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)

    elif args.dataset.lower() == 'cifar10':
        train_loader = torch.utils.data.DataLoader( 
            datasets.CIFAR10(args.data_root, train=True, download=True,
                       transform=transforms.Compose([
                            transforms.RandomCrop(32, padding=4),
                            transforms.RandomHorizontalFlip(),
                            transforms.ToTensor(),
                            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader( 
            datasets.CIFAR10(args.data_root, train=False, download=True,
                       transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)
    elif args.dataset.lower() == 'cifar100':
        train_loader = torch.utils.data.DataLoader( 
            datasets.CIFAR100(args.data_root, train=True, download=True,
                       transform=transforms.Compose([
                            transforms.RandomCrop(32, padding=4),
                            transforms.RandomHorizontalFlip(),
                            transforms.ToTensor(),
                            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader( 
            datasets.CIFAR100(args.data_root, train=False, download=True,
                       transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)
    elif args.dataset.lower() == 'caltech101':
        train_loader = torch.utils.data.DataLoader(
            Caltech101(args.data_root, train=True, download=args.download,
                        transform=transforms.Compose([
                            transforms.Resize(128),
                            transforms.RandomCrop(128),
                            transforms.RandomHorizontalFlip(),
                            transforms.ToTensor(),
                            transforms.Normalize((0.5,), (0.5,))
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader(
            Caltech101(args.data_root, train=False, download=args.download, 
                        transform=transforms.Compose([
                            transforms.Resize(128),
                            transforms.CenterCrop(128),
                            transforms.ToTensor(),
                            transforms.Normalize((0.5,), (0.5,))
                        ])), 
            batch_size=args.test_batch_size, shuffle=False, num_workers=2)

    ############ Segmentation       
    elif args.dataset.lower() == 'camvid':
        train_loader = torch.utils.data.DataLoader(
            CamVid(args.data_root, split='train',
                        transform=ext_transforms.ExtCompose([
                            ext_transforms.ExtResize(256),
                            ext_transforms.ExtRandomCrop(128, pad_if_needed=True),
                            ext_transforms.ExtRandomHorizontalFlip(),
                            ext_transforms.ExtToTensor(),
                            ext_transforms.ExtNormalize((0.5,), (0.5,))
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader(
            CamVid(args.data_root, split='test',
                        transform=ext_transforms.ExtCompose([
                            ext_transforms.ExtResize(256),
                            ext_transforms.ExtToTensor(),
                            ext_transforms.ExtNormalize((0.5,), (0.5,))
                        ])),
            batch_size=args.test_batch_size, shuffle=False, num_workers=2)
    elif args.dataset.lower() == 'nyuv2':
        train_loader = torch.utils.data.DataLoader(
            NYUv2(args.data_root, split='train',
                        transform=ext_transforms.ExtCompose([
                            ext_transforms.ExtResize(256),
                            ext_transforms.ExtRandomCrop(128, pad_if_needed=True),
                            ext_transforms.ExtRandomHorizontalFlip(),
                            ext_transforms.ExtToTensor(),
                            ext_transforms.ExtNormalize((0.5,), (0.5,))
                        ])),
            batch_size=args.batch_size, shuffle=True, num_workers=2)
        test_loader = torch.utils.data.DataLoader(
            NYUv2(args.data_root, split='test',
                        transform=ext_transforms.ExtCompose([
                            ext_transforms.ExtResize(256),
                            ext_transforms.ExtToTensor(),
                            ext_transforms.ExtNormalize((0.5,), (0.5,))
                        ])),
            batch_size=args.test_batch_size, shuffle=False, num_workers=2)

    else:
        raise ValueError(f'Unknown dataset: {args.dataset}')

    return train_loader, test_loader
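A usage sketch (the field names mirror the attributes the function reads; the values are illustrative):

from argparse import Namespace

args = Namespace(dataset='cifar10', data_root='./data',
                 batch_size=128, test_batch_size=128, download=False)
train_loader, test_loader = get_dataloader(args)
images, labels = next(iter(train_loader))
print(images.shape)  # e.g. torch.Size([128, 3, 32, 32])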
Example #6
def get_dataset(opts):
    """ Dataset And Augmentation
    """
    if opts.dataset == 'voc':
        train_transform = et.ExtCompose([
            # et.ExtResize(size=opts.crop_size),
            et.ExtRandomScale((0.5, 2.0)),
            et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size),
                             pad_if_needed=True),
            et.ExtRandomHorizontalFlip(),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])
        if opts.crop_val:
            val_transform = et.ExtCompose([
                et.ExtResize(opts.crop_size),
                et.ExtCenterCrop(opts.crop_size),
                et.ExtToTensor(),
                et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225]),
            ])
        else:
            val_transform = et.ExtCompose([
                et.ExtToTensor(),
                et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225]),
            ])
        train_dst = VOCSegmentation(root=opts.data_root,
                                    year=opts.year,
                                    image_set='train',
                                    download=opts.download,
                                    transform=train_transform)
        val_dst = VOCSegmentation(root=opts.data_root,
                                  year=opts.year,
                                  image_set='val',
                                  download=False,
                                  transform=val_transform)

    elif opts.dataset == 'cityscapes':
        train_transform = et.ExtCompose([
            # et.ExtResize(512),
            et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),
            et.ExtColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
            et.ExtRandomHorizontalFlip(),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])

        val_transform = et.ExtCompose([
            # et.ExtResize(512),
            et.ExtToTensor(),
            et.ExtNormalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225]),
        ])

        train_dst = Cityscapes(root=opts.data_root,
                               split='train',
                               transform=train_transform)
        val_dst = Cityscapes(root=opts.data_root,
                             split='val',
                             transform=val_transform)

    elif opts.dataset == 'weedcluster':
        train_dst = WeedClusterDataset(root=opts.data_root, split='train')
        val_dst = WeedClusterDataset(root=opts.data_root, split='val')

    elif opts.dataset == 'cloudshadow':
        train_dst = CloudShadowDataset(root=opts.data_root, split='train')
        val_dst = CloudShadowDataset(root=opts.data_root, split='val')

    elif opts.dataset == 'doubleplant':
        train_dst = DoublePlantDataset(root=opts.data_root, split='train')
        val_dst = DoublePlantDataset(root=opts.data_root, split='val')

    elif opts.dataset == 'planterskip':
        train_dst = PlanterSkipDataset(root=opts.data_root, split='train')
        val_dst = PlanterSkipDataset(root=opts.data_root, split='val')

    elif opts.dataset == 'standingwater':
        train_dst = StandingWaterDataset(root=opts.data_root, split='train')
        val_dst = StandingWaterDataset(root=opts.data_root, split='val')

    elif opts.dataset == 'waterway':
        train_dst = WaterwayDataset(root=opts.data_root, split='train')
        val_dst = WaterwayDataset(root=opts.data_root, split='val')

    else:
        raise ValueError(f'Unknown dataset: {opts.dataset}')

    return train_dst, val_dst
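The six agricultural branches differ only in the dataset class, so a lookup table is a natural refactor. A sketch, reusing the class names from the function above:

AGRI_DATASETS = {
    'weedcluster': WeedClusterDataset,
    'cloudshadow': CloudShadowDataset,
    'doubleplant': DoublePlantDataset,
    'planterskip': PlanterSkipDataset,
    'standingwater': StandingWaterDataset,
    'waterway': WaterwayDataset,
}

def get_agri_dataset(opts):
    # Look up the dataset class once instead of chaining if-statements.
    cls = AGRI_DATASETS[opts.dataset]
    return (cls(root=opts.data_root, split='train'),
            cls(root=opts.data_root, split='val'))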
Example #7
import torch
import torch.nn as nn
import torch.nn.functional as F

from PIL import Image
import matplotlib.pyplot as plt

# Project-specific imports this snippet depends on (the source omits them;
# the module paths below are assumptions):
from torch.utils import data
from datasets import Cityscapes
from utils import ext_transforms as et

data_root = '/data/kdn/Dataset/Cityscapes'
crop_size = 520
train_transform = et.ExtCompose([
    et.ExtRandomCrop(size=(crop_size, crop_size)),
    et.ExtColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),
    et.ExtRandomHorizontalFlip(),
    et.ExtToTensor(),
    et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

val_transform = et.ExtCompose([
    et.ExtToTensor(),
    et.ExtNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

train_dst = Cityscapes(root=data_root,
                       split='train',
                       transform=train_transform)
val_dst = Cityscapes(root=data_root, split='val', transform=val_transform)

train_loader = data.DataLoader(train_dst,
                               batch_size=2,
                               shuffle=True)  # snippet truncated in the source; shuffle=True assumed
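A quick check that the pipeline produces sensible pairs, using the matplotlib import above. The inline un-normalization mirrors the ExtNormalize statistics; the (image, label) sample layout is an assumption:

import numpy as np

image, label = train_dst[0]
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img = (image.numpy().transpose(1, 2, 0) * std + mean).clip(0, 1)

fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(img)
axes[0].set_title('image')
axes[1].imshow(label.numpy())
axes[1].set_title('label')
plt.show()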