Example #1
# Compose, ToTensor, Grayscale and ColorJitter come from torchvision.transforms;
# scale_rgb_to_256_factor, scale_gt_to_256_factor, center_crop_to_256_factor,
# AddNoise and to_longTensor_gt are project-specific helpers defined elsewhere
# in the source module.
from torchvision.transforms import Compose, ToTensor, Grayscale, ColorJitter


def get_transform(args):

    # Build the RGB and ground-truth pipelines according to --transform.
    if args.transform == 'resize':

        rgb_transforms = Compose(
            [scale_rgb_to_256_factor,
             ToTensor(),
             AddNoise((3, 256, 1024))])
        gt_transforms = Compose(
            [scale_gt_to_256_factor,
             ToTensor(), to_longTensor_gt])

    elif args.transform == 'crop':

        rgb_transforms = Compose(
            [center_crop_to_256_factor,
             ToTensor(),
             AddNoise((3, 256, 1024))])
        gt_transforms = Compose(
            [center_crop_to_256_factor,
             ToTensor(), to_longTensor_gt])

    else:
        raise ValueError('the value (%s) for --transform is not valid.' %
                         args.transform)

    # Color augmentations are only added for training. The .probability
    # attribute is a custom field, not a torchvision parameter; it is
    # presumably read back by the data pipeline that applies the augmentations.
    torchvision_transforms = []

    if args.phase == 'train':
        grayscale = Grayscale(num_output_channels=3)
        grayscale.probability = 0.075
        torchvision_transforms.append(grayscale)
        colorJitter = ColorJitter(brightness=0.2,
                                  contrast=0.2,
                                  saturation=0.2,
                                  hue=0.2)
        colorJitter.probability = 0.1
        torchvision_transforms.append(colorJitter)

        return rgb_transforms, torchvision_transforms, gt_transforms

    elif args.phase == 'test':

        # Evaluation only needs the RGB preprocessing pipeline.
        return rgb_transforms
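
A minimal sketch of how this example might be called, assuming an argparse-style namespace. Only the attribute names transform and phase come from the function body; the Namespace invocation below is illustrative, not part of the original code.

from argparse import Namespace

# Hypothetical invocation; Namespace stands in for the parsed command-line
# arguments that the original training script would pass in.
args = Namespace(transform='resize', phase='train')
rgb_tf, color_augs, gt_tf = get_transform(args)

# rgb_tf and gt_tf are torchvision Compose pipelines; color_augs is a plain
# list of augmentations, each carrying a custom .probability attribute.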
Example #2

# Compose, Resize, ToTensor, Grayscale and ColorJitter again come from
# torchvision.transforms; AddNoise, center_crop_to_256_factor,
# to_longTensor_gt and to_floatTensor_depth are project-specific helpers
# defined elsewhere in the source module.
import PIL.Image
from torchvision.transforms import (Compose, Resize, ToTensor, Grayscale,
                                    ColorJitter)


def get_transform(args):

    # 'resize' mode downsamples everything to 128x512; the label maps use
    # nearest-neighbour interpolation so that class ids are not blended.
    if args.transform == 'resize':

        rgb_transforms = Compose(
            [Resize((128, 512)),
             ToTensor(),
             AddNoise((3, 128, 512))])

        if args.phase == 'train':
            gt_transforms = Compose([
                Resize((128, 512), interpolation=PIL.Image.NEAREST),
                ToTensor(), to_longTensor_gt
            ])
            depth_transforms = Compose(
                [Resize((128, 512)),
                 ToTensor(), to_floatTensor_depth])
        else:
            gt_transforms = []
            depth_transforms = []

    elif args.transform == 'crop':

        rgb_transforms = Compose(
            [center_crop_to_256_factor,
             ToTensor(),
             AddNoise((3, 128, 512))])

        if args.phase == 'train':
            gt_transforms = Compose(
                [center_crop_to_256_factor,
                 ToTensor(), to_longTensor_gt])
            depth_transforms = Compose(
                [center_crop_to_256_factor,
                 ToTensor(), to_floatTensor_depth])
        else:
            gt_transforms = []
            depth_transforms = []

    else:
        # The --transform value is only validated during training; with an
        # invalid value at test time, rgb_transforms is never assigned and the
        # return below would raise a NameError.
        if args.phase == 'train':
            raise ValueError('the value (%s) for --transform is not valid.' %
                             args.transform)

    # Training-only color augmentations; as in the first example, the custom
    # .probability attribute is ignored by torchvision itself.
    torchvision_transforms = []

    if args.phase == 'train':
        grayscale = Grayscale(num_output_channels=3)
        grayscale.probability = 0.075
        torchvision_transforms.append(grayscale)
        colorJitter = ColorJitter(brightness=0.2,
                                  contrast=0.2,
                                  saturation=0.2,
                                  hue=0.2)
        colorJitter.probability = 0.1
        torchvision_transforms.append(colorJitter)

        return rgb_transforms, torchvision_transforms, depth_transforms, gt_transforms

    elif args.phase == 'test':

        return rgb_transforms
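
torchvision's Grayscale and ColorJitter have no built-in notion of an application probability, so the .probability attribute set in both examples is a custom field that the consuming data pipeline presumably reads back. Below is a minimal sketch of one way such a list could be applied; the helper name and the independent per-transform coin flip are assumptions, not part of the original code.

import random

def apply_color_augmentations(img, augmentations):
    # Apply each augmentation independently with its attached probability.
    # `augmentations` is the torchvision_transforms list returned above.
    for t in augmentations:
        if random.random() < getattr(t, 'probability', 1.0):
            img = t(img)
    return img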