Ejemplo n.º 1
0
 def __init__(self, root):
     """Set up resize/normalise pipelines and index the ISIC-2017 training files."""
     self.size = (180,135)
     self.root = root
     if not os.path.exists(self.root):
         raise Exception("[!] {} not exists.".format(root))
     self.name = os.path.basename(root)
     # Bilinear resize for images; colorjitter-style augmentation could be
     # inserted into this pipeline later.
     self.img_resize = Compose([
         Scale(self.size, Image.BILINEAR),
     ])
     # Nearest-neighbour resize keeps label ids intact.
     self.label_resize = Compose([
         Scale(self.size, Image.NEAREST),
     ])
     # ImageNet channel statistics for the RGB branch.
     imagenet_mean = [0.485, 0.456, 0.406]
     imagenet_std = [0.229, 0.224, 0.225]
     self.img_transform = Compose([
         ToTensor(),
         Normalize(mean=imagenet_mean,std=imagenet_std),
     ])
     self.hsv_transform = Compose([
         ToTensor(),
     ])
     self.label_transform = Compose([
         ToLabel(),
         ReLabel(255, 1),
     ])
     # NOTE: file listing reads fixed /scratch paths, not `root`; sorted
     # bare filenames keep image/mask pairs aligned by index.
     self.d_path = "/scratch/prathyuakundi/MIA_data/ISIC-2017_Training_Data/"
     self.g_path = "/scratch/prathyuakundi/MIA_data/ISIC-2017_Training_Part1_GroundTruth/ISIC-2017_Training_Part1_GroundTruth/"
     self.input_paths = sorted(next(os.walk(self.d_path))[2])
     self.label_paths = sorted(next(os.walk(self.g_path))[2])
     if not self.input_paths or not self.label_paths:
         raise Exception("No images/labels are found in {}".format(self.root))
Ejemplo n.º 2
0
 def __init__(self, root):
     """Build transform pipelines and glob the ISIC-2017 training images/masks."""
     self.size = (180,135)
     self.root = root
     if not os.path.exists(self.root):
         raise Exception("[!] {} not exists.".format(root))
     self.name = os.path.basename(root)
     # Resize stages (Compose chains transforms like torch.nn.Sequential).
     self.img_resize = Compose([
         Scale(self.size, Image.BILINEAR),  # Scale is deprecated in favor of Resize
     ])
     self.label_resize = Compose([
         Scale(self.size, Image.NEAREST),
     ])
     # RGB branch: tensorise then ImageNet mean/std normalisation.
     self.img_transform = Compose([
         ToTensor(),
         Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),
     ])
     self.hsv_transform = Compose([
         ToTensor(),
     ])
     self.label_transform = Compose([
         ToLabel(),
         ReLabel(255, 1),
     ])
     # Sorted globs keep image/mask pairs aligned by filename.
     img_pattern = os.path.join(self.root, '{}/*.jpg'.format("ISIC-2017_Training_Data"))
     lbl_pattern = os.path.join(self.root, '{}/*.png'.format("ISIC-2017_Training_Part1_GroundTruth"))
     self.input_paths = sorted(glob(img_pattern))
     self.label_paths = sorted(glob(lbl_pattern))
     if not self.input_paths or not self.label_paths:
         raise Exception("No images/labels are found in {}".format(self.root))
Ejemplo n.º 3
0
    def __init__(self, root):
        """Index the ISIC-2017 test split and prepare per-modality transforms."""
        self.root = root
        if not os.path.exists(self.root):
            raise Exception("[!] {} not exists.".format(root))
        self.name = os.path.basename(root)
        target_size = (128,128)
        # RGB branch: resize, tensorise, ImageNet-normalise.
        self.img_transform = Compose([
            Scale(target_size, Image.BILINEAR),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),
        ])
        # HSV branch skips normalisation.
        self.hsv_transform = Compose([
            Scale(target_size, Image.BILINEAR),
            ToTensor(),
        ])
        # Nearest resize preserves mask ids; ReLabel maps 255 -> 1.
        self.label_transform = Compose([
            Scale(target_size, Image.NEAREST),
            ToLabel(),
            ReLabel(255, 1),
        ])
        # Sorted globs keep image/mask pairs aligned by filename.
        self.input_paths = sorted(glob(os.path.join(self.root, '{}/*.jpg'.format("ISIC-2017_Test_v2_Data"))))
        self.label_paths = sorted(glob(os.path.join(self.root, '{}/*.png'.format("ISIC-2017_Test_v2_Part1_GroundTruth"))))
        if not self.input_paths or not self.label_paths:
            raise Exception("No images/labels are found in {}".format(self.root))
Ejemplo n.º 4
0
 def __init__(self, root):
     """Prepare tensor-only transforms and list the test300 images/ground truths."""
     size = (465, 381)  # nominal target size; the resize stages are currently disabled
     self.root = root
     if not os.path.exists(self.root):
         raise Exception("[!] {} not exists.".format(root))
     self.name = os.path.basename(root)
     # Images are only tensorised — no resize, no normalisation.
     self.img_transform = Compose([
         ToTensor(),
     ])
     self.hsv_transform = Compose([
         ToTensor(),
     ])
     # Labels: tensorise, then remap 255 -> 1.
     self.label_transform = Compose([
         ToTensor(),
         ReLabel(255, 1),
     ])
     # NOTE: data comes from fixed /media/dataraid paths, not from `root`.
     img_dir = '/media/dataraid/tensorflow/segm/experiments_data/test300_imgs/'
     gt_dir = '/media/dataraid/tensorflow/segm/experiments_data/test300_gts/'
     self.input_paths = sorted(glob(os.path.join(img_dir, '{}/*.jpg'.format("data"))))
     self.label_paths = sorted(glob(os.path.join(gt_dir, '{}/*.jpg'.format("data"))))
     if not self.input_paths or not self.label_paths:
         raise Exception("No images/labels are found in {}".format(self.root))
Ejemplo n.º 5
0
    def __init__(self, root):
        """Training-split loader for the train2500 experiment data.

        Builds (mostly disabled) resize pipelines plus the image/label
        tensor transforms, then lists input and ground-truth files.

        NOTE(review): RandomRotation(10) sits in two *independent* Compose
        pipelines (img_transform and label_transform), so image and mask
        are rotated by different random angles — this looks like an
        augmentation-alignment bug; confirm the intended behaviour.
        """
        self.size = (465, 381)  #(369,233)
        self.root = root
        if not os.path.exists(self.root):
            raise Exception("[!] {} not exists.".format(root))

        # Resize stages are currently empty (all Scale calls commented out).
        self.img_resize = Compose([
            #Scale(self.size, Image.BILINEAR),
            # We can do some colorjitter augmentation here
            # ColorJitter(brightness=0, contrast=0, saturation=0, hue=0),
        ])
        self.label_resize = Compose([
            # Scale(self.size, Image.NEAREST),
        ])
        self.img_transform = Compose([
            RandomRotation(10),
            ToTensor(),
            #Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]),
            # Normalize((0.5, ), (0.5, )),
        ])
        self.hsv_transform = None  # Compose([
        #     ToTensor(),
        # ])
        self.label_transform = Compose([
            RandomRotation(10),  # see NOTE(review) in the docstring
            ToTensor(),
            #ToLabel(),
            ReLabel(255, 1),
        ])
        # Sort file names so images and ground truths pair up by index.
        self.input_paths = sorted(
            glob(
                os.path.join(
                    '/media/dataraid/tensorflow/segm/experiments_data/train2500_imgs/',
                    '{}/*.jpg'.format("data"))))
        self.label_paths = sorted(
            glob(
                os.path.join(
                    '/media/dataraid/tensorflow/segm/experiments_data/train2500_gts',
                    '{}/*.jpg'.format("data"))))
        self.name = os.path.basename(root)
        if len(self.input_paths) == 0 or len(self.label_paths) == 0:
            raise Exception("No images/labels are found in {}".format(
                self.root))
Ejemplo n.º 6
0
    def __init__(self, fpath, augmentation=None, with_targets=True):
        """Dataset built from a listing file.

        Each non-empty line of *fpath* is "<input_path> <target_path>" when
        with_targets is true, otherwise just "<input_path>". Every sample
        is then paired with every 0/1 augmentation combination.

        NOTE(review): n_augmentation is math.factorial(len(augmentation)),
        which yields 2**(k!) combinations instead of the expected 2**k —
        confirm this is intentional.
        """
        if not os.path.isfile(fpath):
            raise FileNotFoundError(
                "Could not find dataset file: '{}'".format(fpath))

        if not augmentation:
            augmentation = []
        n_augmentation = math.factorial(
            len(augmentation)) if len(augmentation) > 0 else 0
        # All on/off masks over the augmentations (just () when there are none).
        augmentation_combinations = list(
            itertools.product([0, 1], repeat=n_augmentation))

        self.with_targets = with_targets
        self.size = (180, 135)

        # Bilinear for images, nearest for targets (preserves label ids).
        self.input_resize = Scale(self.size, Image.BILINEAR)
        self.target_resize = Scale(self.size, Image.NEAREST)
        self.input_transform = Compose([
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        self.target_transform = Compose([
            ToLabel(),
            ReLabel(255, 1),
        ])

        self.augmentation = augmentation

        # Parse the listing file, skipping blank lines.
        with open(fpath, "r") as f:
            lines = filter(lambda l: bool(l), f.read().split("\n"))
            if self.with_targets:
                data = [(input.strip(), target.strip())
                        for input, target in funcy.walk(
                            lambda l: l.split(" "), lines)]
            else:
                data = [(input.strip(), None) for input in lines]

        # Cartesian product: each sample under each augmentation mask.
        self.data = [(d, transform_list)
                     for transform_list in augmentation_combinations
                     for d in data]
Ejemplo n.º 7
0

if __name__ == '__main__':

    # Image pipeline: photometric augmentation, tensorise, ImageNet-normalise.
    input_transform = Compose([
        ColorAug(),
        Add_Gaussion_noise(prob=0.5),
        #Scale((512, 512), Image.BILINEAR),
        ToTensor(),
        Normalize([.485, .456, .406], [.229, .224, .225]),
    ])
    # Label pipeline: integer labels with 255 remapped to 1.
    target_transform = Compose([
        #Scale((512, 512), Image.NEAREST),
        #ToSP(512),
        ToLabel(),
        ReLabel(255, 1),
    ])

    # Joint spatial augmentations applied identically to image and label.
    img_label_transform = Compose_imglabel([
        Random_crop(512, 512),
        Random_horizontal_flip(0.5),
        Random_vertical_flip(0.5),
    ])
    dst = VOCDataSet("./",
                     img_transform=input_transform,
                     label_transform=target_transform,
                     image_label_transform=img_label_transform)
    trainloader = data.DataLoader(dst, batch_size=1)

    # Loop variable renamed from `data` to `batch`: the old name shadowed
    # the `data` module used above for data.DataLoader.
    for i, batch in enumerate(trainloader):
        imgs, labels = batch
Ejemplo n.º 8
0
# Refuse to clobber a finished run, record the executing host, then persist
# the run arguments to the experiment's JSON file.
check_if_done(json_fn)
args.machine = os.uname()[1]
save_dic_to_json(args.__dict__, json_fn)

# Command-line shapes arrive as strings; coerce each element to int.
train_img_shape = tuple([int(x) for x in args.train_img_shape])
test_img_shape = tuple([int(x) for x in args.test_img_shape])

# Images: bilinear resize, tensorise, ImageNet mean/std normalisation.
img_transform = Compose([
    Scale(train_img_shape, Image.BILINEAR),
    ToTensor(),
    Normalize([.485, .456, .406], [.229, .224, .225]),
])
# Labels: nearest resize keeps class ids; 255 ("void") -> last class index.
label_transform = Compose([
    Scale(train_img_shape, Image.NEAREST),
    ToLabel(),
    ReLabel(255, train_args.n_class - 1),
])

# Target-domain evaluation set, streamed one image at a time.
tgt_dataset = get_dataset(dataset_name=args.tgt_dataset,
                          split=args.split,
                          img_transform=img_transform,
                          label_transform=label_transform,
                          test=True,
                          input_ch=train_args.input_ch)

target_loader = data.DataLoader(tgt_dataset, batch_size=1, pin_memory=True)

if torch.cuda.is_available():
    model.cuda()

# Inference mode: freezes dropout / batch-norm statistics.
model.eval()
Ejemplo n.º 9
0
# Shared photometric augmentor used inside the input pipeline.
image_augmentor = ImageAugmentor()

NUM_CLASSES = 6
MODEL_NAME = "seg-norm"

# Images: resize to 512x256, augment, tensorise, ImageNet-normalise.
input_transform = Compose([
    Scale((512, 256), Image.BILINEAR),
    Augment(0, image_augmentor),
    ToTensor(),
    Normalize([.485, .456, .406], [.229, .224, .225]),
])
# Full-resolution label pipeline.
target_transform = Compose([
    Scale((512, 256), Image.NEAREST),
    ToLabel(),
    ReLabel(),
])

# Half- and quarter-resolution label targets — presumably for deep
# supervision of intermediate decoder outputs; confirm against the model.
target_2_transform = Compose([
    Scale((256, 128), Image.NEAREST),
    ToLabel(),
    ReLabel(),
])

target_4_transform = Compose([
    Scale((128, 64), Image.NEAREST),
    ToLabel(),
    ReLabel(),
])

trainloader = data.DataLoader(CSDataSet("/root/group-incubation-bj",
Ejemplo n.º 10
0
# NOTE(review): wipes previous TensorBoard runs via a shell command —
# assumes a POSIX shell; verify this aggressive cleanup is intended.
os.system('rm -rf ./runs/*')
writer = SummaryWriter('./runs/' + datetime.now().strftime('%B%d  %H:%M:%S'))

# ImageNet channel statistics.
std = [.229, .224, .225]
mean = [.485, .456, .406]

input_transform = Compose([
    Scale((256, 256), Image.BILINEAR),
    ToTensor(),
    Normalize(mean, std),
])
# Labels: ToSP(256) presumably builds a multi-scale label pyramid — TODO
# confirm; 255 ("void") is remapped to class 21.
target_transform = Compose([
    Scale((256, 256), Image.NEAREST),
    ToSP(256),
    ToLabel(),
    ReLabel(255, 21),
])

loader = data.DataLoader(VOCDataSet(
    "/home/zeng/data/datasets/segmentation_Dataset",
    img_transform=input_transform,
    label_transform=target_transform),
                         batch_size=12,
                         shuffle=True,
                         pin_memory=True)

res101 = resnet101(pretrained=True).cuda()
seg = Seg().cuda()

# Per-class loss weights: class 21 (the void slot) contributes nothing.
weight = torch.ones(22)
weight[21] = 0
Ejemplo n.º 11
0
# Shapes arrive as argparse strings; coerce to an int tuple.
train_img_shape = tuple([int(x) for x in args.train_img_shape])
img_transform_list = [
    Scale(train_img_shape, Image.BILINEAR),
    ToTensor(),
    Normalize([.485, .456, .406], [.229, .224, .225])
]
# Optional geometric augmentations are prepended so they act on the PIL
# image before tensorisation.
if args.augment:
    aug_list = [RandomRotation(), RandomHorizontalFlip(), RandomSizedCrop()]
    img_transform_list = aug_list + img_transform_list

img_transform = Compose(img_transform_list)

label_transform = Compose([
    Scale(train_img_shape, Image.NEAREST),
    ToLabel(),
    ReLabel(255,
            args.n_class - 1),  # Last Class is "Void" or "Background" class
])

# Source-domain training set; keys_dict renames entries in each batch dict.
src_dataset = get_dataset(dataset_name=args.src_dataset,
                          split=args.src_split,
                          img_transform=img_transform,
                          label_transform=label_transform,
                          test=False,
                          input_ch=args.input_ch,
                          keys_dict={
                              'image': 'S_image',
                              'mask': 'S_label_map'
                          })

tgt_dataset = get_dataset(dataset_name=args.tgt_dataset,
                          split=args.tgt_split,
Ejemplo n.º 12
0
    def __init__(self,
                 args,
                 batch_size=64,
                 source='svhn',
                 target='mnist',
                 learning_rate=0.0002,
                 interval=100,
                 optimizer='adam',
                 num_k=4,
                 all_use=False,
                 checkpoint_dir=None,
                 save_epoch=10):
        """Set up data loaders and the G/C1/C2 networks for adversarial
        domain-adaptation training.

        For the 'citycam' source/target pair a segmentation-style pipeline
        is imported lazily and loaders are built via get_dataset; otherwise
        dataset_read supplies the digit-domain loaders.
        """
        self.batch_size = batch_size
        self.source = source
        self.target = target
        self.num_k = num_k
        self.checkpoint_dir = checkpoint_dir
        self.save_epoch = save_epoch
        self.use_abs_diff = args.use_abs_diff
        self.all_use = all_use
        # SVHN digits are rescaled; other digit sources are not.
        if self.source == 'svhn':
            self.scale = True
        else:
            self.scale = False
        print('dataset loading')
        if self.source == 'citycam' or self.target == 'citycam':
            # Lazy imports: the segmentation package lives one level up and
            # is only needed for the citycam path.
            import sys, os
            sys.path.append(
                os.path.join(os.path.dirname(__file__), '..', 'segmentation'))
            from transform import ReLabel, ToLabel, Scale, RandomSizedCrop, RandomHorizontalFlip, RandomRotation
            from PIL import Image
            from torchvision.transforms import Compose, Normalize, ToTensor
            from datasets import ConcatDataset, get_dataset, check_src_tgt_ok
            from models.model_util import get_models, get_optimizer

            train_img_shape = (
                64, 64)  #  tuple([int(x) for x in args.train_img_shape])
            img_transform_list = [
                Scale(train_img_shape, Image.BILINEAR),
                ToTensor(),
                Normalize([.485, .456, .406], [.229, .224, .225])
            ]
            #            if args.augment:
            #                aug_list = [
            #                    RandomRotation(),
            #                    RandomHorizontalFlip(),
            #                    RandomSizedCrop()
            #                ]
            #                img_transform_list = aug_list + img_transform_list

            img_transform = Compose(img_transform_list)

            label_transform = Compose([
                Scale(train_img_shape, Image.NEAREST),
                ToLabel(),
                ReLabel(
                    255, 12
                )  # args.n_class - 1),  # Last Class is "Void" or "Background" class
            ])

            # NOTE(review): src_dataset_test is built but never used below —
            # only the target test set feeds self.dataset_test.
            src_dataset_test = get_dataset(dataset_name='citycam',
                                           split='synthetic-Sept19',
                                           img_transform=img_transform,
                                           label_transform=label_transform,
                                           test=True,
                                           input_ch=3,
                                           keys_dict={
                                               'image': 'image',
                                               'yaw': 'label',
                                               'yaw_raw': 'label_raw'
                                           })

            tgt_dataset_test = get_dataset(
                dataset_name='citycam',
                split=
                'real-Sept23-train, objectid IN (SELECT objectid FROM properties WHERE key="yaw")',
                img_transform=img_transform,
                label_transform=label_transform,
                test=True,
                input_ch=3,
                keys_dict={
                    'image': 'image',
                    'yaw': 'label',
                    'yaw_raw': 'label_raw'
                })

            self.dataset_test = torch.utils.data.DataLoader(
                #src_dataset_test,
                tgt_dataset_test,
                batch_size=args.batch_size,
                shuffle=False,
                pin_memory=True)

            dataset_train = get_dataset(dataset_name='citycam',
                                        split='synthetic-Sept19',
                                        img_transform=img_transform,
                                        label_transform=label_transform,
                                        test=False,
                                        input_ch=3,
                                        keys_dict={
                                            'image': 'S_image',
                                            'yaw': 'S_label',
                                            'yaw_raw': 'S_label_raw'
                                        })

            self.dataset_train = torch.utils.data.DataLoader(
                dataset_train,
                batch_size=args.batch_size,
                shuffle=True,
                pin_memory=True)

        else:
            from datasets_dir.dataset_read import dataset_read
            self.datasets_test, self.dataset_train = dataset_read(
                target,
                source,
                self.batch_size,
                scale=self.scale,
                all_use=self.all_use)
        self.G = Generator(source=source, target=target)
        print('load finished!')
        self.C1 = Classifier(source=source, target=target)
        self.C2 = Classifier(source=source, target=target)
        if args.eval_only:
            # NOTE(review): the three loads below are identical and call
            # `self.G.torch.load`, which is unlikely to exist on Generator.
            # The second call also passes one extra argument to the format
            # string (raises TypeError at runtime). Presumably these were
            # meant to restore G, C1 and C2 checkpoints — confirm and fix.
            self.G.torch.load('%s/%s_to_%s_model_epoch%s_G.pt' %
                              (self.checkpoint_dir, self.source, self.target,
                               args.resume_epoch))
            self.G.torch.load('%s/%s_to_%s_model_epoch%s_G.pt' %
                              (self.checkpoint_dir, self.source, self.target,
                               self.checkpoint_dir, args.resume_epoch))
            self.G.torch.load('%s/%s_to_%s_model_epoch%s_G.pt' %
                              (self.checkpoint_dir, self.source, self.target,
                               args.resume_epoch))

        self.G.cuda()
        self.C1.cuda()
        self.C2.cuda()
        self.interval = interval

        self.set_optimizer(which_opt=optimizer, lr=learning_rate)
        self.lr = learning_rate
    def __getitem__(self, index):
        """Load one (image, label) sample, assembling the image tensor from
        the modalities implied by self.input_ch and self.split.

        Returns (img, label), plus the label file path when self.test is
        truthy.
        """
        datafiles = self.files[self.split][index]

        # 1 channel: depth only.
        if self.input_ch == 1:
            if "d" in self.split:
                img = Image.open(datafiles["depth"])
                np_img = depth_scaling(np.array(img))
                img = Image.fromarray(np_img)
                if self.img_transform:
                    img = self.img_transform(img)
            else:
                raise NotImplementedError()

        # 3 channels: HHA or plain RGB.
        elif self.input_ch == 3:
            if "hha" in self.split:
                hha = Image.open(datafiles["hha"])
                if self.extra_img_transform:
                    img = self.extra_img_transform(hha)
                else:
                    img = self.img_transform(hha)
            # RGB
            else:
                img = Image.open(datafiles["rgb"])

                if self.img_transform:
                    img = self.img_transform(img)

        # 4 channels: RGB plus depth ("rgbd") or a repeated R channel.
        elif self.input_ch == 4:
            img = Image.open(datafiles["rgb"])
            np3ch = np.array(img)
            if "rgbd" in self.split:
                depth_file = datafiles["depth"]
                np_depth = np.array(Image.open(depth_file))
                np_depth = depth_scaling(np_depth)
                extended_np3ch = np.concatenate([np3ch, np_depth[:, :, np.newaxis]], axis=2)
                # print ("4ch is Depth channel")

            # RGBR
            else:
                extended_np3ch = np.concatenate([np3ch, np3ch[:, :, 0:1]], axis=2)
                # print ("4ch is R channel")
            img = Image.fromarray(np.uint8(extended_np3ch))
            if self.img_transform:
                img = self.img_transform(img)


        # RGBHHA
        elif self.input_ch == 6:
            rgb = Image.open(datafiles["rgb"])
            rgb = self.img_transform(rgb)

            hha = Image.open(datafiles["hha"])
            if self.extra_img_transform:
                hha = self.extra_img_transform(hha)
            else:
                hha = self.img_transform(hha)

            img = torch.cat([rgb, hha])

        # RGBHHAB
        elif self.input_ch == 7:
            rgb = Image.open(datafiles["rgb"])
            rgb = self.img_transform(rgb)

            hha = Image.open(datafiles["hha"])
            if self.extra_img_transform:
                hha = self.extra_img_transform(hha)
            else:
                hha = self.img_transform(hha)

            # Reuse the label pipeline minus its final stage, appending
            # ReLabel(255, 1) to turn the boundary map into a 0/1 tensor.
            convert_to_torch_tensor = Compose(
                self.label_transform.transforms[:-1] + [ReLabel(255, 1)])  # Scale, ToTensor (Without Normalize)
            boundary = convert_to_torch_tensor(Image.open(datafiles["boundary"])).unsqueeze(0)
            # boundary = self.label_transform(Image.open(datafiles["boundary"]).convert("P")).unsqueeze(0)

            img = torch.cat([rgb, hha, boundary.float()])


        else:
            raise NotImplementedError()

        # Labels are palette images; "P" mode keeps the raw class indices.
        label_file = datafiles["label"]
        label = Image.open(label_file).convert("P")

        if self.label_transform:
            label = self.label_transform(label)

        if self.test:
            return img, label, label_file

        return img, label
# Prepend geometric augmentations so they act on PIL images before ToTensor.
if args.augment:
    aug_list = [
        RandomRotation(),
        # RandomVerticalFlip(), # non-realistic
        RandomHorizontalFlip(),
        RandomSizedCrop()
    ]
    img_transform_list = aug_list + img_transform_list

img_transform = Compose(img_transform_list)

label_transform = Compose([
    Scale(train_img_shape, Image.NEAREST),
    ToLabel(),
    ReLabel(255, num_class - 1),  # 255 ("void") -> last class index
])

src_dataset = get_dataset(dataset_name=args.src_dataset,
                          split="train",
                          img_transform=img_transform,
                          label_transform=label_transform,
                          test=False,
                          input_ch=args.input_ch)

# DataLoader worker/pinning options only help when CUDA is available.
kwargs = {
    'num_workers': 1,
    'pin_memory': True
} if torch.cuda.is_available() else {}
train_loader = torch.utils.data.DataLoader(src_dataset,
                                           batch_size=args.batch_size,
Ejemplo n.º 15
0
# Experiment bookkeeping (output directory, epochs, visdom env/window).
result_directory = "result_fcn8s_0709"
epoch_num = 500
env = "segmentation"
win = ""

# Images: resize, tensorise, normalise with the mean/std defined above.
input_transform = transforms.Compose([
    Scale((256, 256), Image.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean, std),
])
# Labels: nearest resize keeps class ids; 255 ("void") is remapped to 0.
target_transform = transforms.Compose([
    Scale((256, 256), Image.NEAREST),
    ToSP(256),
    #transforms.ToTensor(),
    ToLabel(),
    ReLabel(255,0),
])

n_classes = 22
dataset = VOCDataSet(root_directory,img_transform=input_transform,label_transform=target_transform)
dataloader = data.DataLoader(dataset,batch_size=8,shuffle=True,drop_last=False)



net = UNet(3,n_classes)

# Fixed Python-2-only `print net` statement; the call form is valid on
# both Python 2 and 3 and prints the same representation.
print(net)
# weights = torch.ones(22)
# weights[21] = 0
# weights = weights.cuda()
#
Ejemplo n.º 16
0
# Prepend geometric augmentations so they act on PIL images before ToTensor.
if args.augment:
    aug_list = [
        RandomRotation(),
        # RandomVerticalFlip(), # non-realistic
        RandomHorizontalFlip(),
        RandomSizedCrop()
    ]
    img_transform_list = aug_list + img_transform_list

img_transform = Compose(img_transform_list)

label_transform = Compose([
    Scale(train_img_shape, Image.NEAREST),
    ToLabel(),
    ReLabel(255, args.n_class - 1),  # 255 ("void") -> last class index
])

src_dataset = get_dataset(dataset_name=args.src_dataset,
                          split=args.split,
                          img_transform=img_transform,
                          label_transform=label_transform,
                          test=False,
                          input_ch=args.input_ch)

# DataLoader worker/pinning options only help when CUDA is available.
kwargs = {
    'num_workers': 1,
    'pin_memory': True
} if torch.cuda.is_available() else {}
train_loader = torch.utils.data.DataLoader(src_dataset,
                                           batch_size=args.batch_size,
Ejemplo n.º 17
0
                                lr=args.lr,
                                momentum=args.momentum,
                                opt=args.opt,
                                weight_decay=args.weight_decay)

# load image
# Shapes arrive as argparse strings; coerce to an int tuple.
train_img_shape = tuple([int(x) for x in args.train_img_shape])
img_transform = Compose([
    Scale(train_img_shape, Image.BILINEAR),
    ToTensor(),
    Normalize([.485, .456, .406], [.229, .224, .225])
])
label_transform = Compose([
    Scale(train_img_shape, Image.NEAREST),
    ToLabel(),
    ReLabel(255, args.n_class - 1),  # convert label
])

# Labelled source-domain set and unlabelled target-domain set (the target
# has no label lists or label transform).
source_dataset = get_dataset(dataset_name='source',
                             img_lists=args.source_list,
                             label_lists=args.source_label_list,
                             img_transform=img_transform,
                             label_transform=label_transform,
                             test=False)
target_dataset = get_dataset(dataset_name='target',
                             img_lists=args.target_list,
                             label_lists=None,
                             img_transform=img_transform,
                             label_transform=None,
                             test=False)