Example #1
        def __init__(self):
            self.lr = 1e-5  #0.007
            self.lr_gamma = 0.1
            self.momentum = 0.9
            self.weight_decay = 0.00004
            self.bn_mom = 0.0003
            self.power = 0.9
            self.gpus = 1
            self.batch_size = 2
            self.epochs = 10
            self.eval_display = 200
            self.display = 2
            self.num_classes = 21
            self.ckpt_step = 5000
            self.workers = 1
            self.distributed = True
            self.crop_height = 512
            self.crop_width = 512
            self.sampler = DistributedSampler

            self.log_dir = './log'
            self.log_name = 'deeplabv3+'
            self.transforms = Compose([
                RandomResizedCrop(size=(self.crop_height, self.crop_width)),
                RandomHorizontalFlip(),
                RandomVerticalFlip(),
                ToTensor(),
                Normalize(mean=(0.485, 0.456, 0.406),
                          std=(0.229, 0.224, 0.225))
            ])
Example #2
def get_transform(train):
    transforms = []
    if train:
        transforms.append(Rescale(256))
        transforms.append(RandomCrop(224))
    else:
        transforms.append(Rescale(224))
    transforms.append(ToTensor())

    return Compose(transforms)
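
As a point of reference, a roughly equivalent pipeline can be written with plain torchvision transforms; the names below are standard torchvision classes, not the repository's custom Rescale/RandomCrop/ToTensor used in the example above.

from torchvision import transforms

def get_torchvision_transform(train):
    # Approximate torchvision-only counterpart of get_transform() above.
    ops = []
    if train:
        ops.append(transforms.Resize(256))
        ops.append(transforms.RandomCrop(224))
    else:
        ops.append(transforms.Resize(224))
    ops.append(transforms.ToTensor())
    return transforms.Compose(ops)
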
Example #3
    def __init__(self):
        self.model_path = ''
        self.batch_size = 1
        self.workers = 2
        self.result_dir = ''
        self.sampler = DistributedSampler
        self.transforms = Compose([
            ToTensor(),
            Normalize(mean=(0.485, 0.456, 0.406),
                      std=(0.229, 0.224, 0.225))
        ])
Example #4

if __name__ == "__main__":

    model = "./demo"
    net = fabrec.load_net(model, num_landmarks=17)
    net.eval()

    im_dir = "./images"
    img0 = "5.jpg"

    with torch.no_grad():

        img = load_image(im_dir, img0, channels=1, size=(256, 256))
        img /= 256
        img = ToTensor()(img)
        img = cfg.RHPE_NORMALIZER(img)
        img = torch.unsqueeze(img, 0)
        print(img, img.shape)

        X_recon, lms_in_crop, X_lm_hm = net.detect_landmarks(img)
        outputs = add_landmarks_to_images(img,
                                          lms_in_crop,
                                          skeleton=HandDataset.SKELETON,
                                          denorm=True,
                                          draw_wireframe=True,
                                          color=(0, 255, 255))
        X_recon = X_recon[0, :, :, :]
        X_recon = to_disp_image(X_recon, True)
        cv2.imwrite("images/outputs.jpg", outputs[0])
        cv2.imwrite("images/reconstruct.jpg", X_recon)
Example #5
def init(batch_size_labeled, batch_size_pseudo, state, split, input_sizes,
         sets_id, std, mean, keep_scale, reverse_channels, data_set, valtiny,
         no_aug):
    # Return data_loaders/data_loader
    # depending on whether the state is
    # 0: Pseudo labeling
    # 1: Semi-supervised training
    # 2: Fully-supervised training
    # 3: Just testing

    # For labeled set divisions
    split_u = split.replace('-r', '')
    split_u = split_u.replace('-l', '')

    # Transformations (compatible with unlabeled data/pseudo labeled data)
    # NOTE: torchvision.transforms.Compose can't be used here
    if data_set == 'voc':
        base = base_voc
        workers = 4
        transform_train = Compose([
            ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
            # RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
            RandomScale(min_scale=0.5, max_scale=1.5),
            RandomCrop(size=input_sizes[0]),
            RandomHorizontalFlip(flip_prob=0.5),
            Normalize(mean=mean, std=std)
        ])
        if no_aug:
            transform_train_pseudo = Compose([
                ToTensor(keep_scale=keep_scale,
                         reverse_channels=reverse_channels),
                Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
                Normalize(mean=mean, std=std)
            ])
        else:
            transform_train_pseudo = Compose([
                ToTensor(keep_scale=keep_scale,
                         reverse_channels=reverse_channels),
                # RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
                RandomScale(min_scale=0.5, max_scale=1.5),
                RandomCrop(size=input_sizes[0]),
                RandomHorizontalFlip(flip_prob=0.5),
                Normalize(mean=mean, std=std)
            ])
        # transform_pseudo = Compose(
        #     [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
        #      Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
        #      Normalize(mean=mean, std=std)])
        transform_test = Compose([
            ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
            ZeroPad(size=input_sizes[2]),
            Normalize(mean=mean, std=std)
        ])
    elif data_set == 'city':  # All the same size (whole set is down-sampled by 2)
        base = base_city
        workers = 8
        transform_train = Compose([
            ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
            # RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
            Resize(size_image=input_sizes[2], size_label=input_sizes[2]),
            RandomScale(min_scale=0.5, max_scale=1.5),
            RandomCrop(size=input_sizes[0]),
            RandomHorizontalFlip(flip_prob=0.5),
            Normalize(mean=mean, std=std),
            LabelMap(label_id_map_city)
        ])
        if no_aug:
            transform_train_pseudo = Compose([
                ToTensor(keep_scale=keep_scale,
                         reverse_channels=reverse_channels),
                Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
                Normalize(mean=mean, std=std)
            ])
        else:
            transform_train_pseudo = Compose([
                ToTensor(keep_scale=keep_scale,
                         reverse_channels=reverse_channels),
                # RandomResize(min_size=input_sizes[0], max_size=input_sizes[1]),
                Resize(size_image=input_sizes[2], size_label=input_sizes[2]),
                RandomScale(min_scale=0.5, max_scale=1.5),
                RandomCrop(size=input_sizes[0]),
                RandomHorizontalFlip(flip_prob=0.5),
                Normalize(mean=mean, std=std)
            ])
        # transform_pseudo = Compose(
        #     [ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
        #      Resize(size_image=input_sizes[0], size_label=input_sizes[0]),
        #      Normalize(mean=mean, std=std),
        #      LabelMap(label_id_map_city)])
        transform_test = Compose([
            ToTensor(keep_scale=keep_scale, reverse_channels=reverse_channels),
            Resize(size_image=input_sizes[2], size_label=input_sizes[2]),
            Normalize(mean=mean, std=std),
            LabelMap(label_id_map_city)
        ])
    else:
        base = ''

    # Not the actual test set (i.e. the validation set)
    test_set = StandardSegmentationDataset(
        root=base,
        image_set='valtiny' if valtiny else 'val',
        transforms=transform_test,
        label_state=0,
        data_set=data_set)
    val_loader = torch.utils.data.DataLoader(dataset=test_set,
                                             batch_size=batch_size_labeled +
                                             batch_size_pseudo,
                                             num_workers=workers,
                                             shuffle=False)

    # Testing
    if state == 3:
        return val_loader
    else:
        # Fully-supervised training
        if state == 2:
            labeled_set = StandardSegmentationDataset(
                root=base,
                image_set=(str(split) + '_labeled_' + str(sets_id)),
                transforms=transform_train,
                label_state=0,
                data_set=data_set)
            labeled_loader = torch.utils.data.DataLoader(
                dataset=labeled_set,
                batch_size=batch_size_labeled,
                num_workers=workers,
                shuffle=True)
            return labeled_loader, val_loader

        # Semi-supervised training
        elif state == 1:
            pseudo_labeled_set = StandardSegmentationDataset(
                root=base,
                data_set=data_set,
                mask_type='.npy',
                image_set=(str(split_u) + '_unlabeled_' + str(sets_id)),
                transforms=transform_train_pseudo,
                label_state=1)
            labeled_set = StandardSegmentationDataset(
                root=base,
                data_set=data_set,
                image_set=(str(split) + '_labeled_' + str(sets_id)),
                transforms=transform_train,
                label_state=0)
            pseudo_labeled_loader = torch.utils.data.DataLoader(
                dataset=pseudo_labeled_set,
                batch_size=batch_size_pseudo,
                num_workers=workers,
                shuffle=True)
            labeled_loader = torch.utils.data.DataLoader(
                dataset=labeled_set,
                batch_size=batch_size_labeled,
                num_workers=workers,
                shuffle=True)
            return labeled_loader, pseudo_labeled_loader, val_loader

        else:
            # Labeling
            unlabeled_set = StandardSegmentationDataset(
                root=base,
                data_set=data_set,
                mask_type='.npy',
                image_set=(str(split_u) + '_unlabeled_' + str(sets_id)),
                transforms=transform_test,
                label_state=2)
            unlabeled_loader = torch.utils.data.DataLoader(
                dataset=unlabeled_set,
                batch_size=batch_size_labeled,
                num_workers=workers,
                shuffle=False)
            return unlabeled_loader
Example #6
parser.add_argument("--epochs", default=300, type=int,
                    help="number of epochs to train the model")

opt = parser.parse_args()

# all hyperparameters have been set to the values used by Stergios et al.

affine_transform = opt.linear
alpha = beta = 1e-6
num_epochs = opt.epochs
batch_size = 2      # works for Nvidia GeForce GTX 1080 (as mentioned in paper)
val_batch_size = 4  # may be able to validate with larger batch, depends on GPU
train_path = './../data/train'
val_path = './../data/val'

composed = transforms.Compose([IntensityNorm(), Rescale(), ToTensor()])
train_dataset = CTScanDataset(train_path, transform=composed)
trainloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataset = CTScanDataset(val_path, transform=composed)
valloader = DataLoader(val_dataset, batch_size=val_batch_size)

dev = torch.device(
    "cuda") if torch.cuda.is_available() else torch.device("cpu")

# A DataLoader is not indexable; assuming the intent was the shape of a single
# sample, take it from the dataset instead.
model = Register3d(train_dataset[0].size, device=dev, linear=affine_transform)
model.to(dev)

loss_func = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1,
                                                 patience=50, verbose=True)
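
The snippet above stops after constructing the optimizer and ReduceLROnPlateau scheduler. As a minimal, self-contained sketch (toy model and random tensors, not the Register3d pipeline), this is how such a scheduler is typically driven: the validation loss is passed to scheduler.step() once per epoch, and the learning rate is multiplied by factor after patience epochs without improvement.

import torch
import torch.nn as nn
import torch.optim as optim

# Toy stand-ins, only to exercise the scheduler.
model = nn.Linear(10, 1)
loss_func = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=50)

x, y = torch.randn(32, 10), torch.randn(32, 1)
for epoch in range(5):
    # Training step.
    optimizer.zero_grad()
    loss = loss_func(model(x), y)
    loss.backward()
    optimizer.step()

    # Validation loss (the training batch is reused here for brevity).
    with torch.no_grad():
        val_loss = loss_func(model(x), y)

    # ReduceLROnPlateau monitors a metric, so pass the validation loss.
    scheduler.step(val_loss)
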
Example #7
import imgaug.augmenters as iaa
from torchvision import transforms
from utils.transforms import ToTensor, PadSquare, RelativeLabels, AbsoluteLabels, ImgAug


class DefaultAug(ImgAug):
    def __init__(self):
        self.augmentations = iaa.Sequential([
            iaa.Dropout([0.0, 0.01]),
            iaa.Sharpen((0.0, 0.1)),
            # rotate by -10 to 10 degrees and translate (affects segmaps)
            iaa.Affine(rotate=(-10, 10), translate_percent=(-0.1, 0.1)),
            iaa.AddToBrightness((-10, 10)),
            iaa.AddToHue((-5, 5)),
            iaa.Fliplr(0.5),
        ])


AUGMENTATION_TRANSFORMS = transforms.Compose([
    AbsoluteLabels(),
    DefaultAug(),
    PadSquare(),
    RelativeLabels(),
    ToTensor(),
])
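
For reference, the same augmentation sequence can be exercised directly on a NumPy image with imgaug alone; this standalone sketch bypasses the repository's ImgAug wrapper and label handling.

import numpy as np
import imgaug.augmenters as iaa

seq = iaa.Sequential([
    iaa.Dropout([0.0, 0.01]),
    iaa.Sharpen((0.0, 0.1)),
    iaa.Affine(rotate=(-10, 10), translate_percent=(-0.1, 0.1)),
    iaa.AddToBrightness((-10, 10)),
    iaa.AddToHue((-5, 5)),
    iaa.Fliplr(0.5),
])

img = np.random.randint(0, 255, size=(256, 256, 3), dtype=np.uint8)
img_aug = seq.augment_image(img)  # augmented uint8 array with the same shape
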
Example #8
    def __init__(
        self,
        root,
        fullsize_img_dir,
        image_size=256,
        output_size=None,
        cache_root=None,
        train=True,
        transform=None,
        target_transform=None,
        crop_type="tight",
        color=True,
        start=None,
        max_samples=None,
        use_cache=True,
        test_split="fullset",
        crop_source="bb_ground_truth",
        loader=None,
        roi_background="black",
        crop_dir="crops",
        roi_margin=None,
        median_blur_crop=False,
        normalizer=cfg.RHPE_NORMALIZER,
        **kwargs,
    ):

        print("Setting up dataset {}...".format(self.__class__.__name__))
        print("Normalizer:", normalizer)

        if not isinstance(image_size, numbers.Number):
            raise ValueError(
                f"Image size must be a scalar number (image_size={image_size}).")

        if not os.path.exists(root):
            raise FileNotFoundError(f"Invalid dataset root path: '{root}'")

        if cache_root is not None and not os.path.exists(cache_root):
            raise FileNotFoundError(
                f"Invalid dataset cache path: '{cache_root}'")

        # The original re-checked `root` here; judging by the error message,
        # the full-size image directory was presumably intended.
        if not os.path.exists(fullsize_img_dir):
            raise FileNotFoundError(
                f"Image directory not found: '{fullsize_img_dir}'")

        self.fullsize_img_dir = fullsize_img_dir
        self.root = root
        self.cache_root = cache_root if cache_root is not None else self.root

        self.image_size = image_size
        if output_size is not None:
            self.output_size = output_size
        else:
            self.output_size = image_size

        if roi_margin is None:
            # crop size equals input diagonal, so images can be fully rotated
            self.roi_size = geometry.get_diagonal(image_size)
            self.margin = self.roi_size - self.image_size
        else:
            self.roi_size = image_size + roi_margin
            self.margin = roi_margin

        self.crop_dir = crop_dir
        self.test_split = test_split
        self.split = "train" if train else self.test_split
        self.train = train
        self.use_cache = use_cache
        self.crop_source = crop_source
        self.crop_type = crop_type
        self.start = start
        self.max_samples = max_samples
        self.color = color

        self.annotations = self._load_annotations(self.split)
        self._init()
        self._select_index_range()

        transforms = [CenterCrop(self.output_size)]
        transforms += [ToTensor()]
        transforms += [normalizer]
        self.crop_to_tensor = tf.Compose(transforms)

        if loader is not None:
            self.loader = loader
        else:
            self.loader = CachedCropLoader(
                fullsize_img_dir,
                self.cropped_img_dir,
                img_size=self.image_size,
                margin=self.margin,
                use_cache=self.use_cache,
                crop_type=crop_type,
                border_mode=roi_background,
                median_blur_crop=median_blur_crop,
            )

        super().__init__(root,
                         transform=transform,
                         target_transform=target_transform)
Example #9
def polyfit2coords_tusimple_with_bounded_classifier(lane_pred,
                                                    img,
                                                    classifier,
                                                    classifier_resize_shape=(),
                                                    sample_rate=1,
                                                    crop_h=0,
                                                    resize_shape=None,
                                                    y_px_gap=20,
                                                    pts=None,
                                                    ord=3):
    if resize_shape is None:
        resize_shape = lane_pred.shape
        crop_h = 0
    h, w = lane_pred.shape
    H, W = resize_shape
    coordinates = []

    if pts is None:
        pts = round(H / 2 / y_px_gap)

    # Assuming a torchvision-style Compose that expects a list of transforms.
    transform_x = Compose([Resize(classifier_resize_shape), ToTensor()])

    flagged = 0
    for i in [idx for idx in np.unique(lane_pred) if idx != 0]:
        ys_pred, xs_pred = np.where(lane_pred == i)

        debug = False
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                stabilized_lane = LaneDefense.get_stabilized_lane(
                    xs_pred, ys_pred, img, sample_rate=sample_rate, ord=ord)
            except RankWarning as e:
                debug = True

        # Run again outside the warning filter so stabilized_lane is always
        # assigned, even when a RankWarning was raised above.
        stabilized_lane = LaneDefense.get_stabilized_lane(
            xs_pred, ys_pred, img, sample_rate=sample_rate, ord=ord)
        if len(stabilized_lane) > 0:

            if debug:
                plt.imshow(stabilized_lane)
                plt.show()

            # plt.imshow(stabilized_lane)
            class_logit = classifier(
                transform_x({
                    "img": stabilized_lane
                })["img"].cuda().unsqueeze(0)).detach().cpu().numpy()
            classification = 1 / (1 + np.exp(-class_logit)) > 0.78

            # classification = output_sum > 2
            # print(output_list)
            # print(classification)
            # print("-----------")

        else:
            classification = True

        if classification:
            poly_params = np.polyfit(ys_pred, xs_pred, deg=ord)
            ys = np.array([
                h - y_px_gap / (H - crop_h) * h * i for i in range(1, pts + 1)
            ])
            xs = np.polyval(poly_params, ys)

            y_min, y_max = np.min(ys_pred), np.max(ys_pred)
            coordinates.append([[
                int(x / w * W) if x >= 0 and x < w and ys[i] >= y_min
                and ys[i] <= y_max else -1, H - y_px_gap * (i + 1)
            ] for (x, i) in zip(xs, range(pts))])
        else:
            flagged += 1

    return coordinates
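
The coordinate-generation branch above boils down to fitting x as a polynomial of the row index y (np.polyfit with deg=ord) and then sampling the fitted curve on a fixed grid of rows (np.polyval). A standalone NumPy illustration with toy points, independent of the repository's data structures:

import numpy as np

# Toy lane points: x as a function of the row index y.
ys_pred = np.arange(100, 200)
xs_pred = 0.002 * (ys_pred - 150) ** 2 + 50

# Fit x = f(y) with a cubic, mirroring ord=3 in the example above.
poly_params = np.polyfit(ys_pred, xs_pred, deg=3)

# Sample the fitted curve at evenly spaced rows.
ys = np.linspace(100, 200, num=10)
xs = np.polyval(poly_params, ys)
print(list(zip(np.round(xs, 1).tolist(), ys.tolist())))
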
Example #10
def prepare_dataloaders(
    dataset_split,
    dataset_path,
    metadata_filename,
    batch_size=32,
    sample_size=-1,
    valid_split=0.1,
    test_split=0.1,
    num_worker=0,
    valid_metadata_filename=None,
    valid_dataset_dir=None,
):
    """
    Utility function to prepare dataloaders for training.

    Parameters
    ----------
    dataset_split : str
        Any of 'train', 'extra', 'test'.
    dataset_path : str
        Absolute path to the dataset (e.g. '.../data/SVHN/train').
    metadata_filename : str
        Absolute path to the metadata pickle file.
    batch_size : int
        Mini-batch size.
    sample_size : int
        Number of elements to use as sample size,
        for debugging purposes only. If -1, use all samples.
    valid_split : float
        Fraction of the data to use for the validation split;
        should be in the range [0, 1].

    Returns
    -------
    if dataset_split in ['train', 'extra']:
        train_loader: torch.utils.data.DataLoader
            Dataloader containing training data.
        valid_loader: torch.utils.data.DataLoader
            Dataloader containing validation data.

    if dataset_split in ['test']:
        test_loader: torch.utils.data.DataLoader
            Dataloader containing test data.

    """

    assert dataset_split in ["train", "test", "extra"], "check dataset_split"

    metadata = load_obj(metadata_filename)

    #  dataset_path = datadir / dataset_split

    firstcrop = FirstCrop(0.3)
    downscale = Rescale((64, 64))
    random_crop = RandomCrop((54, 54))
    to_tensor = ToTensor()
    normalize = None
    # normalize = Normalize((0.434, 0.442, 0.473), (0.2, 0.202, 0.198))

    # Declare transformations

    transform = transforms.Compose(
        [firstcrop, downscale, random_crop, to_tensor])
    test_transform = transforms.Compose(
        [FirstCrop(0.1), Rescale((54, 54)), to_tensor])

    dataset = SVHNDataset(
        metadata,
        data_dir=dataset_path,
        transform=transform,
        normalize_transform=normalize,
    )

    dataset_length = len(metadata)

    indices = np.arange(dataset_length)
    # Only use a sample amount of data
    if sample_size != -1:
        indices = indices[:sample_size]
        dataset_length = sample_size

    if dataset_split in ["train", "extra"]:

        # Prepare a train and validation dataloader
        valid_loader = None
        if valid_dataset_dir is not None:
            valid_metadata = load_obj(valid_metadata_filename)
            valid_dataset = SVHNDataset(
                valid_metadata,
                data_dir=valid_dataset_dir,
                transform=test_transform,
                normalize_transform=normalize,
            )
            valid_loader = DataLoader(
                valid_dataset,
                batch_size=batch_size,
                shuffle=False,
                num_workers=num_worker,
            )

        train_sampler = torch.utils.data.SubsetRandomSampler(indices)
        train_loader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=False,
            sampler=train_sampler,
            num_workers=num_worker,
        )

        return train_loader, valid_loader

    elif dataset_split in ["test"]:

        test_sampler = torch.utils.data.SequentialSampler(indices)

        # change the transformer pipeline
        dataset.transform = test_transform

        # Prepare a test dataloader
        test_loader = DataLoader(
            dataset,
            batch_size=batch_size,
            num_workers=4,
            shuffle=False,
            sampler=test_sampler,
        )

        return test_loader
Example #11
def prepare_test_dataloader(dataset_path, metadata_filename, batch_size,
                            sample_size):
    """
    Utility function to prepare dataloaders for testing.

    Parameters of the configuration (cfg)
    -------------------------------------
    dataset_path : str
        Absolute path to the test dataset (e.g. '.../data/SVHN/test').
    metadata_filename : str
        Absolute path to the metadata pickle file.
    batch_size : int
        Mini-batch size.
    sample_size : int
        Number of elements to use as sample size,
        for debugging purposes only. If -1, use all samples.

    Returns
    -------
        test_loader: torch.utils.data.DataLoader
            Dataloader containing test data.

    """

    firstcrop = FirstCrop(0.3)
    rescale = Rescale((64, 64))
    random_crop = RandomCrop((54, 54))
    to_tensor = ToTensor()

    # Declare transformations

    transform = transforms.Compose([
        firstcrop,
        rescale,
        random_crop,
        to_tensor,
    ])

    # Load metadata file
    metadata = load_obj(metadata_filename)

    # Initialize Dataset
    dataset = SVHNDataset(metadata, data_dir=dataset_path, transform=transform)

    indices = np.arange(len(metadata))

    # Only use a sample amount of data
    if sample_size != -1:
        indices = indices[:sample_size]

    test_sampler = torch.utils.data.SequentialSampler(indices)

    # Prepare a test dataloader
    test_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             num_workers=4,
                             shuffle=False,
                             sampler=test_sampler)
    return test_loader
Example #12
def prepare_dataloaders(cfg):
    """
    Utility function to prepare dataloaders for training.

    Parameters of the configuration (cfg)
    -------------------------------------
    dataset_path : str
        Absolute path to the dataset (e.g. '.../data/SVHN/train').
    metadata_filename : str
        Absolute path to the metadata pickle file.
    batch_size : int
        Mini-batch size.
    sample_size : int
        Number of elements to use as sample size,
        for debugging purposes only. If -1, use all samples.
    valid_split : float
        Fraction used to split each subset into train/validation parts;
        should be in the range [0, 1].

    Returns
    -------
        train_loader: torch.utils.data.DataLoader
            Dataloader containing training data.
        valid_loader: torch.utils.data.DataLoader
            Dataloader containing validation data.

    """

    dataset_path = cfg.INPUT_DIR
    metadata_filename = cfg.METADATA_FILENAME
    batch_size = cfg.TRAIN.BATCH_SIZE
    sample_size = cfg.TRAIN.SAMPLE_SIZE
    valid_split = cfg.TRAIN.VALID_SPLIT

    firstcrop = FirstCrop(0.3)
    rescale = Rescale((64, 64))
    random_crop = RandomCrop((54, 54))
    to_tensor = ToTensor()

    # Declare transformations

    transform = transforms.Compose([
        firstcrop,
        rescale,
        random_crop,
        to_tensor,
    ])

    # index 0 for train subset, 1 for extra subset
    metadata_train = load_obj(metadata_filename[0])
    metadata_extra = load_obj(metadata_filename[1])

    train_data_dir = dataset_path[0]
    extra_data_dir = dataset_path[1]

    valid_split_train = valid_split[0]
    valid_split_extra = valid_split[1]

    # Initialize the combined Dataset
    dataset = FullSVHNDataset(metadata_train,
                              metadata_extra,
                              train_data_dir,
                              extra_data_dir,
                              transform=transform)

    indices_train = np.arange(len(metadata_train))
    indices_extra = np.arange(len(metadata_train),
                              len(metadata_extra) + len(metadata_train))

    # Only use a sample amount of data
    if sample_size[0] != -1:
        indices_train = indices_train[:sample_size[0]]

    if sample_size[1] != -1:
        indices_extra = indices_extra[:sample_size[1]]

    # Select the indices to use for the train/valid split from the 'train' subset
    train_idx_train = indices_train[:round(valid_split_train *
                                           len(indices_train))]
    valid_idx_train = indices_train[round(valid_split_train *
                                          len(indices_train)):]

    # Select the indices to use for the train/valid split from the 'extra' subset
    train_idx_extra = indices_extra[:round(valid_split_extra *
                                           len(indices_extra))]
    valid_idx_extra = indices_extra[round(valid_split_extra *
                                          len(indices_extra)):]

    # Combine indices from 'train' and 'extra' as one single train/validation split
    train_idx = np.concatenate((train_idx_train, train_idx_extra))
    valid_idx = np.concatenate((valid_idx_train, valid_idx_extra))

    # Define the data samplers
    train_sampler = torch.utils.data.SubsetRandomSampler(train_idx)
    valid_sampler = torch.utils.data.SubsetRandomSampler(valid_idx)

    # Prepare a train and validation dataloader
    train_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=4,
                              sampler=train_sampler)

    valid_loader = DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=4,
                              sampler=valid_sampler)

    return train_loader, valid_loader
Example #13
def main():

    args = get_arguments()

    # configuration
    CONFIG = Dict(yaml.safe_load(open(args.config)))

    # writer
    if CONFIG.writer_flag:
        writer = SummaryWriter(CONFIG.result_path)
    else:
        writer = None

    # DataLoaders
    train_data = PASCALVOC(
        CONFIG,
        mode="train",
        transform=Compose([
            RandomCrop(CONFIG),
            Resize(CONFIG),
            RandomFlip(),
            ToTensor(),
            Normalize(mean=get_mean(), std=get_std()),
        ])
    )

    val_data = PASCALVOC(
        CONFIG,
        mode="val",
        transform=Compose([
            RandomCrop(CONFIG),
            Resize(CONFIG),
            ToTensor(),
            Normalize(mean=get_mean(), std=get_std()),
        ])
    )

    train_loader = DataLoader(
        train_data,
        batch_size=CONFIG.batch_size,
        shuffle=True,
        num_workers=CONFIG.num_workers,
        drop_last=True
    )

    val_loader = DataLoader(
        val_data,
        batch_size=CONFIG.batch_size,
        shuffle=False,
        num_workers=CONFIG.num_workers
    )

    # load model
    print('\n------------------------Loading Model------------------------\n')

    if CONFIG.attention == 'dual':
        model = DANet(CONFIG)
        print('Dual Attention modules will be added to this base model')
    elif CONFIG.attention == 'channel':
        model = CANet(CONFIG)
        print('Channel Attention modules will be added to this base model')
    else:
        if CONFIG.model == 'drn_d_22':
            print(
                'Dilated ResNet D 22 w/o Dual Attention modules will be used as a model.')
            model = drn_d_22(pretrained=True, num_classes=CONFIG.n_classes)
        elif CONFIG.model == 'drn_d_38':
            print(
                'Dilated ResNet D 38 w/o Dual Attention modules will be used as a model.')
            model = drn_d_38(pretrained=True, num_classes=CONFIG.n_classes)
        else:
            print('The specified model is not available.')
            print(
                'Therefore, Dilated ResNet D 22 w/o Dual Attention modules will be used as a model.')
            model = drn_d_22(pretrained=True, num_classes=CONFIG.n_classes)

    # set optimizer, lr_scheduler
    if CONFIG.optimizer == 'Adam':
        print(CONFIG.optimizer + ' will be used as an optimizer.')
        optimizer = optim.Adam(model.parameters(), lr=CONFIG.learning_rate)
    elif CONFIG.optimizer == 'SGD':
        print(CONFIG.optimizer + ' will be used as an optimizer.')
        optimizer = optim.SGD(
            model.parameters(),
            lr=CONFIG.learning_rate,
            momentum=CONFIG.momentum,
            dampening=CONFIG.dampening,
            weight_decay=CONFIG.weight_decay,
            nesterov=CONFIG.nesterov)
    elif CONFIG.optimizer == 'AdaBound':
        print(CONFIG.optimizer + ' will be used as an optimizer.')
        optimizer = adabound.AdaBound(
            model.parameters(),
            lr=CONFIG.learning_rate,
            final_lr=CONFIG.final_lr,
            weight_decay=CONFIG.weight_decay)
    else:
        print('No optimizer matches the given option. '
              'Instead, SGD will be used as an optimizer.')
        optimizer = optim.SGD(
            model.parameters(),
            lr=CONFIG.learning_rate,
            momentum=CONFIG.momentum,
            dampening=CONFIG.dampening,
            weight_decay=CONFIG.weight_decay,
            nesterov=CONFIG.nesterov)

    # learning rate scheduler
    if CONFIG.optimizer == 'SGD':
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, 'min', patience=CONFIG.lr_patience)
    else:
        scheduler = None

    # send the model to cuda/cpu
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    if device == 'cuda':
        model = torch.nn.DataParallel(model)  # make parallel
        torch.backends.cudnn.benchmark = True

    # resume if you want
    begin_epoch = 0
    if args.resume:
        if os.path.exists(os.path.join(CONFIG.result_path, 'checkpoint.pth')):
            print('loading the checkpoint...')
            begin_epoch, model, optimizer, scheduler = \
                resume(CONFIG, model, optimizer, scheduler)
            print('training will start from {} epoch'.format(begin_epoch))

    # criterion for loss
    if CONFIG.class_weight:
        criterion = nn.CrossEntropyLoss(
            weight=get_class_weight().to(device),
            ignore_index=255
        )
    else:
        criterion = nn.CrossEntropyLoss(ignore_index=255)

    # train and validate model
    print('\n------------------------Start training------------------------\n')
    losses_train = []
    losses_val = []
    val_ious = []
    mean_ious = []
    mean_ious_without_bg = []
    best_mean_iou = 0.0

    for epoch in range(begin_epoch, CONFIG.max_epoch):
        # training
        loss_train = train(
            model, train_loader, criterion, optimizer, CONFIG, device)
        losses_train.append(loss_train)

        # validation
        val_iou, loss_val = validation(
            model, val_loader, criterion, CONFIG, device)
        val_ious.append(val_iou)
        losses_val.append(loss_val)
        if CONFIG.optimizer == 'SGD':
            scheduler.step(loss_val)

        mean_ious.append(val_ious[-1].mean().item())
        mean_ious_without_bg.append(val_ious[-1][1:].mean().item())

        # save a checkpoint every 5 epochs
        if epoch % 5 == 0 and epoch != 0:
            save_checkpoint(CONFIG, epoch, model, optimizer, scheduler)

        # save the model every 50 epochs
        if epoch % 50 == 0 and epoch != 0:
            torch.save(
                model.state_dict(), os.path.join(CONFIG.result_path, 'epoch_{}_model.prm'.format(epoch)))

        if best_mean_iou < mean_ious[-1]:
            best_mean_iou = mean_ious[-1]
            torch.save(
                model.state_dict(), os.path.join(CONFIG.result_path, 'best_mean_iou_model.prm'))

        # tensorboardx
        if writer:
            writer.add_scalars(
                "loss", {
                    'loss_train': losses_train[-1],
                    'loss_val': losses_val[-1]}, epoch)
            writer.add_scalar(
                "mean_iou", mean_ious[-1], epoch)
            writer.add_scalar(
                "mean_iou_w/o_bg", mean_ious_without_bg[-1], epoch)

        print(
            'epoch: {}\tloss_train: {:.5f}\tloss_val: {:.5f}\tmean IOU: {:.3f}\tmean IOU w/o bg: {:.3f}'.format(
                epoch, losses_train[-1], losses_val[-1], mean_ious[-1], mean_ious_without_bg[-1])
        )

    torch.save(
        model.state_dict(), os.path.join(CONFIG.result_path, 'final_model.prm'))
Example #14
def main(args, ECGResNet_params, model_class):

    # Merge config file with command-line arguments
    merged_dict = {**vars(args), **ECGResNet_params}
    print(args.model_name)

    transform = transforms.Compose([ToTensor(), ApplyGain(umc=False)])

    # Load train dataset
    if args.dataset == 'UMCU-Triage':
        dataset_params = json.load(open('configs/UMCU-Triage.json', 'r'))
        print('loaded dataset params')
    elif args.dataset == 'CPSC2018':
        dataset_params = json.load(open('configs/CPSC2018.json', 'r'))
        print('loaded dataset params')

        trainset = CPSC2018Dataset(
            path_labels_csv=dataset_params['train_labels_csv'],
            waveform_dir=dataset_params['data_dir'],
            OOD_classname=str(dataset_params['OOD_classname']),
            transform=transform,
            max_sample_length=dataset_params['max_sample_length'])

        # NOTE: the validation and test sets below reuse the training labels CSV.
        validationset = CPSC2018Dataset(
            path_labels_csv=dataset_params['train_labels_csv'],
            waveform_dir=dataset_params['data_dir'],
            OOD_classname=str(dataset_params['OOD_classname']),
            transform=transform,
            max_sample_length=dataset_params['max_sample_length'])

        testset = CPSC2018Dataset(
            path_labels_csv=dataset_params['train_labels_csv'],
            waveform_dir=dataset_params['data_dir'],
            OOD_classname=str(dataset_params['OOD_classname']),
            transform=transform,
            max_sample_length=dataset_params['max_sample_length'])

    merged_dict['num_classes'] = dataset_params['num_classes']
    merged_dict['train_dataset_size'] = len(trainset)
    merged_dict['val_dataset_size'] = len(validationset)
    merged_dict['test_dataset_size'] = len(testset)

    # Initialize dataloaders
    train_loader = DataLoader(trainset,
                              batch_size=ECGResNet_params['batch_size'],
                              num_workers=8)
    val_loader = DataLoader(validationset,
                            batch_size=ECGResNet_params['batch_size'],
                            num_workers=8)
    test_loader = DataLoader(testset,
                             batch_size=ECGResNet_params['batch_size'],
                             num_workers=8)

    # Initialize model
    model = model_class(**merged_dict)
    print('Initialized {}'.format(model.__class__.__name__))

    # Initialize Logger
    tb_logger = pl_loggers.TensorBoardLogger('lightning_logs/')

    # Initialize trainer
    k = 1
    trainer = Trainer.from_argparse_args(
        args,
        max_epochs=ECGResNet_params['max_epochs'],
        logger=tb_logger,
        log_every_n_steps=k)

    # Train model
    trainer.fit(model, train_loader, val_loader)

    # Test model
    trainer.test(test_dataloaders=test_loader)

    # Save model
    model.save_results()
    tb_logger.save()
Example #15
class Configuration:
    FOCAL_LOSS_INDICES = None
    CE_LOSS_INDICES = None
    BATCH_SIZE = 2
    CHECKPOINT = ""
    SAVE_FREQUENCY = 4
    CLASS_VALUE = -1
    CROP_SIZE = 256
    CUDA = True
    DATASET = {
        NetMode.TRAIN: "SmartRandomDataLoader",
        NetMode.VALIDATE: "DataLoaderCrop2D",
    }
    FOLDER_WITH_IMAGE_DATA = "/home/branislav/datasets/refuge"

    LEARNING_RATE = 1e-4
    LOSS = CrossEntropyLoss

    MODEL = "DeepLabV3p"
    NUM_CLASSES = 2
    NUM_WORKERS = 8
    NUMBER_OF_EPOCHS = 100
    OUTPUT = "ckpt"
    OUTPUT_FOLDER = "polyps"
    STRIDE = 0.5
    STRIDE_VAL = 0.5
    STRIDE_LIMIT = (1000, 0.5)  # THIS PREVENTS DATASET HALTING

    OPTIMALIZER = SGD
    VALIDATION_FREQUENCY = 1  # num epochs

    MOMENTUM = 0.9
    WEIGHT_DECAY = 1e-4
    AUGMENTATION = ComposeTransforms([
        RandomRotate(0.6),
        RandomSquaredCrop(0.85),
        RandomHorizontalFlip(),
        RandomVerticalFlip(),
        Transpose(),
        ToTensor()
    ])
    VAL_AUGMENTATION = ComposeTransforms([Transpose(), ToTensor()])
    PATH_TO_SAVED_SUBIMAGE_INFO = None  # FOLDER_WITH_IMAGE_DATA + "train/info.pkl"

    FOLDERS = {NetMode.TRAIN: "train", NetMode.VALIDATE: "train"}
    SUBFOLDERS = {
        ImagesSubfolder.IMAGES: "images/*.tif",
        ImagesSubfolder.MASKS: "mask/*.tif"
    }
    NUM_RANDOM_CROPS_PER_IMAGE = 4
    VISUALIZER = "VisualizationSaveImages"

    def serialize(self):
        output = {}
        for key in list(filter(lambda x: x.isupper(), dir(self))):
            value = getattr(self, key)
            if any(
                    map(lambda type_: isinstance(value, type_),
                        [str, float, int, tuple, list, dict])):
                output[key] = str(value)
        return output

    def __str__(self):
        serialized = self.serialize()
        return "\n".join(
            [f"{key}: {value}" for key, value in serialized.items()])

    def process_mask(self, mask):
        mask[mask > 0] = 1
        return mask
Example #16
from engine import get_detection_model, train_one_epoch, evaluate, train
from utils.dataset import PennFudanDataset
from utils.transforms import Compose, ToTensor
from utils.utils import collate_fn, show_sample

root_path = "../data"
save_path = "../models"
show_example = False
num_epochs = 10
batch_size = 1
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')


if __name__ == "__main__":
    # Create Dataloader
    dataset = PennFudanDataset(root_path, transforms=Compose([ToTensor()]))

    # split the dataset in train and test set
    torch.manual_seed(42)
    indices = torch.randperm(len(dataset)).tolist()
    dataset_train = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset, indices[-50:])

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    train_loader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
    test_loader = DataLoader(dataset_test, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)

    if show_example:
        img, target = dataset[10]
        show_sample(img, target)
Example #17
def prepare_dataloaders(dataset_split,
                        dataset_path,
                        metadata_filename,
                        batch_size=32,
                        sample_size=-1,
                        valid_split=0.8):
    '''
    Utility function to prepare dataloaders for training.

    Parameters
    ----------
    dataset_split : str
        Any of 'train', 'extra', 'test'.
    dataset_path : str
        Absolute path to the dataset (e.g. '.../data/SVHN/train').
    metadata_filename : str
        Absolute path to the metadata pickle file.
    batch_size : int
        Mini-batch size.
    sample_size : int
        Number of elements to use as sample size,
        for debugging purposes only. If -1, use all samples.
    valid_split : float
        Fraction used to split the data into train/validation subsets;
        should be in the range [0, 1].

    Returns
    -------
    if dataset_split in ['train', 'extra']:
        train_loader: torch.utils.data.DataLoader
            Dataloader containing training data.
        valid_loader: torch.utils.data.DataLoader
            Dataloader containing validation data.

    if dataset_split in ['test']:
        test_loader: torch.utils.data.DataLoader
            Dataloader containing test data.

    '''

    assert dataset_split in ['train', 'test', 'extra'], "check dataset_split"

    metadata = load_obj(metadata_filename)

    #  dataset_path = datadir / dataset_split

    firstcrop = FirstCrop(0.3)
    rescale = Rescale((64, 64))
    random_crop = RandomCrop((54, 54))
    to_tensor = ToTensor()

    # Declare transformations

    transform = transforms.Compose(
        [firstcrop, rescale, random_crop, to_tensor])

    dataset = SVHNDataset(metadata, data_dir=dataset_path, transform=transform)

    indices = np.arange(len(metadata))
    #  indices = np.random.permutation(indices)

    # Only use a sample amount of data
    if sample_size != -1:
        indices = indices[:sample_size]

    if dataset_split in ['train', 'extra']:

        train_idx = indices[:round(valid_split * len(indices))]
        valid_idx = indices[round(valid_split * len(indices)):]

        train_sampler = torch.utils.data.SubsetRandomSampler(train_idx)
        valid_sampler = torch.utils.data.SubsetRandomSampler(valid_idx)

        # Prepare a train and validation dataloader
        train_loader = DataLoader(dataset,
                                  batch_size=batch_size,
                                  shuffle=False,
                                  num_workers=4,
                                  sampler=train_sampler)

        valid_loader = DataLoader(dataset,
                                  batch_size=batch_size,
                                  shuffle=False,
                                  num_workers=4,
                                  sampler=valid_sampler)

        return train_loader, valid_loader

    elif dataset_split in ['test']:

        test_sampler = torch.utils.data.SequentialSampler(indices)

        # Prepare a test dataloader
        test_loader = DataLoader(dataset,
                                 batch_size=batch_size,
                                 num_workers=4,
                                 shuffle=False,
                                 sampler=test_sampler)

        return test_loader
Example #18
def create_loaders(dataset, inputs, train_dir, val_dir, train_list, val_list,
                   shorter_side, crop_size, input_size, low_scale, high_scale,
                   normalise_params, batch_size, num_workers, ignore_label):
    """
    Args:
      train_dir (str) : path to the root directory of the training set.
      val_dir (str) : path to the root directory of the validation set.
      train_list (str) : path to the training list.
      val_list (str) : path to the validation list.
      shorter_side (int) : parameter of the shorter_side resize transformation.
      crop_size (int) : square crop to apply during the training.
      low_scale (float) : lowest scale ratio for augmentations.
      high_scale (float) : highest scale ratio for augmentations.
      normalise_params (list / tuple) : img_scale, img_mean, img_std.
      batch_size (int) : training batch size.
      num_workers (int) : number of workers to parallelise data loading operations.
      ignore_label (int) : label to pad segmentation masks with.

    Returns:
      train_loader, val_loader

    """
    # Torch libraries
    from torchvision import transforms
    from torch.utils.data import DataLoader, random_split
    # Custom libraries
    from utils.datasets import SegDataset as Dataset
    from utils.transforms import Normalise, Pad, RandomCrop, RandomMirror, ResizeAndScale, \
                                 CropAlignToMask, ResizeAlignToMask, ToTensor, ResizeInputs

    input_names, input_mask_idxs = ['rgb', 'depth'], [0, 2, 1]

    AlignToMask = CropAlignToMask if dataset == 'nyudv2' else ResizeAlignToMask
    composed_trn = transforms.Compose([
        AlignToMask(),
        ResizeAndScale(shorter_side, low_scale, high_scale),
        Pad(crop_size, [123.675, 116.28, 103.53], ignore_label),
        RandomMirror(),
        RandomCrop(crop_size),
        ResizeInputs(input_size),
        Normalise(*normalise_params),
        ToTensor()
    ])
    composed_val = transforms.Compose([
        AlignToMask(),
        ResizeInputs(input_size),
        Normalise(*normalise_params),
        ToTensor()
    ])
    # Training and validation sets
    trainset = Dataset(dataset=dataset, data_file=train_list, data_dir=train_dir,
                       input_names=input_names, input_mask_idxs=input_mask_idxs,
                       transform_trn=composed_trn, transform_val=composed_val,
                       stage='train', ignore_label=ignore_label)

    validset = Dataset(dataset=dataset, data_file=val_list, data_dir=val_dir,
                       input_names=input_names, input_mask_idxs=input_mask_idxs,
                       transform_trn=None, transform_val=composed_val, stage='val',
                       ignore_label=ignore_label)
    print_log('Created train set {} examples, val set {} examples'.format(len(trainset), len(validset)))
    # Training and validation loaders
    train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers,
                              pin_memory=True, drop_last=True)
    val_loader = DataLoader(validset, batch_size=1, shuffle=False, num_workers=num_workers, pin_memory=True)

    return train_loader, val_loader