Example #1
# Standard imports assumed by this snippet; Rescale, RandomCrop, Normalize,
# ToTensor, and FacialKeyPointDataProccessing come from the surrounding project.
import numpy as np
import torch
from torch.utils.data import SubsetRandomSampler
from torchvision import transforms


def prepare_data(csv_file, image_folder, batch_size=10):
    transformations = transforms.Compose(
        [Rescale(250), RandomCrop(224),
         Normalize(), ToTensor()])

    dataset = FacialKeyPointDataProccessing(csv_file,
                                            image_folder,
                                            transformation=transformations)

    validation_split = .1
    shuffle_dataset = True
    random_seed = 42

    # Creating data indices for training and validation splits:
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Creating PT data samplers and loaders:
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
    validation_loader = torch.utils.data.DataLoader(dataset,
                                                    batch_size=batch_size,
                                                    sampler=valid_sampler)

    return train_loader, validation_loader
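
A minimal usage sketch, assuming a file layout like the one in Example #6; the
paths and batch size below are placeholders, not part of the original:

train_loader, validation_loader = prepare_data(
    csv_file='/data/training_frames_keypoints.csv',
    image_folder='/data/training/',
    batch_size=32)
sample_batch = next(iter(train_loader))  # fetch one training batch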
Example #2
# Standard imports assumed by this snippet; CifarDataset, CifarDataLoader,
# ToGrayscale, Normalize, ToTensor, LeNet, and Trainer come from the
# surrounding project.
import json

from torch.utils.data import DataLoader
from torchvision import transforms


def main():
    with open("config.json", "r") as f:
        config = json.load(f)

    ## Prepare data
    data = CifarDataset(config)

    all_transforms = transforms.Compose(
        [ToGrayscale(), Normalize(), ToTensor()])

    train_data_transformed = CifarDataLoader(config,
                                             data.X_train,
                                             data.y_train,
                                             transform=all_transforms)
    train_loader = DataLoader(train_data_transformed,
                              batch_size=config["data_loader"]["batch_size"],
                              shuffle=False,
                              num_workers=4)

    # Guard: without this, valid_loader would be undefined below when the
    # validation split is disabled.
    valid_loader = None
    if config["validation"]["split"]:
        valid_data_transformed = CifarDataLoader(config,
                                                 data.X_valid,
                                                 data.y_valid,
                                                 transform=all_transforms)
        valid_loader = DataLoader(
            valid_data_transformed,
            batch_size=config["data_loader"]["batch_size"],
            shuffle=False,
            num_workers=4)

    test_data_transformed = CifarDataLoader(config,
                                            data.X_test,
                                            data.y_test,
                                            transform=all_transforms)
    test_loader = DataLoader(test_data_transformed,
                             batch_size=config["data_loader"]["batch_size"],
                             shuffle=False,
                             num_workers=4)

    ## Create neural net
    net = LeNet()

    ## Training
    trainer = Trainer(model=net,
                      config=config,
                      train_data_loader=train_loader,
                      valid_data_loader=valid_loader,
                      test_data_loader=test_loader)
    trainer.train()

    ## Saving model parameters
    trainer.save_model_params()

    ## Evaluate test data
    trainer.evaluate()
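
The function reads only two configuration keys; a minimal config.json
consistent with those reads might look like the following (the values are
placeholders, and any keys the project classes need are omitted):

{
    "data_loader": {"batch_size": 64},
    "validation": {"split": true}
}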
Example #3
def __init__(self,
             images_A,
             images_B,
             mask=None,
             rotation=None,
             crop=None,
             normalize=None):
    # Paired-image dataset setup: keep both directory listings and the
    # optional augmentation settings for later use.
    self.dir_A = images_A
    self.dir_B = images_B
    self.files_A = os.listdir(images_A)
    self.files_B = os.listdir(images_B)
    self.mask = mask
    self.rotate = rotation
    self.crop = crop
    self.normalize = normalize
    self.to_tensor = ToTensor()
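
A hedged instantiation example; the class name is not shown in this excerpt,
so PairedImageDataset below is a placeholder, as are the paths:

# Hypothetical usage of the initializer above.
dataset = PairedImageDataset('data/domain_A/', 'data/domain_B/',
                             rotation=10, crop=224, normalize=True)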
Example #4
def __init__(self,
             images,
             mask_covid,
             mask_lungs=None,
             max_rotation=None,
             rotation=None,
             crop=None,
             normalize=None,
             boundary=None,
             lung_values=None):
    # Single-directory variant of Example #3, extended with COVID and lung
    # mask sources plus a rotation bound; settings are stored for later use.
    self.dir = images
    self.files = os.listdir(images)
    self.mask_covid = mask_covid
    self.mask_lungs = mask_lungs
    self.max_rotation = max_rotation
    self.rotate = rotation
    self.crop = crop
    self.normalize = normalize
    self.boundary = boundary
    self.lung_values = lung_values
    self.to_tensor = ToTensor()
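
Again a hedged sketch; CovidCTDataset and every path below are placeholders:

# Hypothetical usage of the initializer above.
dataset = CovidCTDataset('data/ct_scans/', 'data/masks_covid/',
                         mask_lungs='data/masks_lungs/',
                         max_rotation=15, crop=256, normalize=True)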
Example #5
# Standard imports assumed by this snippet; the data utilities, transforms,
# losses, metrics, model, Trainer, and upper-case constants come from the
# surrounding project.
import numpy as np
import torch.nn as nn
from sklearn.model_selection import KFold
from torchvision import transforms


def k_fold():
    images, masks = load_train_data(TRAIN_IMAGES_PATH, TRAIN_MASKS_PATH)
    test_file_paths, test_images = load_test_data(TEST_IMAGES_PATH,
                                                  load_images=True,
                                                  to256=False)

    train_transformer = transforms.Compose([
        CropAugmenter(),
        AffineAugmenter(),
        MasksAdder(),
        ToTensor(),
        Normalize(),
        ClassAdder()
    ])

    eval_transformer = transforms.Compose(
        [MasksAdder(), ToTensor(),
         Normalize(), ClassAdder()])

    predict_transformer = transforms.Compose(
        [ToTensor(predict=True),
         Normalize(predict=True)])

    test_images_loader = build_data_loader(test_images,
                                           None,
                                           predict_transformer,
                                           batch_size=BATCH_SIZE,
                                           shuffle=False,
                                           num_workers=4,
                                           predict=True)

    k_fold = KFold(n_splits=FOLDS, random_state=RANDOM_SEED, shuffle=True)

    test_masks_folds = []

    config = AttrDict({
        'cuda_index': CUDA_ID,
        'momentum': MOMENTUM,
        'lr': LR,
        'tune_lr': TUNE_LR,
        'min_lr': MIN_LR,
        'bce_epochs': BCE_EPOCHS,
        'intermediate_epochs': INTERMEDIATE_EPOCHS,
        'cycle_length': CYCLE_LENGTH,
        'logs_dir': LOGS_DIR,
        'masks_weight': MASKS_WEIGHT,
        'class_weight': CLASS_WEIGHT,
        'val_metric_criterion': 'comp_metric'
    })

    for index, (train_index,
                valid_index) in enumerate(k_fold.split(images)):
        print('fold_{}\n'.format(index))

        x_train_fold, x_valid = images[train_index], images[valid_index]
        y_train_fold, y_valid = masks[train_index], masks[valid_index]

        train_data_loader = build_data_loader(x_train_fold,
                                              y_train_fold,
                                              train_transformer,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True,
                                              num_workers=4,
                                              predict=False)
        val_data_loader = build_data_loader(x_valid,
                                            y_valid,
                                            eval_transformer,
                                            batch_size=BATCH_SIZE,
                                            shuffle=False,
                                            num_workers=4,
                                            predict=False)
        # Note: this 'test' loader re-evaluates the validation fold; the real
        # test images are handled separately by test_images_loader above.
        test_data_loader = build_data_loader(x_valid,
                                             y_valid,
                                             eval_transformer,
                                             batch_size=BATCH_SIZE,
                                             shuffle=False,
                                             num_workers=4,
                                             predict=False)

        data_loaders = AttrDict({
            'train': train_data_loader,
            'val': val_data_loader,
            'test': test_data_loader
        })

        # Loss-weight schedule: hold the Lovasz share at 0.1 for the first
        # BCE_EPOCHS epochs, then ramp it linearly from 0.1 to 0.9 over
        # INTERMEDIATE_EPOCHS epochs; the BCE share is the complement.
        lovasz_ratios = np.hstack((np.full(BCE_EPOCHS, 0.1),
                                   np.linspace(0.1, 0.9,
                                               INTERMEDIATE_EPOCHS)))
        bce_ratios = 1.0 - lovasz_ratios
        loss_weights = list(zip(bce_ratios, lovasz_ratios))

        loss = LossAggregator((nn.BCEWithLogitsLoss(), LovaszLoss()),
                              weights=[0.9, 0.1])

        metrics = {
            'binary_accuracy': BinaryAccuracy,
            'dice_coefficient': DiceCoefficient,
            'comp_metric': CompMetric
        }

        segmentor = SawSeenNet(base_channels=64, pretrained=True,
                               frozen=False).cuda(config.cuda_index)

        trainer = Trainer(config=config,
                          model=segmentor,
                          loss=loss,
                          loss_weights=loss_weights,
                          metrics=metrics,
                          data_loaders=data_loaders)

        segmentor = trainer.train(num_epochs=NUM_EPOCHS,
                                  model_pattern=MODEL_FILE_PATH +
                                  '_{}_fold.pth'.format(index))

        test_masks = predict(config,
                             segmentor,
                             test_images_loader,
                             thresholding=False)
        test_masks = trim_masks(test_masks,
                                height=IMG_SIZE_ORIGIN,
                                width=IMG_SIZE_ORIGIN)

        test_masks_folds.append(test_masks)

        np.save(FOLDS_FILE_PATH.format(index), test_masks)

    # Ensemble: average the per-fold mask predictions, then binarize.
    result_masks = np.zeros_like(test_masks_folds[0])

    for test_masks in test_masks_folds:
        result_masks += test_masks

    result_masks = result_masks.astype(dtype=np.float32)
    result_masks /= FOLDS
    result_masks = result_masks > THRESHOLD

    return test_file_paths, result_masks
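
A hedged driver sketch for the cross-validated pipeline above; the output
filename is a placeholder:

# Hypothetical entry point.
if __name__ == '__main__':
    file_paths, masks = k_fold()
    np.save('ensembled_test_masks.npy', masks)  # persist averaged masks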
Example #6
"""
Implements loaders for training and testing data.
"""

from torch.utils.data import DataLoader
from torchvision.transforms import Compose

from dataset import FacialLandmarksDataset
from transformations import (Rescale, RandomCrop, Normalize, ToTensor)

__author__ = "Victor mawusi Ayi <*****@*****.**>"

data_transform = Compose(
    [Rescale(250), RandomCrop(224),
     Normalize(), ToTensor()])


def train_dataset(transforms_pipe=data_transform):
    return FacialLandmarksDataset(
        keypoints_file='/data/training_frames_keypoints.csv',
        images_dir='/data/training/',
        transforms=transforms_pipe)


def test_dataset(transforms_pipe=data_transform):
    return FacialLandmarksDataset(
        keypoints_file='/data/test_frames_keypoints.csv',
        images_dir='/data/test/',
        transforms=transforms_pipe)
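
The module imports DataLoader but leaves loader construction to callers; a
minimal sketch (the batch size is an arbitrary choice):

train_loader = DataLoader(train_dataset(), batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset(), batch_size=32, shuffle=False)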