Exemple #1
0
    def __init__(self, hparams):
        """Configure the 3D U-Net model, loss, metrics and patch transforms.

        hparams is expected to provide: learning_rate, data_set_dir,
        batch_size, num_workers, valid_split, num_pool, num_features and
        patch_x / patch_y / patch_z (assumed to be an argparse-style
        namespace — TODO confirm against the caller).
        """
        super(Unet3D, self).__init__()
        self.hparams = hparams
        self.learning_rate = hparams.learning_rate
        self.data_set_dir = hparams.data_set_dir
        # Shared DataLoader settings; pin_memory speeds host-to-GPU copies.
        self.loader_kwargs = {
            'batch_size': hparams.batch_size,
            'num_workers': hparams.num_workers,
            'pin_memory': True
        }
        self.valid_split = hparams.valid_split

        num_pool = hparams.num_pool
        num_features = hparams.num_features
        patch_size = (hparams.patch_x, hparams.patch_y, hparams.patch_z)

        def encode_kwargs_fn(level):
            # Deeper encoder levels stack more residual blocks (at least one).
            num_stacks = max(level, 1)
            return {'num_stacks': num_stacks}

        paired_features = generate_paired_features(num_pool, num_features)

        # Single-channel input and output; attention enabled on the
        # upsampling path, residual blocks throughout.
        self.net = Unet(in_channels=1,
                        out_channels=1,
                        paired_features=paired_features,
                        pool_block=ResBlock,
                        pool_kwargs={'stride': 2},
                        up_kwargs={'attention': True},
                        encode_block=ResBlockStack,
                        encode_kwargs_fn=encode_kwargs_fn,
                        decode_block=ResBlock)

        self.loss_fn = FocalDiceCoefLoss()
        self.metrics = {'kd_dsc': DiceCoef()}

        # Training-time augmentation pipeline; CombineLabels([1, 2], 3)
        # presumably merges labels 1 and 2 into label 3 — confirm.
        self.tr_transform = Compose([
            RandomRescaleCrop(0.1,
                              patch_size,
                              crop_mode='random',
                              enforce_label_indices=[1]),
            RandomMirror((0.2, 0, 0)),
            RandomContrast(0.1),
            RandomBrightness(0.1),
            RandomGamma(0.1),
            CombineLabels([1, 2], 3),
            ToTensor()
        ])
        # Validation: plain random crop, same label merging, no photometric jitter.
        self.vd_transform = Compose(
            [RandomCrop(patch_size),
             CombineLabels([1, 2], 3),
             ToTensor()])
Exemple #2
0
 def __init__(self):
     """Set up the volume preprocessing pipeline."""
     # Map intensities in [-1300, 500] to [0, 1] (presumably CT Hounsfield
     # units — confirm), then crop and resize to a fixed 48x96x96 volume.
     steps = [
         Normalize(bound=[-1300., 500.], cover=[0., 1.]),
         CenterCrop([48, 96, 96]),
         ToTensor(),
         Resize([48, 96, 96]),
     ]
     self.trans = v_transforms.Compose(steps)
Exemple #3
0
def get_datasets(label_columns=None,
                 transform=None,
                 balance_train=True,
                 ratio=0.1,
                 cancer_data_dir='/home/noskill/projects/cancer.old/data'):
    """Load the merged cancer dataset and split it into train/test GeneDatasets.

    Args:
        label_columns: columns used as labels; defaults to ['study'].
            (Previously a mutable default list — replaced with the None
            sentinel idiom; behavior for existing callers is unchanged.)
        transform: DataLabelCompose applied to features/labels; defaults to
            tensor conversion on both.
        balance_train: forwarded to random_split.
        ratio: validation split ratio forwarded to random_split.
        cancer_data_dir: dataset root directory (previously hard-coded in
            the body; the old path is kept as the default).

    Returns:
        (train_set, test_set) pair of GeneDataset.
    """
    if label_columns is None:
        label_columns = ['study']
    dataset_dict = load_merged_dataset(cancer_data_dir)
    merged = dataset_dict['merged']
    genes_features = dataset_dict['genes_features']
    # First column is assumed to be an id column; the rest are gene features.
    genes_columns = genes_features.columns.to_list()[1:]
    feature_columns = genes_columns
    if transform is None:
        to_tensor = ToTensor()
        transform = DataLabelCompose(to_tensor, to_tensor)

    train_data, train_labels, val_data, val_labels = random_split(
        merged,
        feature_columns,
        label_columns,
        balance_train=balance_train,
        balance_by_study=False,
        ratio=ratio)
    # assert val_labels.mean() == 0.5
    train_set = GeneDataset(train_data, train_labels, transform)
    test_set = GeneDataset(val_data, val_labels, transform)
    return train_set, test_set
Exemple #4
0
def get_data_loader(cfg, data_dir, batch_size=None):
    """Build a shuffled DataLoader over DIV2K with HR-crop + tensor transforms."""
    if batch_size is None:
        batch_size = cfg["batch_size"]
    pipeline = transforms.Compose([
        RandomCrop(cfg["hr_crop_size"], cfg["scale"]),
        ToTensor(),
    ])
    div2k = DIV2K(data_dir, transform=pipeline)
    return DataLoader(div2k, batch_size=batch_size, shuffle=True, num_workers=8)
Exemple #5
0
    def __init__(self, num=1):
        """Prepare crop, resize, noise and homography transforms."""
        from fem import noise
        from fem.training import make_noisy_transformers

        self.num = num
        self.noisy = make_noisy_transformers()
        self.imgcrop = noise.RandomCropTransform(size=256, beta=0)
        self.resize = noise.Resize((256, 256))
        self.to_tensor = ToTensor()
        # One homography sample per image with mild perspective distortion.
        self.homography = HomographySamplerTransformer(num=1,
                                                       beta=14,
                                                       theta=0.08,
                                                       random_scale_range=(0.8, 1.3),
                                                       perspective=85)
Exemple #6
0
def make_noisy_transformers():
    """Compose a random photometric-noise pipeline followed by tensor conversion."""
    from torchvision.transforms import Compose
    from transform import RandomTransformer, ToTensor

    from noise import AdditiveGaussian, RandomBrightness, AdditiveShade, MotionBlur, SaltPepper, RandomContrast

    # ColorInversion doesn't seem to be useful on most datasets, so it is left out.
    noise_ops = [
        AdditiveGaussian(var=30),
        RandomBrightness(range=(-50, 50)),
        AdditiveShade(kernel_size_range=[45, 85],
                      transparency_range=(-0.25, .45)),
        SaltPepper(),
        MotionBlur(max_kernel_size=5),
        RandomContrast([0.6, 1.05]),
    ]
    return Compose([RandomTransformer(noise_ops), ToTensor()])
Exemple #7
0
 def __init__(self, cfg):
     """Build augmentation transforms and read the sample list for cfg.mode."""
     super(Data, self).__init__()
     self.cfg = cfg
     # Data augmentation / preprocessing operators.
     self.randombrig = RandomBrightness()
     self.normalize = Normalize(mean=cfg.mean, std=cfg.std)
     self.randomcrop = RandomCrop()
     self.blur = RandomBlur()
     self.randomvflip = RandomVorizontalFlip()
     self.randomhflip = RandomHorizontalFlip()
     self.resize = Resize(384, 384)
     self.totensor = ToTensor()
     # Read the sample list: one id per line in <datapath>/<mode>.txt.
     listing = cfg.datapath + '/' + cfg.mode + '.txt'
     with open(listing, 'r') as lines:
         self.samples = [line.strip() for line in lines]
Exemple #8
0
 def __init__(self, root):
     """Index the validation .npy volumes under *root* and set up transforms."""
     self.size = (192, 192)
     self.root = root
     if not os.path.exists(self.root):
         raise Exception("[!] {} not exists.".format(root))
     # Convert to tensor first, then center-crop to a fixed spatial size.
     self.img_transform = Compose([
         ToTensor(),
         CenterCrop(self.size),
         # RangeNormalize(min_val=-1,max_val=1),
         # RandomFlip(),
     ])
     self.name = os.path.basename(root)
     # Collect the "val" split; sorting keeps a deterministic order.
     self.input_paths = sorted(
         glob(os.path.join(self.root, '{}/*.npy'.format("val"))))
     if len(self.input_paths) == 0:
         raise Exception("No validations are found in {}".format(self.root))
Exemple #9
0
def get_metagx_dataset(ratio=0.1,
                       data_dir='/home/noskill/projects/cancer/data/metaGxBreast/',
                       min_genes=5000):
    """Load the MetaGxBreast dataset and split it into train/test GeneDatasets.

    Args:
        ratio: validation split ratio forwarded to util.random_split.
        data_dir: dataset root directory (previously hard-coded in the body;
            the old path is kept as the default for backward compatibility).
        min_genes: minimum gene count passed to load_metagx_dataset
            (previously the hard-coded literal 5000).

    Returns:
        (train_set, test_set) pair of GeneDataset.
    """
    data = load_metagx_dataset(data_dir, min_genes=min_genes)
    merged = data['merged']
    genes_list = data['genes_features']

    train_data, train_labels, val_data, val_labels = util.random_split(
        merged,
        genes_list,
        ['study'],
        balance_validation=False,
        balance_by_study=False,
        ratio=ratio)
    # Convert both features and labels to tensors; no other preprocessing.
    to_tensor = ToTensor()
    transform = DataLabelCompose(to_tensor, to_tensor)

    # assert val_labels.mean() == 0.5
    train_set = GeneDataset(train_data, train_labels, transform)
    test_set = GeneDataset(val_data, val_labels, transform)
    return train_set, test_set
def read_image(dir):
    """Run the keypoint model on the image at *dir* and display predictions.

    NOTE(review): the parameter shadows the builtin ``dir``; the name is kept
    because it is part of the public interface.
    """
    pipeline = transforms.Compose(
        [Rescale(224), RandomCrop(223),
         Normalize(), ToTensor()])

    # Load as RGB and push through the preprocessing pipeline.
    image = cv2.cvtColor(cv2.imread(dir), cv2.COLOR_BGR2RGB)
    transformed = pipeline({'image': image, 'keypoints': None})

    images = transformed['image']
    print(images.shape)
    images = images.type(torch.FloatTensor).unsqueeze(0)
    out = model(images).view(-1, 2)
    # Undo the model's coordinate normalization (scale 50, offset 100 —
    # presumably matching the training-time normalization; confirm).
    out = out.data * 50.0 + 100
    # NCHW tensor -> HWC numpy image for display.
    images = np.transpose(images, (0, 2, 3, 1)).numpy().squeeze(3)
    display(images.squeeze(0), out)
Exemple #11
0
import cv2
import torch
from torch.utils.data import DataLoader

from dataset import FightDataset
from transform import Compose, ToTensor, Resize
from model import MyNet

# Train MyNet on the fight-classification dataset (script-level code).
torch.cuda.set_device(0)
transform_ = Compose([Resize((112, 112)), ToTensor()])
# NOTE(review): the keyword is spelled "tranform" — presumably it matches a
# typo in FightDataset's own signature; confirm before renaming it.
xx = FightDataset("./fight_classify", tranform=transform_)

dataloader = DataLoader(xx, batch_size=1, shuffle=True)
# for i_batch, sample_batched in enumerate(dataloader):
#     print(i_batch)
#     print(sample_batched["image"].size())
dev = torch.device("cuda:0")
model = MyNet().to(dev)

criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-8, momentum=0.9)

# Training loop. NOTE(review): this excerpt computes only the forward pass;
# no loss/backward/optimizer step is visible here — the snippet appears
# truncated.
for t in range(20):
    # Forward pass: Compute predicted y by passing x to the model
    for i_batch, sample_batched in enumerate(dataloader):
        image = sample_batched["image"]
        label = sample_batched["label"]
        # label = torch.transpose(label, 0,1)
        y_pred = model(image)
        # print(y_pred)
        # print(label)
Exemple #12
0
def main():
    """Train the Res segmentation model with per-epoch validation and
    val-loss-named checkpoints.

    Reads CLI arguments from the module-level ``parser``; writes TensorBoard
    scalars under runs/ and checkpoints under Trainid_<id>/Checkpoint/.
    """
    args = parser.parse_args()
    save_path = 'Trainid_' + args.id
    writer = SummaryWriter(log_dir='runs/' + args.tag + str(time.time()))
    # makedirs with exist_ok replaces the old conditional mkdir pair: the old
    # code skipped creating Checkpoint/ whenever save_path already existed,
    # which made the first save_checkpoint call crash on a half-built tree.
    os.makedirs(save_path + '/Checkpoint', exist_ok=True)

    train_dataset_path = 'data/train'
    val_dataset_path = 'data/valid'
    train_transform = transforms.Compose([ToTensor()])
    val_transform = transforms.Compose([ToTensor()])
    train_dataset = TrainDataset(path=train_dataset_path,
                                 transform=train_transform)
    val_dataset = TrainDataset(path=val_dataset_path, transform=val_transform)

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  pin_memory=True,
                                  num_workers=4)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                pin_memory=True,
                                num_workers=4)

    size_train = len(train_dataloader)
    size_val = len(val_dataloader)
    print('Number of Training Images: {}'.format(size_train))
    print('Number of Validation Images: {}'.format(size_val))
    start_epoch = 0
    model = Res(n_ch=4, n_classes=9)
    # Weight 0 on the last class presumably excludes it from the CE loss
    # (e.g. an ignore/background class) — confirm with the label definition.
    class_weights = torch.Tensor([1, 1, 1, 1, 1, 1, 1, 1, 0]).cuda()
    criterion = DiceLoss()
    criterion1 = torch.nn.CrossEntropyLoss(weight=class_weights)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    if args.gpu:
        model = model.cuda()
        criterion = criterion.cuda()
        criterion1 = criterion1.cuda()

    if args.resume is not None:
        # Checkpoints are named '<val_loss>.pth.tar'; strip the 8-char
        # extension and sort numerically so [0] is the lowest val loss.
        weight_path = sorted(os.listdir(save_path + '/Checkpoint/'),
                             key=lambda x: float(x[:-8]))[0]
        checkpoint = torch.load(save_path + '/Checkpoint/' + weight_path)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        # NOTE(review): this prints args.resume, not the epoch actually
        # loaded (start_epoch) — possibly misleading; confirm intent.
        print('Loaded Checkpoint of Epoch: {}'.format(args.resume))

    for epoch in range(start_epoch, int(args.epoch) + start_epoch):
        adjust_learning_rate(optimizer, epoch)
        train(model, train_dataloader, criterion, criterion1, optimizer, epoch,
              writer, size_train)
        print('')
        val_loss = val(model, val_dataloader, criterion, criterion1, epoch,
                       writer, size_val)
        print('')
        # Checkpoint file name encodes the validation loss (see resume logic).
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            },
            filename=save_path + '/Checkpoint/' + str(val_loss) + '.pth.tar')
    writer.export_scalars_to_json(save_path + '/log.json')
Exemple #13
0
    import numpy as np
    import matplotlib.pyplot as plt
    from transform import Normalize, RandomScaleCrop, RandomGaussianBlur, RandomHorizontalFlip, ToTensor
    from torchvision import transforms
    from torch.utils.data import DataLoader

    # Smoke-test: load one cross-validation training split and iterate it.
    trainset = JinNanDataset(
        images_dir="../data/jinnan/restricted",
        maskes_dir="../data/jinnan/mask",
        images_list="../data/jinnan/cross_validation/train_1.txt",
        transform=transforms.Compose([
            # RandomGaussianBlur(),
            RandomScaleCrop(550, 512),
            RandomHorizontalFlip(),
            Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            ToTensor()
        ]))
    print(len(trainset))
    trainloader = DataLoader(trainset,
                             batch_size=2,
                             shuffle=True,
                             num_workers=0)

    for i, sample in enumerate(trainloader):
        for j in range(sample["image"].size()[0]):
            image = sample["image"][j].numpy()
            mask = sample["mask"][j].numpy()
            # CHW -> HWC for plotting.
            image = image.transpose([1, 2, 0])
            # Undo the ImageNet normalization (multiply by std, add mean),
            # then rescale to 0-255 for display. NOTE(review): the loop body
            # appears truncated here — no imshow/show call is visible.
            image *= (0.229, 0.224, 0.225)
            image += (0.485, 0.456, 0.406)
            image = image * 255
Exemple #14
0
def get_merged_common_dataset(opt, skip_study=None, dataset_dict_cache=[], data_cache=[]):
    """Merge the curated breast-cancer data with the MetaGx data and split
    into train/test GeneDatasets.

    Args:
        opt: options object providing curated_breast_data_dir,
            metagx_data_dir, min_genes, use_covars and test_ratio.
        skip_study: if given, an index into the unique studies with a known
            posOutcome; that whole study becomes the validation set
            (leave-one-study-out). Otherwise a random split is used.
        dataset_dict_cache, data_cache: NOTE these mutable default arguments
            are *intentional* — they act as cross-call memoization of the
            loaded datasets. Do not "fix" them to None.

    Returns:
        (train_set, test_set) pair of GeneDataset.
    """
    cancer_data_dir = opt.curated_breast_data_dir
    if dataset_dict_cache:
        dataset_dict = dataset_dict_cache[0]
    else:
        dataset_dict = util.load_curated(cancer_data_dir)
        dataset_dict_cache.append(dataset_dict)
    mergedCurated = dataset_dict['merged'].copy()

    if data_cache:
        data = data_cache[0]
    else:
        data = metagx_util.load_metagx_dataset(opt.metagx_data_dir, min_genes=opt.min_genes)
        data_cache.append(data)
    merged = data['merged'].copy()
    genes_list = data['genes_features'].copy()

    # Studies whose posOutcome is labeled (-1 or 1).
    metagx_pos_outcome = merged[merged.posOutcome.isin([-1, 1])]
    print('num pos outcome studies {0}'.format(len(metagx_pos_outcome.study.unique())))
    if skip_study is not None:
        study_to_skip = metagx_pos_outcome.study.unique()[skip_study]
    else:
        study_to_skip = None

    merged_common = util.merge_metagx_curated(merged, mergedCurated)

    # Union of treatment columns from both sources, restricted to columns
    # actually present in the merged frame, de-duplicated.
    merged_treatments = list(metagx_util.treatment_columns_metagx) + util.treatment_columns_bmc
    merged_treatments = [x for x in merged_treatments if x in merged_common]
    merged_treatments = list(set(merged_treatments))
    # Treat columns with many (>20) distinct values as continuous covariates
    # and keep them separate from the binary treatment columns.
    cont_columns = [x for x in merged_treatments if len(merged_common[x].unique()) > 20]
    merged_treatments = [x for x in merged_treatments if x not in cont_columns]
    common_genes_list = [x for x in genes_list if x in merged_common]
    if opt.use_covars:
        non_genes = cont_columns + merged_treatments + ['posOutcome']
    else:
        non_genes = []
    if study_to_skip is None:
        train_data, train_labels, val_data, val_labels = util.random_split(merged_common,
                                                              common_genes_list + non_genes,
                                                              ['study', 'posOutcome'],
                                                              balance_validation=False,
                                                              balance_by_study=False,
                                                              ratio=opt.test_ratio,
                                                              to_numpy=False)
    else:
        train_data, train_labels, val_data, val_labels = next(util.split_by_study(merged_common,
                                                              common_genes_list + non_genes,
                                                              ['study', 'posOutcome'],
                                                              study=study_to_skip,
                                                              to_numpy=False))
        # it's ok to use gene expression in unsupervised model:
        # the held-out study's rows are appended to the training data with
        # their covariates and outcome zeroed out.
        copy = val_data.copy()
        copy.loc[:, non_genes] = 0
        val_copy = val_labels.copy()
        val_copy.loc[:, 'posOutcome'] = 0
        train_data = pandas.concat([train_data, copy], ignore_index=True)
        train_labels = pandas.concat([train_labels, val_copy], ignore_index=True)
        print('validation study {0}'.format(study_to_skip))
        print(val_data.shape)

    train_data.fillna(0, inplace=True)
    val_data.fillna(0, inplace=True)
    # Feature/label transform pipeline: small additive noise on selected
    # columns (only when posOutcome is among the features), then tensor +
    # float conversion.
    to_tensor = ToTensor()
    to_float = ToType('float')
    add_age = AdditiveUniform(-0.5, 0.5, 'age')
    add_tumor_size = AdditiveUniform(-0.5, 0.5, 'tumor_size')
    add_posOutcome = AdditiveUniformTriary(0.0, 0.05, 'posOutcome')
    add_treat = Compose([AdditiveUniformTriary(0.0, 0.05, x) for x in merged_treatments])
    lst = []
    if 'posOutcome' in train_data.columns:
        lst = [add_age, add_tumor_size, add_posOutcome, add_treat]
    compose = Compose(lst + [to_tensor, to_float])
    compose_label = Compose([add_posOutcome, to_tensor, to_float])
    # NOTE(review): the computed num_binary is immediately overwritten with 0
    # — possibly a deliberate experiment toggle; confirm before removing.
    num_binary = len(merged_treatments + ['posOutcome'])
    num_binary = 0
    transform = DataLabelCompose(compose, compose_label)

    train_set = GeneDataset(train_data, train_labels, transform, binary=num_binary)
    test_set = GeneDataset(val_data, val_labels, transform, binary=num_binary)
    return train_set, test_set
    'kd_dsc': DiceCoef(weight=[0, 1, 0]),
    'ca_dsc': DiceCoef(weight=[0, 0, 1])
}
scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=30)
dataset = CaseDataset('data/Task00_Kidney/region_norm')
# Cubic training patches, 128 voxels per side.
patch_size = (128, 128, 128)
# Training augmentation: rescale-crop (presumably forced to contain label 1
# via enforce_label_indices — confirm), mirroring on all three axes, then
# light photometric jitter.
train_transform = Compose([
    RandomRescaleCrop(0.1,
                      patch_size,
                      crop_mode='random',
                      enforce_label_indices=[1]),
    RandomMirror((0.5, 0.5, 0.5)),
    RandomContrast(0.1),
    RandomBrightness(0.1),
    RandomGamma(0.1),
    ToTensor()
])

# Validation: plain random crop, no augmentation.
valid_transform = Compose([RandomCrop(patch_size), ToTensor()])

# valid_split=0: train on the full dataset with no held-out validation split.
trainer = Trainer(model=model,
                  optimizer=optimizer,
                  loss=loss,
                  metrics=metrics,
                  dataset=dataset,
                  scheduler=scheduler,
                  train_transform=train_transform,
                  valid_transform=valid_transform,
                  batch_size=2,
                  valid_split=0)
Exemple #16
0
            ims = ims[:, :32]
        return ims

    def get_multiple(self, ):
        # Placeholder: not implemented in this excerpt.
        pass


if __name__ == '__main__':
    # Smoke-test: iterate the EPIC-KITCHENS action dataset once.
    from transform import (Compose, Normalize, Scale, CenterCrop, CornerCrop,
                           MultiScaleCornerCrop, MultiScaleRandomCrop,
                           RandomHorizontalFlip, ToTensor)
    # Scale to 224x224, convert with a 255 divisor, then apply the standard
    # ImageNet mean/std normalization.
    D = EPIC_KITCHENS(
        '/mnt/nisho_data2/hyf/EPIC-annotations/EPIC_train_action_labels.csv',
        transform=Compose([
            Scale([224, 224]),
            ToTensor(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]))
    loader = DataLoader(
        dataset=D,
        batch_size=2,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
    )
    print(len(loader))
    from tqdm import tqdm
    # Drain the loader just to verify every sample loads without error.
    for i, sample in tqdm(enumerate(loader)):
        pass
        # print(sample['ims'].size())  #(b, 3, cliplen, 224, 224)
        # print(sample['vid'])  #['P01_01', 'P01_01']
            print(m)


def save_models(models, path):
    """Serialize each model's state_dict to ``<path>/model_<i>.pt``."""
    for idx, net in enumerate(models):
        torch.save(net.state_dict(), f'{path}/model_{idx}.pt')


WORKING_DIRECTORY = 'Documents/studia/mgr/master-thesis'

# NOTE(review): the relative chdir only succeeds when the process starts in
# the parent of WORKING_DIRECTORY; the substring test is a heuristic — verify.
if WORKING_DIRECTORY not in os.getcwd():
    os.chdir(WORKING_DIRECTORY)

# Spectrogram dataset: RGB conversion, 128x128 rescale, per-channel 0.5/0.5
# normalization, tensor conversion.
dataset = FMASpectrogramsDataset(csv_file='track_mapping.txt', lookup_table='genres.txt', root_dir='spectrograms',
                                 transform=transforms.Compose([ToRGB(), Rescale((128, 128)),  MelspectrogramNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)), ToTensor()]))

# Fixed seed so the 80/10/10 split below is reproducible.
torch.manual_seed(100)
train_size = int(0.8 * len(dataset))
validation_size = int(0.1 * len(dataset))
test_size = len(dataset) - validation_size - train_size
# NOTE(review): random_split returns subsets in the order of the size list
# [train, validation, test], but the unpack is (train, test, validation) —
# the test/validation names appear swapped; confirm before relying on them.
train_dataset, test_dataset, validation_dataset = random_split(
    dataset, [train_size, validation_size, test_size])

torch.cuda.empty_cache()
# Hyperparameters.
num_epochs = 30
num_classes = 8
batch_size = 16
learning_rate = 0.001
Exemple #18
0
    return data


def show_data(n_images, nrow=5):
    """Plot a grid of *n_images* random samples from the module-level dataset.

    Args:
        n_images: number of images to draw (one shuffled batch).
        nrow: images per row in the grid.
    """
    loader = DataLoader(dataset,
                        batch_size=n_images,
                        shuffle=True,
                        num_workers=4)

    plt.figure()

    sample = next(iter(loader))

    for idx, img in enumerate(sample['image']):
        # CHW tensor -> HWC for matplotlib.
        plt.subplot(n_images // nrow, nrow, idx + 1)
        plt.imshow(img.permute(1, 2, 0))
        plt.axis('off')

    plt.show()


if __name__ == "__main__":
    # The module-level `dataset` created here is read by show_data().
    dataset = BrushStrokeDataset('calligraphy_100k/labels.csv',
                                 'calligraphy_100k/images/',
                                 transform=transforms.Compose([ToTensor()]))

    show_data(25)
Exemple #19
0
loss = FocalDiceCoefLoss()
metrics = {'kd_dsc': Dice()}
scheduler = ReduceLROnPlateau(optimizer, factor=0.2, patience=60)
dataset = CaseDataset('data/Task00_Kidney/norm')
# Anisotropic training patches: 144x144 in-plane, 96 along the third axis.
patch_size = (144, 144, 96)
# Training augmentation: rescale-crop (presumably forced to contain label 1
# via enforce_label_indices — confirm), mirroring on the first axis only,
# photometric jitter, then merging labels 1 and 2 into label 3.
train_transform = Compose([
    RandomRescaleCrop(0.1,
                      patch_size,
                      crop_mode='random',
                      enforce_label_indices=[1]),
    RandomMirror((0.2, 0, 0)),
    RandomContrast(0.1),
    RandomBrightness(0.1),
    RandomGamma(0.1),
    CombineLabels([1, 2], 3),
    ToTensor()
])

# Validation: random crop with the same label merging, no photometric jitter.
valid_transform = Compose(
    [RandomCrop(patch_size),
     CombineLabels([1, 2], 3),
     ToTensor()])

# Resume both model and optimizer state from a previous run's checkpoint.
ckpt = torch.load('logs/Task00_Kidney/kd-2004052152-epoch=314.pt')
model.load_state_dict(ckpt['model_state_dict'])
optimizer.load_state_dict(ckpt['optimizer_state_dict'])

trainer = Trainer(model=model,
                  optimizer=optimizer,
                  loss=loss,
                  metrics=metrics,