Example #1
def get_transform_2(train):
    transforms = []
    # converts the image, a PIL image, into a PyTorch Tensor
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
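Many of these snippets follow the torchvision detection-reference pattern, where T is a local transforms module rather than torchvision.transforms itself. A minimal usage sketch, assuming plain torchvision and the synthetic FakeData dataset so it runs standalone:

import torchvision
import torchvision.transforms as T

# compose ToTensor + random flip, then hand the callable to a dataset
transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip(0.5)])
dataset = torchvision.datasets.FakeData(transform=transform)
img, label = dataset[0]  # img is a [0, 1]-scaled Tensor, flipped with p=0.5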
Example #2
def get_transform(train, p=0.5):
    transforms = []
    transforms.append(ToTensor())  # ToTensor() also scales pixel values from [0, 255] to [0, 1]
    if train:
        transforms.append(RandomHorizontalFlip(p))
    return Compose(transforms)
Example #3
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    # transforms.append(T.Normalize(mean=(0.485, 0.456, 0.406),std=(0.229, 0.224, 0.225)))
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
Example #4
def generate_dataset(in_dir, out_dir, ann_ext, **kwargs):
    """
    Generates a single-beat dataset based on a folder containing WFDB records.
    """
    transforms = []
    if kwargs['transform_lpf']:
        transforms.append(ecgbc.dataset.transforms.LowPassFilterWFDB())
    if kwargs['transform_sma']:
        transforms.append(ecgbc.dataset.transforms.SubtractMovingAverageWFDB())

    wfdb_dataset = ecgbc.dataset.wfdb_dataset.WFDBDataset(
        root_path=in_dir,
        recname_pattern=kwargs['rec_pattern'],
        transform=torchvision.transforms.Compose(transforms))

    generator = ecgbc.dataset.wfdb_single_beat.Generator(
        wfdb_dataset,
        in_ann_ext=ann_ext,
        out_ann_ext=f'ecg{ann_ext}',
        calculate_rr_features=True,
        filter_rri=kwargs['filter_rri'],
        aami_compatible=kwargs['aami'])

    generator.write(out_dir)

    dataset = ecgbc.dataset.wfdb_single_beat.SingleBeatDataset(out_dir)
    print(dataset)
Example #5
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    # transforms.append(T.Normalize(mean=(0.3520, 0.3520, 0.3520),std=(0.2930, 0.2930, 0.2930)))
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
Example #6
def get_transform(train):
    transforms = []
    # converts the image, a PIL image, into a PyTorch Tensor
    transforms.append(T.ToTensor())
    if train:
        # during training, randomly flip the training images
        # and ground-truth for data augmentation
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
Example #7
def get_transform(train=True):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        #         transforms.append(T.Occlude((0, 1.0)))
        pass
    else:
        #         transforms.append(T.Occlude((0.4, 0.8)))
        transforms.append(T.ToPILImage())
    return T.Compose(transforms)
Example #8
def get_transform(cfg):
    transforms = []
    for t in cfg:
        name, params = list(t.items())[0]
        if params is None:
            params = {}
        t = getattr_mods([
            torchvision.transforms,
            horch.transforms,
        ], name)(**params)
        transforms.append(t)
    return Compose(transforms)
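Example #8 resolves each transform by name from a config list of single-key dicts (the getattr_mods helper and horch.transforms module are specific to that project). A sketch of the expected cfg shape, and the same idea using plain getattr against only torchvision; the config values here are hypothetical:

import torchvision.transforms

# hypothetical config: each entry maps a transform name to its params (or None)
cfg = [
    {'Resize': {'size': 256}},
    {'RandomHorizontalFlip': None},
    {'ToTensor': None},
]

transforms = []
for t in cfg:
    name, params = list(t.items())[0]
    cls = getattr(torchvision.transforms, name)
    transforms.append(cls(**(params or {})))
pipeline = torchvision.transforms.Compose(transforms)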
Example #9
    def __call__(self, img):
        transforms = []
        noise = np.random.choice([1, 2])
        noise = 2  # NOTE: this hard-coded value discards the random choice, so only the gaussian_noise branch ever runs
        if noise == 1:
            transforms.append(
                Lambda(lambda img: self.camera_noise(img, self.sigma)))
        else:
            transforms.append(
                Lambda(lambda img: self.gaussian_noise(img, self.sigma)))

        transform = torchvision.transforms.Compose(transforms)
        return transform(img)
Example #10
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.
        Arguments are same as that of __init__.
        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []
        if brightness > 0:
            brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)
            transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))

        if contrast > 0:
            contrast_factor = random.uniform(max(0, 1 - contrast), 1 + contrast)
            transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        if saturation > 0:
            saturation_factor = random.uniform(max(0, 1 - saturation), 1 + saturation)
            transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))

        if hue > 0:
            hue_factor = random.uniform(-hue, hue)
            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)

        transform = Compose(transforms)

        return transform
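The pattern in Example #10 (wrap each functional adjustment in a Lambda, then shuffle so the application order is random per call) can be exercised standalone. A minimal sketch, assuming plain torchvision; the image here is synthetic:

import random
from PIL import Image
import torchvision.transforms.functional as F
from torchvision.transforms import Compose, Lambda

brightness_factor = random.uniform(0.8, 1.2)
contrast_factor = random.uniform(0.8, 1.2)
ops = [
    Lambda(lambda img: F.adjust_brightness(img, brightness_factor)),
    Lambda(lambda img: F.adjust_contrast(img, contrast_factor)),
]
random.shuffle(ops)  # randomize application order, as get_params does above

img = Image.new('RGB', (64, 64), color=(128, 128, 128))
out = Compose(ops)(img)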
Example #11
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.
        Arguments are same as that of __init__.
        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []

        if brightness is not None:
            brightness_factor = random.uniform(brightness[0], brightness[1])
            transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))

        if contrast is not None:
            contrast_factor = random.uniform(contrast[0], contrast[1])
            transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        if saturation is not None:
            saturation_factor = random.uniform(saturation[0], saturation[1])
            transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))

        if hue is not None:
            hue_factor = random.uniform(hue[0], hue[1])
            transforms.append(torchvision.transforms.Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        transform = torchvision.transforms.Compose(transforms)

        return transform
Example #12
def get_transform(train):
    base_size = 520
    crop_size = 480

    min_size = int((0.5 if train else 1.0) * base_size)
    max_size = int((2.0 if train else 1.0) * base_size)
    transforms = []
    transforms.append(T.RandomResize(min_size, max_size))
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
        transforms.append(T.RandomCrop(crop_size))
    transforms.append(T.ToTensor())
    transforms.append(
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))

    return T.Compose(transforms)
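Example #12 appears to come from the torchvision segmentation reference scripts, where T is a local transforms module whose transforms take and return an (image, target) pair so the image and its mask stay aligned (T.RandomResize, for instance, is defined there rather than in torchvision.transforms). A minimal sketch of that pair-aware pattern, with hypothetical class names:

import random
import torchvision.transforms.functional as F

class PairCompose:
    """Apply a sequence of (image, target) transforms."""
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target

class PairRandomHorizontalFlip:
    """Flip image and mask together, with probability p."""
    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, image, target):
        if random.random() < self.p:
            image = F.hflip(image)
            target = F.hflip(target)
        return image, target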
Example #13
    def __init__(self, opts, phase='train', input_dim=3):
        self.dataroot = opts.dataroot
        images = os.listdir(os.path.join(self.dataroot, phase))
        self.img_A = [os.path.join(self.dataroot, phase, x) for x in images]
        self.size = len(self.img_A)
        self.input_dim = input_dim
        self.img_B = [os.path.join(self.dataroot, phase, x) for x in images]
        self.pair = opts.pair
        if self.pair == 'False':
            random.shuffle(self.img_B)

        # setup image transformation
        transforms = [
            Resize((opts.crop_size, opts.crop_size * 2), Image.BICUBIC)
        ]
        #transforms.append(CenterCrop(opts.crop_size))
        transforms.append(ToTensor())
        transforms.append(Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
        self.transforms = Compose(transforms)
        print('edge2shoes: %d images' % (self.size))
        return
Example #14
    def create_transforms(self):
        transforms = []

        # clipping to remove outliers (if any)
        # clip_intensity = Lambda(VolumeDataset.clip_image, types_to_apply=[torchio.INTENSITY])
        # transforms.append(clip_intensity)

        rescale = RescaleIntensity((-1, 1), percentiles=(0.5, 99.5))
        # normalize with mu = 0 and sigma = 1/3 to have data in -1...1 almost
        # ZNormalization()

        transforms.append(rescale)

        # transforms = [rescale]
        # # As RandomAffine is faster than RandomElasticDeformation, we choose to
        # # apply RandomAffine 80% of the times and RandomElasticDeformation the rest
        # # Also, there is a 25% chance that none of them will be applied
        # if self.opt.isTrain:
        #     spatial = OneOf(
        #         {RandomAffine(translation=5): 0.8, RandomElasticDeformation(): 0.2},
        #         p=0.75,
        #     )
        #     transforms += [RandomFlip(axes=(0, 2), p=0.8), spatial]

        self.ratio = self.min_size / np.max(self.input_size)
        transforms.append(Resample(self.ratio))
        transforms.append(CropOrPad(self.input_size))
        transform = Compose(transforms)
        return transform
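Example #14 composes torchio transforms, which operate on 3D medical volumes rather than PIL images; Compose, RescaleIntensity, Resample, and CropOrPad all come from torchio here, not torchvision. A minimal sketch, assuming torchio is installed; the shapes are hypothetical:

import torch
import torchio as tio

transform = tio.Compose([
    tio.RescaleIntensity(out_min_max=(-1, 1), percentiles=(0.5, 99.5)),
    tio.CropOrPad((128, 128, 64)),  # hypothetical target shape
])
out = transform(torch.rand(1, 160, 160, 80))  # (channels, x, y, z) volume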
Example #15
    def __init__(self, opts):
        self.dataroot = opts.dataroot

        # A
        images_A = os.listdir(os.path.join(self.dataroot, opts.phase + 'A'))
        self.A = [
            os.path.join(self.dataroot, opts.phase + 'A', x) for x in images_A
        ]

        # B
        images_B = os.listdir(os.path.join(self.dataroot, opts.phase + 'B'))
        self.B = [
            os.path.join(self.dataroot, opts.phase + 'B', x) for x in images_B
        ]

        self.A_size = len(self.A)
        self.B_size = len(self.B)
        self.dataset_size = max(self.A_size, self.B_size)
        self.input_dim_A = opts.input_dim_a
        self.input_dim_B = opts.input_dim_b

        # setup image transformation
        transforms = [
            Resize((opts.resize_size, opts.resize_size), Image.BICUBIC)
        ]
        if opts.phase == 'train':
            transforms.append(RandomCrop(opts.crop_size))
        else:
            transforms.append(CenterCrop(opts.crop_size))
        if not opts.no_flip:
            transforms.append(RandomHorizontalFlip())
        transforms.append(ToTensor())
        transforms.append(Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]))
        # transforms.append(Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
        self.transforms = Compose(transforms)
        print('A: %d, B: %d images' % (self.A_size, self.B_size))
        return
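A quick check that Normalize(mean=0.5, std=0.5) maps [0, 1] inputs to [-1, 1], which is why image-translation pipelines like Examples #13 and #15 typically use it instead of ImageNet statistics (their generators usually produce tanh outputs in [-1, 1]):

import torch
from torchvision.transforms import Normalize

x = torch.tensor([[[0.0, 0.5, 1.0]]])  # one channel, values in [0, 1]
y = Normalize(mean=[0.5], std=[0.5])(x)
print(y)  # tensor([[[-1., 0., 1.]]])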
Example #16
def get_transform(settings):
    transforms = []
    if not settings.isToTensor:
        transforms.append(T.ToTensor())
    if not settings.isRandomHorizontalFlip:
        if settings.mRandomHorizontalFlip:
            # use the configured flip probability when one is given
            transforms.append(
                T.RandomHorizontalFlip(settings.mRandomHorizontalFlip))
        else:
            transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
Example #17
    def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4):
        transforms = []
        if brightness != 0:
            transforms.append(Brightness(brightness))
        if contrast != 0:
            transforms.append(Contrast(contrast))
        if saturation != 0:
            transforms.append(Saturation(saturation))

        RandomOrder.__init__(self, transforms)
Example #18
def get_transform(train):
    transforms = []
    # converts the image, a PIL image, into a PyTorch Tensor
    transforms.append(T.ToTensor())
    if train:
        # during training, randomly flip the training images
        # and ground-truth for data augmentation
        transforms.append(T.RandomHorizontalFlip(0.5))

    transforms.append(
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    return T.Compose(transforms)
Example #19
    def __call__(self, img):
        transforms = []
        noise = np.random.choice([1, 2, 3])
        dispersion = random.uniform(self.sigmas[0], self.sigmas[1])

        if noise == 1:
            transforms.append(Lambda(lambda img: self.camera_noise(img, dispersion)))

        elif noise == 2:
            transforms.append(Lambda(lambda img: self.perlin_noise(img, dispersion)))
            transforms.append(Lambda(lambda img: self.luma_noise(img, dispersion)))
            random.shuffle(transforms)

        else:
            transforms.append(Lambda(lambda img: self.gaussian_noise(img, dispersion)))
            random.shuffle(transforms)

        transform = torchvision.transforms.Compose(transforms)
        return transform(img)
Example #20
    def get_params(brightness, contrast, saturation, hue):
        transforms = []
        if brightness is not None:
            brightness_factor = random.uniform(brightness[0], brightness[1])
            transforms.append(
                Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
        if contrast is not None:
            contrast_factor = random.uniform(contrast[0], contrast[1])
            transforms.append(
                Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))
        if saturation is not None:
            saturation_factor = random.uniform(saturation[0], saturation[1])
            transforms.append(
                Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
        if hue is not None:
            hue_factor = random.uniform(hue[0], hue[1])
            transforms.append(
                Lambda(lambda img: F.adjust_hue(img, hue_factor)))
        random.shuffle(transforms)
        transform = Compose(transforms)
        return transform
Example #21
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms
from torch.autograd import Variable
from torch.utils.data import dataloader
from dense import FCDenseNet103
from coburn.data import loader, preprocess, postprocess

SIZE = 256  # images will be resized to SIZE x SIZE before being fed into the network
# create transform for input data
transforms = []
resize_transform = preprocess.UniformResize(SIZE, SIZE)
transforms.append(resize_transform)

# convert the time series into an image with one channel
variance_transform = preprocess.Variance()
transforms.append(variance_transform)

# convert the input to a torch Tensor
transforms.append(preprocess.ToArray())
transforms.append(torchvision.transforms.ToTensor())

# compose the transforms
data_transform = torchvision.transforms.Compose(transforms)


def train(input, epochs=200, learning_rate=1e-4):
    dataset = loader.load('train', base_dir=input)
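Example #21 works because torchvision.transforms.Compose only requires each step to be callable, so project-specific preprocessing (the preprocess.* steps) mixes freely with built-in transforms. A standalone sketch of the same idea, with a hypothetical custom step loosely mirroring what preprocess.Variance might do:

import numpy as np
import torchvision.transforms

class Variance:
    """Hypothetical custom step: collapse an (N, H, W) frame series to one (H, W) variance image."""
    def __call__(self, series):
        return series.var(axis=0).astype(np.float32)

pipeline = torchvision.transforms.Compose([
    Variance(),
    torchvision.transforms.ToTensor(),  # 2D float array -> (1, H, W) tensor
])

series = np.random.rand(10, 32, 32).astype(np.float32)
tensor = pipeline(series)  # shape: (1, 32, 32)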
Example #22
def init_biobank_age_dataloader(opt, shuffle_test=False):
    """
        Initialize both datasets and dataloaders
    image_size = [128, 160, 128]

    :param opt: options
    :param shuffle_test: whether to shuffle test data
    :return: dataloader
    """
    if (opt.aug_rician_noise is not None
            or opt.aug_bspline_deformation is not None
            or opt.resize_image is not None):
        transforms = []
    else:
        transforms = None

    if opt.resize_image:
        transforms.append(ResizeImage(image_size=opt.resize_size))

    if opt.aug_rician_noise:
        transforms.append(RicianNoise(noise_level=opt.aug_rician_noise))

    if opt.aug_bspline_deformation:
        transforms.append(
            ElasticDeformationsBspline(
                num_controlpoints=opt.aug_bspline_deformation[0],
                sigma=opt.aug_bspline_deformation[1]))

    if opt.aug_rician_noise or opt.aug_bspline_deformation or opt.resize_image:
        transforms = torchvision.transforms.Compose(transforms)

    healthy_train = BiobankRegAgeDataset(image_path=opt.dataroot + '_data',
                                         label_path=opt.label_path,
                                         class_bins=opt.age_range_0,
                                         class_label=0,
                                         get_id=opt.get_id,
                                         transform=transforms)

    anomaly_train = BiobankRegAgeDataset(image_path=opt.dataroot + '_data',
                                         label_path=opt.label_path,
                                         class_bins=opt.age_range_1,
                                         class_label=1,
                                         get_id=opt.get_id,
                                         transform=transforms)

    healthy_dataloader_train, healthy_dataloader_val, healthy_dataloader_test = train_val_test_split(
        healthy_train,
        val_split=0.05,
        test_split=0.05,
        random_seed=opt.random_seed)
    anomaly_dataloader_train, anomaly_dataloader_val, anomaly_dataloader_test = train_val_test_split(
        anomaly_train,
        val_split=0.05,
        test_split=0.05,
        random_seed=opt.random_seed)

    print('Train data length: ',
          len(healthy_dataloader_train), 'Val data length: ',
          len(healthy_dataloader_val), 'Test data length: ',
          len(healthy_dataloader_test))
    print('Train data length: ',
          len(anomaly_dataloader_train), 'Val data length: ',
          len(anomaly_dataloader_val), 'Test data length: ',
          len(anomaly_dataloader_test))

    healthy_dataloader_train = torch.utils.data.DataLoader(
        healthy_dataloader_train, batch_size=opt.batch_size // 2, shuffle=True)
    anomaly_dataloader_train = torch.utils.data.DataLoader(
        anomaly_dataloader_train, batch_size=opt.batch_size // 2, shuffle=True)

    healthy_dataloader_val = torch.utils.data.DataLoader(
        healthy_dataloader_val, batch_size=opt.batch_size // 2, shuffle=True)
    anomaly_dataloader_val = torch.utils.data.DataLoader(
        anomaly_dataloader_val, batch_size=opt.batch_size // 2, shuffle=True)
    healthy_dataloader_test = torch.utils.data.DataLoader(
        healthy_dataloader_test,
        batch_size=opt.batch_size // 2,
        shuffle=shuffle_test)
    anomaly_dataloader_test = torch.utils.data.DataLoader(
        anomaly_dataloader_test,
        batch_size=opt.batch_size // 2,
        shuffle=shuffle_test)

    return healthy_dataloader_train, healthy_dataloader_val, healthy_dataloader_test, anomaly_dataloader_train, anomaly_dataloader_val, anomaly_dataloader_test
Example #23
def get_transform():
    transforms = []
    transforms.append(ToTensor())
    return Compose(transforms)
Example #24
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)
Example #25
def get_transform():
    transforms = []
    # converts the image, a PIL image, into a PyTorch Tensor
    transforms.append(T.ToTensor2())
    return T.Compose(transforms)
Example #26
def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    return T.Compose(transforms)
Example #27
def openSet_experiments(mode, args):
    # parser = argparse.ArgumentParser()
    # parser.add_argument("--mode",type=str)
    if mode == 'openSet-train':
        # datasets = args.splitType[1:]
        # print(datasets)
        datasetAll, labelsAll, numFramesAll, transforms = [], [], [], []
        for dt in args.dataset:
            print(dt)
            x, y, num, tr = base_dataset(dt)
            datasetAll += x
            labelsAll += y
            numFramesAll += num
            transforms.append(tr)
        combined = list(zip(datasetAll, labelsAll, numFramesAll))
        random.shuffle(combined)
        datasetAll[:], labelsAll[:], numFramesAll[:] = zip(*combined)

        train_dataset = ViolenceDataset(
            dataset=datasetAll,
            labels=labelsAll,
            numFrames=numFramesAll,
            spatial_transform=transforms[0]['train'],
            numDynamicImagesPerVideo=args.numDynamicImagesPerVideo,
            videoSegmentLength=args.videoSegmentLength,
            positionSegment=args.positionSegment,
            overlaping=args.overlapping,
            frame_skip=args.frameSkip,
            skipInitialFrames=args.skipInitialFrames,
            ppType=None,
            useKeyframes=args.useKeyframes,
            windowLen=args.windowLen)
        train_dataloader = torch.utils.data.DataLoader(
            train_dataset,
            batch_size=args.batchSize,
            shuffle=True,
            num_workers=args.numWorkers)
        dataloaders = {'train': train_dataloader}

        model, _ = initialize_model(
            model_name=args.modelType,
            num_classes=2,
            freezeConvLayers=args.freezeConvLayers,
            numDiPerVideos=args.numDynamicImagesPerVideo,
            joinType=args.joinType,
            use_pretrained=True)
        model.to(DEVICE)
        params_to_update = verifiParametersToTrain(model,
                                                   args.freezeConvLayers,
                                                   printLayers=True)
        # print(params_to_update)
        optimizer = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer,
                                               step_size=7,
                                               gamma=0.1)
        criterion = nn.CrossEntropyLoss()
        fold = 0
        checkpoint_path = None
        config = None
        if args.saveCheckpoint:
            config = {
                'dataset': args.dataset,
                'model': args.modelType,
                'numEpochs': args.numEpochs,
                'freezeConvLayers': args.freezeConvLayers,
                'numDynamicImages': args.numDynamicImagesPerVideo,
                'segmentLength': args.videoSegmentLength,
                'frameSkip': args.frameSkip,
                'skipInitialFrames': args.skipInitialFrames,
                'overlap': args.overlapping,
                'joinType': args.joinType,
                'log_dir': None,
                'useKeyframes': args.useKeyframes,
                'windowLen': args.windowLen
            }
            ss = ""
            for (key, val) in config.items():
                if key != 'log_dir':
                    ss = ss + "_{!s}={!r}".format(key, val)
            ss = ss.replace("\'", "")
            # print(ss)
            checkpoint_path = os.path.join(
                constants.PATH_RESULTS, 'OPENSET', 'checkpoints',
                'DYN_Stream-{}-fold={}'.format(ss, fold))

        phases = ['train']
        model, best_acc, val_loss_min, best_epoch = train_model(
            model,
            dataloaders,
            criterion,
            optimizer,
            num_epochs=args.numEpochs,
            patience=args.patience,
            fold=fold,
            path=checkpoint_path,
            model_config=config,
            phases=phases,
            metric_to_track='train-loss')

    elif mode == 'openSet-test':

        ## Load model
        checkpoint = torch.load(args.modelPath, map_location=DEVICE)
        model = checkpoint['model_config']['model']
        numDynamicImages = checkpoint['model_config']['numDynamicImages']
        joinType = checkpoint['model_config']['joinType']
        freezeConvLayers = checkpoint['model_config']['freezeConvLayers']
        videoSegmentLength = checkpoint['model_config']['segmentLength']
        overlapping = checkpoint['model_config']['overlap']
        frameSkip = checkpoint['model_config']['frameSkip']
        skipInitialFrames = checkpoint['model_config']['skipInitialFrames']
        useKeyframes = checkpoint['model_config']['useKeyframes']
        windowLen = checkpoint['model_config']['windowLen']

        model_, _ = initialize_model(model_name=model,
                                     num_classes=2,
                                     freezeConvLayers=freezeConvLayers,
                                     numDiPerVideos=numDynamicImages,
                                     joinType=joinType,
                                     use_pretrained=True)

        model_.to(DEVICE)
        # print(model_)
        if DEVICE == 'cuda:0':
            model_.load_state_dict(checkpoint['model_state_dict'],
                                   strict=False)
        else:
            model_.load_state_dict(checkpoint['model_state_dict'])

        datasetAll, labelsAll, numFramesAll, transforms = base_dataset(
            args.testDataset)
        test_dataset = ViolenceDataset(
            dataset=datasetAll,
            labels=labelsAll,
            numFrames=numFramesAll,
            spatial_transform=transforms['val'],
            numDynamicImagesPerVideo=numDynamicImages,
            videoSegmentLength=videoSegmentLength,
            positionSegment=None,
            overlaping=overlapping,
            frame_skip=frameSkip,
            skipInitialFrames=skipInitialFrames,
            ppType=None,
            useKeyframes=useKeyframes,
            windowLen=windowLen)
        test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                      batch_size=8,
                                                      shuffle=True,
                                                      num_workers=4)

        test_model(model_, test_dataloader)