Example #1
0
def get_transformv2(opt):
    """Build the albumentations pipeline selected by ``opt.preprocess``.

    Optional steps ('fixsize', 'resize', 'crop', 'color') are appended in
    order when their keyword appears in ``opt.preprocess``; the flip /
    normalize / tensor-conversion tail is always present.  The returned
    Compose applies the same spatial transforms to an extra 'imageB' target.
    """
    steps = []
    preprocess = opt.preprocess
    if 'fixsize' in preprocess:
        steps.append(tr.Resize(286, 286, interpolation=2, p=1))
    if 'resize' in preprocess:
        steps.append(
            tr.Resize(opt.load_size, opt.load_size, interpolation=2, p=1))
    if 'crop' in preprocess:
        steps.append(tr.RandomCrop(opt.crop_size, opt.crop_size, p=1))
    # Photometric (colorspace) augmentations.
    if 'color' in preprocess:
        steps += [
            tr.RandomContrast(limit=0.2, p=0.5),
            tr.RandomBrightness(limit=0.2, p=0.5),
            tr.HueSaturationValue(hue_shift_limit=20,
                                  sat_shift_limit=30,
                                  val_shift_limit=20,
                                  p=0.5),
        ]
    # Always-on tail: flips, normalization, tensor conversion.
    steps += [
        tr.HorizontalFlip(p=0.5),
        tr.VerticalFlip(p=0.5),
        tr.Normalize(p=1.0),
        ToTensorV2(p=1),
    ]
    return Compose(steps, additional_targets={'imageB': 'image'})
Example #2
0
    def __init__(
        self,
        prob=0.7,
        blur_prob=0.7,
        jitter_prob=0.7,
        rotate_prob=0.7,
        flip_prob=0.7,
    ):
        """Store augmentation probabilities and build the pipeline.

        ``prob`` gates the whole Compose; the other *_prob values are kept
        as attributes (the individual transforms below use their library
        default probabilities).
        """
        super().__init__()

        self.prob = prob
        self.blur_prob = blur_prob
        self.jitter_prob = jitter_prob
        self.rotate_prob = rotate_prob
        self.flip_prob = flip_prob

        # Exactly one of random/center 220x220 crop, half the time.
        crop_choice = OneOf(
            [
                transforms.RandomCrop(220, 220, p=0.5),
                transforms.CenterCrop(220, 220, p=0.5),
            ],
            p=0.5)
        self.transforms = al.Compose(
            [
                transforms.RandomRotate90(),
                transforms.Flip(),
                transforms.HueSaturationValue(),
                transforms.RandomBrightnessContrast(),
                transforms.Transpose(),
                crop_choice,
            ],
            p=self.prob)
Example #3
0
 def AlbumentationTrainTransform(self):
     """Return a callable mapping a PIL image to an augmented tensor.

     Pipeline: pad, 32x32 random crop, one forced 8x8 cutout, horizontal
     flip, then normalize-and-tensorize with mean/std 0.5.
     """
     stats = dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
     pipeline = tc.Compose([
         ta.PadIfNeeded(4, 4, always_apply=True),
         ta.RandomCrop(height=32, width=32, always_apply=True),
         ta.Cutout(num_holes=1, max_h_size=8, max_w_size=8, always_apply=True),
         ta.HorizontalFlip(),
         tp.ToTensor(stats),
     ])

     def apply(img):
         return pipeline(image=np.array(img))["image"]

     return apply
Example #4
0
def model9_resnet_train_transforms():
  """Training transforms for model 9 (ResNet): flip + 30x30 random crop.

  Returns a callable that maps a PIL image to a normalized tensor.
  """
  transforms = C.Compose([
    A.HorizontalFlip(),
    # Bug fix: p must be a probability in [0, 1]. The original p=5.0 was
    # invalid (it behaved as "always apply"); state that intent explicitly.
    A.RandomCrop(height=30, width=30, p=1.0),
    P.ToTensor(dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))
    ])
  return lambda img: transforms(image=np.array(img))["image"]
Example #5
0
def model12_train_transforms():
  """Training transforms for model 12: pad to 70, random 64x64 crop,
  flip, one forced 32x32 cutout, then normalize-and-tensorize."""
  stats = dict(mean=(0.4802, 0.4481, 0.3975), std=(0.2302, 0.2265, 0.2262))
  pipeline = C.Compose([
      A.PadIfNeeded(min_height=70, min_width=70,
                    border_mode=cv2.BORDER_CONSTANT, value=0.5),
      A.RandomCrop(height=64, width=64),
      A.HorizontalFlip(p=0.5),
      A.Cutout(num_holes=1, max_h_size=32, max_w_size=32, p=1),
      P.ToTensor(stats),
  ])

  def apply(img):
      return pipeline(image=np.array(img))["image"]

  return apply
Example #6
0
def model11_davidnet_train_transforms():
  """Training transforms for model 11 (DavidNet): pad to 36, random 32x32
  crop, flip, one forced 8x8 cutout, then normalize-and-tensorize."""
  stats = dict(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
  pipeline = C.Compose([
      A.PadIfNeeded(min_height=36, min_width=36,
                    border_mode=cv2.BORDER_CONSTANT, value=0.5),
      A.RandomCrop(height=32, width=32, p=1),
      A.HorizontalFlip(p=0.5),
      A.Cutout(num_holes=1, max_h_size=8, max_w_size=8, p=1),
      P.ToTensor(stats),
  ])

  def apply(img):
      return pipeline(image=np.array(img))["image"]

  return apply
Example #7
0
 def AlbumentationTrainTransform(self):
     """Return a callable mapping a PIL image to an augmented tensor.

     Pipeline: random horizontal flip, rotation in [-20, 20] degrees,
     forced 30x30 random crop, ImageNet normalization, tensor conversion.
     """
     tf = tc.Compose([ta.HorizontalFlip(p=0.5),
                      ta.Rotate(limit=(-20, 20)),
                      # Bug fix: p must lie in [0, 1]. The original p=5.0
                      # was invalid (it acted as "always apply"); make that
                      # intent explicit.
                      ta.RandomCrop(height=30, width=30, p=1.0),
                      ta.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
                      tp.ToTensor()
                      ])
     return lambda img: tf(image=np.array(img))["image"]
    def __call__(self, image: np.array):
        """Upscale *image* so both sides reach 299, then random-crop 299x299.

        :param image: H*W*3
        :return:      299*299*3
        """
        assert image.shape[2] == 3
        height = image.shape[0]
        width = image.shape[1]
        # Smallest integer zoom factor that makes both sides >= 299.
        scale = max(math.ceil(299 / height), math.ceil(299 / width))
        pipeline = Compose([transforms.Resize(height=height * scale,
                                              width=width * scale),
                            transforms.RandomCrop(height=299, width=299),
                            ToTensorV2()], p=1.0)
        return pipeline(image=image)['image']
Example #9
0
    def __init__(
        self,
        prob=0,
        Flip_prob=0,
        HueSaturationValue_prob=0,
        RandomBrightnessContrast_prob=0,
        crop_prob=0,
        randomrotate90_prob=0,
        elastictransform_prob=0,
        gridistortion_prob=0,
        opticaldistortion_prob=0,
        verticalflip_prob=0,
        horizontalflip_prob=0,
        randomgamma_prob=0,
        CoarseDropout_prob=0,
        RGBShift_prob=0,
        MotionBlur_prob=0,
        MedianBlur_prob=0,
        GaussianBlur_prob=0,
        GaussNoise_prob=0,
        ChannelShuffle_prob=0,
        ColorJitter_prob=0,
    ):
        """Build an augmentation pipeline with one probability per transform.

        All probabilities default to 0, i.e. every augmentation is disabled
        until explicitly enabled; ``prob`` gates the whole Compose.

        NOTE(review): only prob, randomrotate90_prob and
        elastictransform_prob are stored as attributes -- the remaining
        probabilities are consumed once below and not retained; confirm no
        caller expects them on the instance.
        """
        super().__init__()

        self.prob = prob
        self.randomrotate90_prob = randomrotate90_prob
        self.elastictransform_prob = elastictransform_prob

        self.transforms = al.Compose(
            [
                transforms.RandomRotate90(p=randomrotate90_prob),
                transforms.Flip(p=Flip_prob),
                transforms.HueSaturationValue(p=HueSaturationValue_prob),
                transforms.RandomBrightnessContrast(
                    p=RandomBrightnessContrast_prob),
                # Transpose uses the library default probability.
                transforms.Transpose(),
                # Exactly one of random/center 220x220 crop, gated by
                # crop_prob.
                OneOf(
                    [
                        transforms.RandomCrop(220, 220, p=0.5),
                        transforms.CenterCrop(220, 220, p=0.5),
                    ],
                    p=crop_prob,
                ),
                ElasticTransform(
                    p=elastictransform_prob,
                    alpha=120,
                    sigma=120 * 0.05,
                    alpha_affine=120 * 0.03,
                ),
                GridDistortion(p=gridistortion_prob),
                OpticalDistortion(p=opticaldistortion_prob,
                                  distort_limit=2,
                                  shift_limit=0.5),
                VerticalFlip(p=verticalflip_prob),
                HorizontalFlip(p=horizontalflip_prob),
                RandomGamma(p=randomgamma_prob),
                RGBShift(p=RGBShift_prob),
                MotionBlur(p=MotionBlur_prob, blur_limit=7),
                MedianBlur(p=MedianBlur_prob, blur_limit=9),
                GaussianBlur(p=GaussianBlur_prob, blur_limit=9),
                GaussNoise(p=GaussNoise_prob),
                ChannelShuffle(p=ChannelShuffle_prob),
                CoarseDropout(p=CoarseDropout_prob,
                              max_holes=8,
                              max_height=32,
                              max_width=32),
                ColorJitter(p=ColorJitter_prob)
                # transforms.Resize(352, 352),
                # transforms.Normalize(
                #     mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                # ),
            ],
            p=self.prob,
        )
def main():
    """Train a multi-label classifier end to end.

    Parses CLI args, sets up loss/model/augmentations, builds the training
    set grouped by patient, then runs the epoch loop while logging to
    models/<name>/log.csv and checkpointing every epoch.
    """
    args = parse_args()

    # Default run name: <arch>_<MMDDHH>.
    if args.name is None:
        args.name = '%s_%s' % (args.arch, datetime.now().strftime('%m%d%H'))

    if not os.path.exists('models/%s' % args.name):
        os.makedirs('models/%s' % args.name)

    # On resume, the previous run's pickled args replace everything from
    # this invocation except the resume flag itself.
    if args.resume:
        args = joblib.load('models/%s/args.pkl' % args.name)
        args.resume = True

    print('Config -----')
    for arg in vars(args):
        print('- %s: %s' % (arg, getattr(args, arg)))
    print('------------')

    # Persist the config: human-readable text plus a pickle for resuming.
    with open('models/%s/args.txt' % args.name, 'w') as f:
        for arg in vars(args):
            print('- %s: %s' % (arg, getattr(args, arg)), file=f)

    joblib.dump(args, 'models/%s/args.pkl' % args.name)

    # Seed python/numpy/torch for fresh runs only, so a resumed run keeps
    # advancing its RNG streams.
    if args.seed is not None and not args.resume:
        print('set random seed')
        random.seed(args.seed)
        np.random.seed(args.seed)
        torch.manual_seed(args.seed)

    # Loss selection. The weighted variants up-weight the 6th output 2x
    # (presumably the "any" label -- see the pred_type handling below).
    if args.loss == 'BCEWithLogitsLoss':
        criterion = BCEWithLogitsLoss().cuda()
    elif args.loss == 'WeightedBCEWithLogitsLoss':
        criterion = BCEWithLogitsLoss(weight=torch.Tensor([1., 1., 1., 1., 1., 2.]),
                                      smooth=args.label_smooth).cuda()
    elif args.loss == 'FocalLoss':
        criterion = FocalLoss().cuda()
    elif args.loss == 'WeightedFocalLoss':
        criterion = FocalLoss(weight=torch.Tensor([1., 1., 1., 1., 1., 2.])).cuda()
    else:
        raise NotImplementedError

    # 'all' predicts all 6 labels; 'except_any' drops one of them.
    if args.pred_type == 'all':
        num_outputs = 6
    elif args.pred_type == 'except_any':
        num_outputs = 5
    else:
        raise NotImplementedError

    cudnn.benchmark = True

    # create model
    model = get_model(model_name=args.arch,
                      num_outputs=num_outputs,
                      freeze_bn=args.freeze_bn,
                      dropout_p=args.dropout_p,
                      pooling=args.pooling,
                      lp_p=args.lp_p)
    model = model.cuda()

    # Augmentation pipeline: each optional step collapses to NoOp() when
    # its flag is off, keeping the Compose list structure fixed.
    train_transform = Compose([
        transforms.Resize(args.img_size, args.img_size),
        transforms.HorizontalFlip() if args.hflip else NoOp(),
        transforms.VerticalFlip() if args.vflip else NoOp(),
        transforms.ShiftScaleRotate(
            shift_limit=args.shift_limit,
            scale_limit=args.scale_limit,
            rotate_limit=args.rotate_limit,
            border_mode=cv2.BORDER_CONSTANT,
            value=0,
            p=args.shift_scale_rotate_p
        ) if args.shift_scale_rotate else NoOp(),
        transforms.RandomContrast(
            limit=args.contrast_limit,
            p=args.contrast_p
        ) if args.contrast else NoOp(),
        RandomErase() if args.random_erase else NoOp(),
        transforms.CenterCrop(args.crop_size, args.crop_size) if args.center_crop else NoOp(),
        ForegroundCenterCrop(args.crop_size) if args.foreground_center_crop else NoOp(),
        transforms.RandomCrop(args.crop_size, args.crop_size) if args.random_crop else NoOp(),
        transforms.Normalize(mean=model.mean, std=model.std),
        ToTensor(),
    ])

    if args.img_type:
        stage_1_train_dir = 'processed/stage_1_train_%s' %args.img_type
    else:
        stage_1_train_dir = 'processed/stage_1_train'

    # The CSV holds 6 rows per image (one per label class); take every 6th
    # row for the image list and regroup labels into an (N, 6) array.
    df = pd.read_csv('inputs/stage_1_train.csv')
    img_paths = np.array([stage_1_train_dir + '/' + '_'.join(s.split('_')[:-1]) + '.png' for s in df['ID']][::6])
    labels = np.array([df.loc[c::6, 'Label'].values for c in range(6)]).T.astype('float32')

    df = df[::6]
    df['img_path'] = img_paths
    for c in range(6):
        df['label_%d' %c] = labels[:, c]
    df['ID'] = df['ID'].apply(lambda s: '_'.join(s.split('_')[:-1]))

    # Join scan metadata so samples can be grouped by patient.
    meta_df = pd.read_csv('processed/stage_1_train_meta.csv')
    meta_df['ID'] = meta_df['SOPInstanceUID']
    test_meta_df = pd.read_csv('processed/stage_1_test_meta.csv')
    df = pd.merge(df, meta_df, how='left')

    patient_ids = meta_df['PatientID'].unique()
    test_patient_ids = test_meta_df['PatientID'].unique()
    # Optionally drop patients that also appear in the test set (leakage
    # guard).
    if args.remove_test_patient_ids:
        patient_ids = np.array([s for s in patient_ids if not s in test_patient_ids])

    # Flatten the per-patient groups back into aligned path/label arrays.
    train_img_paths = np.hstack(df[['img_path', 'PatientID']].groupby(['PatientID'])['img_path'].apply(np.array).loc[patient_ids].to_list()).astype('str')
    train_labels = []
    for c in range(6):
        train_labels.append(np.hstack(df[['label_%d' %c, 'PatientID']].groupby(['PatientID'])['label_%d' %c].apply(np.array).loc[patient_ids].to_list()))
    train_labels = np.array(train_labels).T

    if args.resume:
        checkpoint = torch.load('models/%s/checkpoint.pth.tar' % args.name)

    # train
    train_set = Dataset(
        train_img_paths,
        train_labels,
        transform=train_transform)
    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        # pin_memory=True,
    )

    # Optimizer over trainable parameters only (frozen layers excluded).
    if args.optimizer == 'Adam':
        optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'AdamW':
        optimizer = optim.AdamW(
            filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'RAdam':
        optimizer = RAdam(
            filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer == 'SGD':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr,
                              momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov)
    else:
        raise NotImplementedError

    # Mixed-precision initialization (NVIDIA apex) when requested.
    if args.apex:
        amp.initialize(model, optimizer, opt_level='O1')

    if args.scheduler == 'CosineAnnealingLR':
        scheduler = lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=args.epochs, eta_min=args.min_lr)
    elif args.scheduler == 'MultiStepLR':
        scheduler = lr_scheduler.MultiStepLR(optimizer,
            milestones=[int(e) for e in args.milestones.split(',')], gamma=args.gamma)
    else:
        raise NotImplementedError

    log = {
        'epoch': [],
        'loss': [],
    }

    start_epoch = 0

    # Restore model/optimizer/scheduler state and the log when resuming.
    if args.resume:
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        start_epoch = checkpoint['epoch']
        log = pd.read_csv('models/%s/log.csv' % args.name).to_dict(orient='list')

    for epoch in range(start_epoch, args.epochs):
        print('Epoch [%d/%d]' % (epoch + 1, args.epochs))

        # train for one epoch
        train_loss = train(args, train_loader, model, criterion, optimizer, epoch)

        # NOTE(review): only CosineAnnealingLR is stepped here; MultiStepLR
        # is never stepped in this loop -- confirm it is stepped inside
        # train() or this is a bug.
        if args.scheduler == 'CosineAnnealingLR':
            scheduler.step()

        print('loss %.4f' % (train_loss))

        log['epoch'].append(epoch)
        log['loss'].append(train_loss)

        pd.DataFrame(log).to_csv('models/%s/log.csv' % args.name, index=False)

        # Save the latest weights plus a fully resumable checkpoint.
        torch.save(model.state_dict(), 'models/%s/model.pth' % args.name)
        print("=> saved model")

        state = {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
        }
        torch.save(state, 'models/%s/checkpoint.pth.tar' % args.name)
Example #11
0
    # Collect image/mask paths from every training directory, then sort so
    # images and masks line up by index.
    for i in train_data_path:
        train_img_paths.extend(glob(os.path.join(i, "images", "*")))
        train_mask_paths.extend(glob(os.path.join(i, "masks", "*")))
    train_img_paths.sort()
    train_mask_paths.sort()

    # Always-on (p=1) augmentation pipeline.
    # NOTE(review): this assignment shadows the `transforms` module name;
    # the right-hand side still resolves against the module, but any later
    # use of `transforms` in this scope sees the Compose object -- confirm
    # that is intended.
    transforms = al.Compose(
        [
            transforms.RandomRotate90(),
            transforms.Flip(),
            transforms.HueSaturationValue(),
            transforms.RandomBrightnessContrast(),
            transforms.Transpose(),
            # Exactly one of random/center 220x220 crop.
            OneOf(
                [
                    transforms.RandomCrop(220, 220, p=0.5),
                    transforms.CenterCrop(220, 220, p=0.5),
                ],
                p=1,
            ),
            # transforms.Resize(352,352),
            # transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ],
        p=1,
    )
    dataset = KvasirDataset(train_img_paths,
                            train_mask_paths,
                            352,
                            transform=transforms,
                            type="train")
Example #12
0
# axs[0].imshow(image)
# axs[0].set_title("Original",fontsize=30)

# image_ = transforms.RandomRotate90(always_apply=True)(image=image)["image"]
# axs[1].imshow(image_)
# axs[1].set_title("Rotate",fontsize=30)

# image_ = transforms.Flip(always_apply=True)(image=image)["image"]
# axs[2].imshow(image_)
# axs[2].set_title("Flip",fontsize=30)

# Panel 0: hue/saturation/value shift, forced via always_apply.
image_ = transforms.HueSaturationValue(always_apply=True)(image=image)["image"]
axs[0].imshow(image_)
axs[0].set_title("Saturation", fontsize=30)

# Panel 1: brightness/contrast jitter, also forced.
image_ = transforms.RandomBrightnessContrast(always_apply=True)(
    image=image)["image"]
axs[1].imshow(image_)
axs[1].set_title("Brightness", fontsize=30)

# Panel 2: one of random/center 204x250 crop, always applied (p=1).
image_ = OneOf(
    [
        transforms.RandomCrop(204, 250, p=1),
        transforms.CenterCrop(204, 250, p=1),
    ],
    p=1,
)(image=image)["image"]
axs[2].imshow(image_)
axs[2].set_title("Random Crop", fontsize=30)
Example #13
0
# Baseline torchvision pipeline: reflect-padded random crop + random flip.
pmda_train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
    transforms.RandomHorizontalFlip(),
    transforms.CenterCrop(32),
    transforms.ToTensor(),
    transforms.Normalize(*stats)
])
# Albumentations pipeline: pad to 40, crop back to 32, flip, light cutout.
train_transform = A.Compose([
    albtransforms.PadIfNeeded(min_height=40,
                              min_width=40,
                              border_mode=4,
                              value=[0, 0, 0],
                              always_apply=False,
                              p=1.),
    albtransforms.RandomCrop(32, 32, always_apply=False, p=1.),
    # Bug fix: the first positional argument of HorizontalFlip is
    # `always_apply`, not `p`, so HorizontalFlip(0.5) forced the flip on
    # every sample. Pass the probability by keyword instead.
    albtransforms.HorizontalFlip(p=0.5),
    albtransforms.Cutout(num_holes=8,
                         max_h_size=8,
                         max_w_size=8,
                         always_apply=False,
                         p=0.1),
    A.Normalize(*stats),
    ToTensor()
])

test_transform = A.Compose([A.Normalize(*stats), ToTensor()])

# loaded only when loaddata() invoked
trainset = None
Example #14
0
        return tensor + torch.randn(tensor.size()) * self.std + self.mean


# Per-channel image statistics used to normalize (CIFAR-10-style classes).
stats = ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # normalize image
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Baseline torchvision pipeline: reflect-padded random crop + random flip.
pmda_train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
    transforms.RandomHorizontalFlip(),
    transforms.CenterCrop(32),
    transforms.ToTensor(),
    transforms.Normalize(*stats)
])
# Albumentations pipeline: 28x28 crop, light rotate/cutout, forced flip.
train_transform = A.Compose([  # cutout, HorizontalFlip
    albtransforms.RandomCrop(28, 28, always_apply=False, p=1.0),
    albtransforms.Rotate(30, p=0.1),
    albtransforms.Cutout(num_holes=8,
                         max_h_size=8,
                         max_w_size=8,
                         always_apply=False,
                         p=0.1),
    # Bug fix: the first positional argument of HorizontalFlip is
    # `always_apply`, not the probability `p`; pass it by keyword so the
    # "always flip" intent goes through the documented parameter.
    albtransforms.HorizontalFlip(p=1.0),
    A.Normalize(*stats),
    ToTensor()
])

test_transform = A.Compose([A.Normalize(*stats), ToTensor()])

# loaded only when loaddata() invoked
trainset = None