Example #1
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.3, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=12)
    arg('--model',
        type=str,
        default='UNet',
        choices=['UNet', 'UNet11', 'UNet16', 'LinkNet34', 'AlbuNet34'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 1
    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained=True)
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained=True)
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes, pretrained=True)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    loss = LossBinary(jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names, shuffle=False, transform=None, limit=None):
        return DataLoader(dataset=AngyodysplasiaDataset(file_names,
                                                        transform=transform,
                                                        limit=limit),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    train_transform = DualCompose([
        SquarePaddingTraining(),
        CenterCrop([574, 574]),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        ImageOnly(RandomHueSaturationValue()),
        ImageOnly(Normalize())
    ])

    val_transform = DualCompose([
        SquarePaddingTraining(),
        CenterCrop([574, 574]),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform,
                               limit=args.limit)
    valid_loader = make_loader(val_file_names, transform=val_transform)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=validation_binary,
                fold=args.fold)
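
Example #1 (like nearly every example below) constructs LossBinary(jaccard_weight=...) without showing its definition. A minimal sketch of this kind of loss, assuming the usual BCE-plus-log-Jaccard blend from the TernausNet line of work (the class name matches the examples; the body is an assumption, not the verbatim implementation):

import torch
from torch import nn


class LossBinary:
    """Sketch only: BCE on logits blended with a soft log-Jaccard term."""

    def __init__(self, jaccard_weight=0.0):
        self.nll_loss = nn.BCEWithLogitsLoss()
        self.jaccard_weight = jaccard_weight

    def __call__(self, outputs, targets):
        loss = (1 - self.jaccard_weight) * self.nll_loss(outputs, targets)
        if self.jaccard_weight:
            eps = 1e-15
            probs = torch.sigmoid(outputs)
            intersection = (probs * targets).sum()
            union = probs.sum() + targets.sum() - intersection
            # Subtracting log(soft IoU) drives the overlap towards 1.
            loss -= self.jaccard_weight * torch.log(
                (intersection + eps) / (union + eps))
        return loss
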
Example #2
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--name', type=str)
    arg('--jaccard-weight', default=0.25, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--output-dir', default='../data/runs', help='checkpoint root')
    arg('--batch-size', type=int, default=32)
    arg('--iter-size', type=int, default=1)
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=4)
    arg('--seed', type=int, default=0)
    arg('--model', type=str, default=models.archs[0], choices=models.archs)
    arg('--loss',
        type=str,
        default='focal',
        choices=[
            'focal', 'lovasz', 'bjd', 'bce_jaccard', 'bce_dice', 'cos_dice',
            'hinge'
        ])
    arg('--focal-gamma', type=float, default=.5)
    arg('--num-channels', type=int, default=3)
    arg('--weighted-sampler', action="store_true")
    arg('--ignore-empty-masks', action='store_true')
    arg('--remove-suspicious', action='store_true')
    arg('--resume', action="store_true")
    args = parser.parse_args()

    random.seed(args.seed)
    torch.manual_seed(args.seed)

    if not args.name:
        experiment = uuid.uuid4().hex
    else:
        experiment = args.name

    output_dir = Path(args.output_dir) / experiment
    output_dir.mkdir(exist_ok=True, parents=True)
    output_dir.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    # in case --resume is provided it will be loaded later
    model = models.get_model(None, args.model)
    # model = models.get_model(f"../data/runs/exp81/model_{args.fold}.pth", args.model)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    train_ids, val_ids = dataset.get_split(args.fold)

    cudnn.benchmark = True

    train_loader = dataset.make_loader(
        train_ids,
        num_channels=args.num_channels,
        transform=dataset.train_transform(),
        shuffle=True,
        weighted_sampling=args.weighted_sampler,
        ignore_empty_masks=args.ignore_empty_masks,
        remove_suspicious=args.remove_suspicious,
        batch_size=args.batch_size,
        workers=args.workers)

    valid_loader = dataset.make_loader(
        val_ids,
        num_channels=args.num_channels,
        transform=dataset.val_transform(),
        shuffle=False,
        batch_size=args.batch_size,
        workers=args.workers)

    # optimizer = Adam([p for p in model.parameters() if p.requires_grad], lr=args.lr)
    optimizer = Adam(model.parameters(), lr=args.lr)

    # loss = LossBinary(jaccard_weight=args.jaccard_weight)
    # loss = LossBinaryMixedDiceBCE(dice_weight=0.5, bce_weight=0.5)
    if args.loss == 'focal':
        loss = FocalLoss(args.focal_gamma)
    elif args.loss == 'lovasz':
        loss = LossLovasz()
    elif args.loss == 'bjd':
        loss = BCEDiceJaccardLoss({'bce': 0.25, 'jaccard': None, 'dice': 0.75})
    elif args.loss == 'bce_jaccard':
        loss = LossBinary(args.jaccard_weight)
    elif args.loss == 'bce_dice':
        import loss2
        bce_weight = 1
        dice_weight = 2
        loss = loss2.make_loss(bce_weight, dice_weight)
    elif args.loss == 'cos_dice':
        import loss2
        loss = loss2.make_cos_dice_loss()
    elif args.loss == 'hinge':
        loss = LossHinge()
    else:
        raise NotImplementedError

    validation = validation_binary
    scheduler = ReduceLROnPlateau(optimizer,
                                  verbose=True,
                                  min_lr=1e-7,
                                  factor=0.5)
    snapshot = utils.fold_snapshot(output_dir,
                                   args.fold) if args.resume else None

    utils.train(experiment=experiment,
                output_dir=output_dir,
                optimizer=optimizer,
                args=args,
                model=model,
                criterion=loss,
                scheduler=scheduler,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=validation,
                fold=args.fold,
                batch_size=args.batch_size,
                n_epochs=args.n_epochs,
                snapshot=snapshot,
                iter_size=args.iter_size)
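
Example #2 defaults to --loss focal with --focal-gamma 0.5. A minimal sketch of a binary focal loss, assuming the standard (1 - p_t)^gamma modulation of BCE (the repo's FocalLoss may differ in detail):

import torch
import torch.nn.functional as F


class FocalLoss:
    """Sketch only: down-weights easy pixels by (1 - p_t)**gamma."""

    def __init__(self, gamma=0.5):
        self.gamma = gamma

    def __call__(self, outputs, targets):
        bce = F.binary_cross_entropy_with_logits(outputs, targets,
                                                 reduction='none')
        p_t = torch.exp(-bce)  # probability assigned to the true class
        return ((1 - p_t) ** self.gamma * bce).mean()
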
Example #3
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.5, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=12)
    arg('--train_crop_height', type=int, default=1024)
    arg('--train_crop_width', type=int, default=1280)
    arg('--val_crop_height', type=int, default=1024)
    arg('--val_crop_width', type=int, default=1280)
    arg('--type',
        type=str,
        default='binary',
        choices=['binary', 'parts', 'instruments'])
    arg('--model', type=str, default='UNet', choices=moddel_list.keys())

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    if not utils.check_crop_size(args.train_crop_height,
                                 args.train_crop_width):
        print('Input image sizes should be divisible by 32, but train '
              'crop sizes ({train_crop_height} and {train_crop_width}) '
              'are not.'.format(train_crop_height=args.train_crop_height,
                                train_crop_width=args.train_crop_width))
        sys.exit(0)

    if not utils.check_crop_size(args.val_crop_height, args.val_crop_width):
        print('Input image sizes should be divisible by 32, but validation '
              'crop sizes ({val_crop_height} and {val_crop_width}) '
              'are not.'.format(val_crop_height=args.val_crop_height,
                                val_crop_width=args.val_crop_width))
        sys.exit(0)

    if args.type == 'parts':
        num_classes = 4
    elif args.type == 'instruments':
        num_classes = 8
    else:
        num_classes = 1

    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    else:
        model_name = moddel_list[args.model]
        model = model_name(num_classes=num_classes, pretrained=True)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()
    else:
        raise SystemError('GPU device not found')

    if args.type == 'binary':
        loss = LossBinary(jaccard_weight=args.jaccard_weight)

    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    problem_type='binary',
                    batch_size=1):
        return DataLoader(dataset=RoboticsDataset(file_names,
                                                  transform=transform,
                                                  problem_type=problem_type),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=batch_size,
                          pin_memory=torch.cuda.is_available())

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    def train_transform(p=1):
        return Compose([
            PadIfNeeded(min_height=args.train_crop_height,
                        min_width=args.train_crop_width,
                        p=1),
            RandomCrop(height=args.train_crop_height,
                       width=args.train_crop_width,
                       p=1),
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            Normalize(p=1)
        ],
                       p=p)

    def val_transform(p=1):
        return Compose([
            PadIfNeeded(min_height=args.val_crop_height,
                        min_width=args.val_crop_width,
                        p=1),
            CenterCrop(
                height=args.val_crop_height, width=args.val_crop_width, p=1),
            Normalize(p=1)
        ],
                       p=p)

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform(p=1),
                               problem_type=args.type,
                               batch_size=args.batch_size)
    valid_loader = make_loader(val_file_names,
                               transform=val_transform(p=1),
                               problem_type=args.type,
                               batch_size=len(device_ids) if device_ids else 1)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    # report the parameter count rather than printing the bare generator
    print('params:', sum(p.numel() for p in model.parameters()))
    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=valid,
                fold=args.fold,
                num_classes=num_classes)
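
For the 'parts' and 'instruments' problem types, Example #3 switches to LossMulti with 4 or 8 classes. A sketch of the multi-class analogue of LossBinary, assuming cross-entropy plus per-class soft log-Jaccard terms (an assumption, not the repo's verbatim code):

import torch
from torch import nn


class LossMulti:
    """Sketch only: cross-entropy plus per-class soft log-Jaccard terms."""

    def __init__(self, num_classes, jaccard_weight=0.0, class_weights=None):
        weight = (None if class_weights is None
                  else torch.tensor(class_weights, dtype=torch.float32))
        self.ce_loss = nn.CrossEntropyLoss(weight=weight)
        self.num_classes = num_classes
        self.jaccard_weight = jaccard_weight

    def __call__(self, outputs, targets):
        # outputs: (N, C, H, W) logits; targets: (N, H, W) class indices.
        loss = (1 - self.jaccard_weight) * self.ce_loss(outputs, targets)
        if self.jaccard_weight:
            eps = 1e-15
            probs = outputs.softmax(dim=1)
            for cls in range(self.num_classes):
                target_cls = (targets == cls).float()
                output_cls = probs[:, cls]
                intersection = (output_cls * target_cls).sum()
                union = output_cls.sum() + target_cls.sum() - intersection
                loss -= self.jaccard_weight * torch.log(
                    (intersection + eps) / (union + eps))
        return loss
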
Example #4
def main():
    parser = argparse.ArgumentParser()

    arg = parser.add_argument
    arg('--jaccard-weight', default=1, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=8)
    arg('--n-epochs', type=int, default=14)
    arg('--lr', type=float, default=0.000001)
    arg('--workers', type=int, default=8)
    arg('--type',
        type=str,
        default='binary',
        choices=['binary', 'parts', 'instruments'])
    arg('--model',
        type=str,
        default='TernausNet',
        choices=['UNet', 'UNet11', 'LinkNet34', 'TernausNet'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    if args.type == 'parts':
        num_classes = 3
    elif args.type == 'instruments':
        num_classes = 8
    else:
        num_classes = 1

    if args.model == 'TernausNet':
        model = TernausNet34(num_classes=num_classes)
    else:
        # Note: every listed model choice currently falls back to TernausNet34.
        model = TernausNet34(num_classes=num_classes)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.type == 'binary':
        loss = LossBinary(jaccard_weight=args.jaccard_weight)
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    mode='train',
                    problem_type='binary'):
        return DataLoader(dataset=MapDataset(file_names,
                                             transform=transform,
                                             problem_type=problem_type,
                                             mode=mode),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    # labels = pd.read_csv('data/stage1_train_labels.csv')
    # labels = os.listdir('data/stage1_train_')
    # train_file_names, val_file_names = train_test_split(labels, test_size=0.2, random_state=42)

    # print('num train = {}, num_val = {}'.format(len(train_file_names), len(val_file_names)))

    # train_transform = DualCompose([
    #     HorizontalFlip(),
    #     VerticalFlip(),
    #     RandomCrop([256, 256]),
    #     RandomRotate90(),
    #     ShiftScaleRotate(),
    #     ImageOnly(RandomHueSaturationValue()),
    #     ImageOnly(RandomBrightness()),
    #     ImageOnly(RandomContrast()),
    #     ImageOnly(Normalize())
    # ])
    train_transform = DualCompose([
        OneOrOther(*(OneOf([
            Distort1(distort_limit=0.05, shift_limit=0.05),
            Distort2(num_steps=2, distort_limit=0.05)
        ]),
                     ShiftScaleRotate(shift_limit=0.0625,
                                      scale_limit=0.10,
                                      rotate_limit=45)),
                   prob=0.5),
        RandomRotate90(),
        RandomCrop([256, 256]),
        RandomFlip(prob=0.5),
        Transpose(prob=0.5),
        ImageOnly(RandomContrast(limit=0.2, prob=0.5)),
        ImageOnly(RandomFilter(limit=0.5, prob=0.2)),
        ImageOnly(RandomHueSaturationValue(prob=0.2)),
        ImageOnly(RandomBrightness()),
        ImageOnly(Normalize())
    ])

    val_transform = DualCompose([
        # RandomCrop([256, 256]),
        Rescale([256, 256]),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(TRAIN_ANNOTATIONS_PATH,
                               shuffle=True,
                               transform=train_transform,
                               problem_type=args.type)
    valid_loader = make_loader(VAL_ANNOTATIONS_PATH,
                               transform=val_transform,
                               mode='valid',
                               problem_type=args.type)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=valid,
                fold=args.fold,
                num_classes=num_classes)
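
Examples #1 and #4 (and #6 and #9 below) build their augmentation pipelines from DualCompose and ImageOnly: geometric transforms must move the image and its mask together, while photometric ones touch the image alone. A minimal sketch of the two wrappers (assumed, not the repos' exact code):

class DualCompose:
    """Sketch only: applies each transform to the (image, mask) pair."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, mask=None):
        for t in self.transforms:
            image, mask = t(image, mask)
        return image, mask


class ImageOnly:
    """Sketch only: restricts a photometric transform to the image."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, image, mask=None):
        # The mask must pass through untouched.
        return self.transform(image), mask
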
Example #5
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.5, type=float)
    arg('--device-ids', type=str, default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--filepath', type=str, help='folder with images and annotation masks')
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=32)
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=12)
    arg('--train_crop_height', type=int, default=416)
    arg('--train_crop_width', type=int, default=416)
    arg('--val_crop_height', type=int, default=416)
    arg('--val_crop_width', type=int, default=416)
    arg('--type', type=str, default='binary', choices=['binary', 'multi'])
    arg('--model', type=str, default='UNet', choices=model_list.keys())
    arg('--datatype', type=str, default='buildings',
        choices=['buildings', 'roads', 'combined'])
    arg('--pretrained', action='store_true',
        help='use pretrained network for initialisation')
    arg('--num_classes', type=int, default=1)

    args = parser.parse_args()

    timestr = time.strftime("%Y%m%d-%H%M%S")

    root = Path(args.root)
    root = Path(os.path.join(root, timestr))
    root.mkdir(exist_ok=True, parents=True)
    # dataset_type = args.filepath.split("/")[-3]
    dataset_type = args.datatype
    print('log', root, dataset_type)
    if not utils.check_crop_size(args.train_crop_height, args.train_crop_width):
        print('Input image sizes should be divisible by 32, but train '
              'crop sizes ({train_crop_height} and {train_crop_width}) '
              'are not.'.format(train_crop_height=args.train_crop_height, train_crop_width=args.train_crop_width))
        sys.exit(0)

    if not utils.check_crop_size(args.val_crop_height, args.val_crop_width):
        print('Input image sizes should be divisible by 32, but validation '
              'crop sizes ({val_crop_height} and {val_crop_width}) '
              'are not.'.format(val_crop_height=args.val_crop_height, val_crop_width=args.val_crop_width))
        sys.exit(0)

    num_classes = args.num_classes

    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    else:
        model_name = model_list[args.model]
        model = model_name(num_classes=num_classes, pretrained=args.pretrained)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()
    else:
        raise SystemError('GPU device not found')

    if args.type == 'binary':
        loss = LossBinary(jaccard_weight=args.jaccard_weight)
    elif args.num_classes == 2:
        labelweights = [89371542, 7083233]
        labelweights = np.sum(labelweights) / \
            (np.multiply(num_classes, labelweights))

        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight, class_weights=labelweights)

    else:
        #labelweights = [30740321,3046555,1554577]
        #labelweights = labelweights / np.sum(labelweights)
        #labelweights = 1 / np.log(1.2 + labelweights)
        labelweights = [89371542, 29703049, 7083233]
        labelweights = np.sum(labelweights) / \
            (np.multiply(num_classes, labelweights))

        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight, class_weights=labelweights)

    cudnn.benchmark = True

    train_filename = os.path.join(args.filepath, 'trainval.txt')
    val_filename = os.path.join(args.filepath, 'test.txt')

    def train_transform(p=1):
        return Compose([
            PadIfNeeded(min_height=args.train_crop_height,
                        min_width=args.train_crop_width, p=1),
            RandomCrop(height=args.train_crop_height,
                       width=args.train_crop_width, p=1),
            VerticalFlip(p=0.5),
            HorizontalFlip(p=0.5),
            Normalize(p=1)
        ], p=p)

    def val_transform(p=1):
        return Compose([
            PadIfNeeded(min_height=args.val_crop_height,
                        min_width=args.val_crop_width, p=1),
            CenterCrop(height=args.val_crop_height,
                       width=args.val_crop_width, p=1),
            Normalize(p=1)
        ], p=p)

    train_loader = make_loader(train_filename, shuffle=True, transform=train_transform(
        p=1), problem_type=args.type, batch_size=args.batch_size, datatype=args.datatype)
    valid_loader = make_loader(val_filename, transform=val_transform(p=1), problem_type=args.type,
                               batch_size=len(device_ids) if device_ids else 1,
                               datatype=args.datatype)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))
    args.root = root
    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    utils.train(
        init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
        args=args,
        model=model,
        criterion=loss,
        train_loader=train_loader,
        valid_loader=valid_loader,
        validation=valid,
        num_classes=num_classes,
        model_name=args.model,
        dataset_type=dataset_type
    )
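
The inverse-frequency class weights in Example #5 deserve a quick unpacking: with pixel counts [89371542, 29703049, 7083233] over num_classes = 3, each weight is total / (num_classes * count), so the rarest class is weighted hardest. A worked check:

import numpy as np

labelweights = np.array([89371542, 29703049, 7083233])
weights = labelweights.sum() / (3 * labelweights)
print(weights.round(3))  # [0.471 1.416 5.937]
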
Example #6
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=1, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=8)
    arg('--type',
        type=str,
        default='binary',
        choices=['binary', 'parts', 'instruments'])
    arg('--model',
        type=str,
        default='UNet',
        choices=['UNet', 'UNet11', 'LinkNet34'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    if args.type == 'parts':
        num_classes = 4
    elif args.type == 'instruments':
        num_classes = 8
    else:
        num_classes = 1

    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.type == 'binary':
        loss = LossBinary(jaccard_weight=args.jaccard_weight)
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    problem_type='binary'):
        return DataLoader(dataset=RoboticsDataset(file_names,
                                                  transform=transform,
                                                  problem_type=problem_type),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    train_transform = DualCompose(
        [HorizontalFlip(),
         VerticalFlip(),
         ImageOnly(Normalize())])

    val_transform = DualCompose([ImageOnly(Normalize())])

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform,
                               problem_type=args.type)
    valid_loader = make_loader(val_file_names,
                               transform=val_transform,
                               problem_type=args.type)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=valid,
                fold=args.fold,
                num_classes=num_classes)
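
Every example hands validation_binary (or validation_multi) to utils.train without defining it, and the signature varies slightly between snippets. A sketch of what such a pass typically computes, assuming a mean loss plus a mean Jaccard over the validation loader (the real helper may report more):

import numpy as np
import torch


def validation_binary(model, criterion, valid_loader, num_classes=None):
    """Sketch only: one evaluation pass reporting loss and Jaccard."""
    model.eval()
    losses, jaccards = [], []
    with torch.no_grad():
        for inputs, targets in valid_loader:
            inputs, targets = inputs.cuda(), targets.cuda()
            outputs = model(inputs)
            losses.append(criterion(outputs, targets).item())
            preds = (outputs > 0).float()  # threshold logits at p = 0.5
            eps = 1e-15
            intersection = (preds * targets).sum().item()
            union = preds.sum().item() + targets.sum().item() - intersection
            jaccards.append((intersection + eps) / (union + eps))
    metrics = {'valid_loss': float(np.mean(losses)),
               'jaccard': float(np.mean(jaccards))}
    print('Valid loss: {valid_loss:.5f}, jaccard: {jaccard:.5f}'.format(**metrics))
    return metrics
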
Example #7
X_img = './train/X_img.npy'
X_dpt = './train/X_dpt.npy'
Y = './train/Y_labels.npy'

train_dataset = Load_Dataset(X_img, X_dpt, Y, False, True)

train_loader = DataLoader(train_dataset,
                          batch_size=3,
                          shuffle=False,
                          num_workers=0,
                          drop_last=True)
model = UNetVGG16(num_classes=4).cuda()  # inputs go to the GPU below, so the model must too

lr = 0.0001
loss = LossBinary(jaccard_weight=0.3)
#init_optimizer = lambda lr: optim.Adam(model.parameters(), lr=lr, momentum=0.9, weight_decay=0.0001)
init_optimizer = lambda lr: optim.SGD(
    model.parameters(), lr=lr, momentum=0.9, weight_decay=0.0001)
optimizer = init_optimizer(lr)
epoch_best_loss = 1.0

for epoch in range(100):  # loop over the dataset multiple times
    epoch_loss = 0.0
    running_loss = 0.0

    for idx, (inputs, targets) in enumerate(train_loader, 0):

        # generate forward prediction
        outputs = model(inputs.cuda())
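
        # Sketch only: the original snippet is truncated here. An assumed
        # completion of the training step follows; it presumes targets are
        # float masks and that epoch_best_loss tracks the mean epoch loss.
        targets = targets.cuda().float()
        batch_loss = loss(outputs, targets)

        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()

    epoch_loss = running_loss / max(1, len(train_loader))
    print('epoch {}: loss {:.5f}'.format(epoch, epoch_loss))
    if epoch_loss < epoch_best_loss:
        epoch_best_loss = epoch_loss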
Example #8
               'UNet16': UNet16,
               'UNet': UNet,
               'AlbuNet': AlbuNet,
               'SeRes50NextHyper': SeRes50NextHyper,
               'SE_ResNeXt_50': SE_ResNeXt_50,
               'DenseNet161': DenseNet161,
               'LinkNeXt': LinkNeXt,
               'LinkNet34': LinkNet34,
               'GCN': GCN,
               'Incv3': Incv3,
               'ResNet50_DUCHDC': ResNet50_DUCHDC}

losses = {
    'lava': LovaszHingeLoss(),
    'bce': BCEWithLogitsLoss(),
    'bce_jaccard': LossBinary(),
    'bce_lava': LovaszBCE(bce_weight=0.1),
    'focal': RobustFocalLoss2d(),
    'focal_lava': FocalLovasz(focal_weight=0.3),
    'focal_jaccard': FocalJaccard(),
    'focal_lava_jaccard': RFocalLovaszJaccard(jaccard_weight=0.15, focal_weight=0.15)
}


def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--device_ids', type=str, default='0,1,2,3', help='For example 0,1 to run on two GPUs')
    # argparse's type=bool is a trap (any non-empty string parses as True),
    # so boolean options are exposed as flags instead.
    arg('--requires_grad', action='store_true', help='freeze encoder')
    arg('--start_epoch', type=str, default='0', help='start epoch, e.g. 21')
    arg('--rop_step', type=int, default=6, help='reduce-on-plateau step')
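
    # Sketch only: the original snippet is truncated here. An assumed
    # continuation showing how the name->class registries above are used;
    # 'arch_registry' stands in for the unnamed, truncated model dict, and
    # the '--model' and '--loss' flags are assumptions.
    arg('--model', type=str, default='LinkNet34')
    arg('--loss', type=str, default='focal', choices=losses.keys())
    args = parser.parse_args()

    model = arch_registry[args.model](num_classes=1)  # hypothetical registry name
    criterion = losses[args.loss]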
Example #9
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', type=float, default=1)
    arg('--root', type=str, default='runs/debug', help='checkpoint root')
    arg('--image-path', type=str, default='data', help='image path')
    arg('--batch-size', type=int, default=2)
    arg('--n-epochs', type=int, default=100)
    arg('--optimizer', type=str, default='Adam', help='Adam or SGD')
    arg('--lr', type=float, default=0.001)
    arg('--workers', type=int, default=10)
    arg('--model',
        type=str,
        default='UNet16',
        choices=[
            'UNet', 'UNet11', 'UNet16', 'LinkNet34', 'FCDenseNet57',
            'FCDenseNet67', 'FCDenseNet103'
        ])
    arg('--model-weight', type=str, default=None)
    arg('--resume-path', type=str, default=None)
    arg('--attribute',
        type=str,
        default='all',
        choices=[
            'pigment_network', 'negative_network', 'streaks',
            'milia_like_cyst', 'globules', 'all'
        ])
    args = parser.parse_args()

    ## folder for checkpoint
    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    image_path = args.image_path

    #print(args)
    if args.attribute == 'all':
        num_classes = 5
    else:
        num_classes = 1
    args.num_classes = num_classes
    ### save initial parameters
    print('--' * 10)
    print(args)
    print('--' * 10)
    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    ## load pretrained model
    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'FCDenseNet103':
        model = FCDenseNet103(num_classes=num_classes)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    ## multiple GPUs
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    model.to(device)

    ## load pretrained model
    if args.model_weight is not None:
        state = torch.load(args.model_weight)
        #epoch = state['epoch']
        #step = state['step']
        model.load_state_dict(state['model'])
        print('--' * 10)
        print('Load pretrained model', args.model_weight)
        #print('Restored model, epoch {}, step {:,}'.format(epoch, step))
        print('--' * 10)
        ## replace the last layer:
        ## although the model and the pre-trained weights have different sizes
        ## (the last layer differs), PyTorch can still load the weights.
        ## I found that the weights for one layer are just duplicated across
        ## all layers, so the following code is not necessary.
        # if args.attribute == 'all':
        #     model = list(model.children())[0]
        #     num_filters = 32
        #     model.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
        #     print('--' * 10)
        #     print('Load pretrained model and replace the last layer', args.model_weight, num_classes)
        #     print('--' * 10)
        #     if torch.cuda.device_count() > 1:
        #         model = nn.DataParallel(model)
        #     model.to(device)

    ## model summary
    print_model_summay(model)

    ## define loss
    loss_fn = LossBinary(jaccard_weight=args.jaccard_weight)

    ## It enables benchmark mode in cudnn.
    ## benchmark mode is good whenever your input sizes for your network do not vary. This way, cudnn will look for the
    ## optimal set of algorithms for that particular configuration (which takes some time). This usually leads to faster runtime.
    ## But if your input sizes changes at each iteration, then cudnn will benchmark every time a new size appears,
    ## possibly leading to worse runtime performances.
    cudnn.benchmark = True

    ## get train_test_id
    train_test_id = get_split()

    ## train vs. val
    print('--' * 10)
    print('num train = {}, num_val = {}'.format(
        (train_test_id['Split'] == 'train').sum(),
        (train_test_id['Split'] != 'train').sum()))
    print('--' * 10)

    train_transform = DualCompose(
        [HorizontalFlip(),
         VerticalFlip(),
         ImageOnly(Normalize())])

    val_transform = DualCompose([ImageOnly(Normalize())])

    ## define data loader
    train_loader = make_loader(train_test_id,
                               image_path,
                               args,
                               train=True,
                               shuffle=True,
                               transform=train_transform)
    valid_loader = make_loader(train_test_id,
                               image_path,
                               args,
                               train=False,
                               shuffle=True,
                               transform=val_transform)

    if True:
        print('--' * 10)
        print('check data')
        train_image, train_mask, train_mask_ind = next(iter(train_loader))
        print('train_image.shape', train_image.shape)
        print('train_mask.shape', train_mask.shape)
        print('train_mask_ind.shape', train_mask_ind.shape)
        print('train_image.min', train_image.min().item())
        print('train_image.max', train_image.max().item())
        print('train_mask.min', train_mask.min().item())
        print('train_mask.max', train_mask.max().item())
        print('train_mask_ind.min', train_mask_ind.min().item())
        print('train_mask_ind.max', train_mask_ind.max().item())
        print('--' * 10)

    valid_fn = validation_binary

    ###########
    ## optimizer
    if args.optimizer == 'Adam':
        optimizer = Adam(model.parameters(), lr=args.lr)
    elif args.optimizer == 'SGD':
        optimizer = SGD(model.parameters(), lr=args.lr, momentum=0.9)

    ## loss
    criterion = loss_fn
    ## change LR
    scheduler = ReduceLROnPlateau(optimizer,
                                  'min',
                                  factor=0.8,
                                  patience=5,
                                  verbose=True)

    ##########
    ## load previous model status
    previous_valid_loss = 10
    model_path = root / 'model.pt'
    if args.resume_path is not None and model_path.exists():
        state = torch.load(str(model_path))
        epoch = state['epoch']
        step = state['step']
        model.load_state_dict(state['model'])
        previous_valid_loss = state.get('valid_loss', 10)
        print('--' * 10)
        print('Restored previous model, epoch {}, step {:,}'.format(
            epoch, step))
        print('--' * 10)
    else:
        epoch = 1
        step = 0

    #########
    ## start training
    log = root.joinpath('train.log').open('at', encoding='utf8')
    writer = SummaryWriter()
    meter = AllInOneMeter()
    print('Start training')
    print_model_summay(model)
    previous_valid_jaccard = 0
    for epoch in range(epoch, args.n_epochs + 1):
        model.train()
        random.seed()
        #jaccard = []
        start_time = time.time()
        meter.reset()
        w1 = 1.0
        w2 = 0.5
        w3 = 0.5
        try:
            train_loss = 0
            valid_loss = 0
            # if epoch == 1:
            #     freeze_layer_names = get_freeze_layer_names(part='encoder')
            #     set_freeze_layers(model, freeze_layer_names=freeze_layer_names)
            #     #set_train_layers(model, train_layer_names=['module.final.weight','module.final.bias'])
            #     print_model_summay(model)
            # elif epoch == 5:
            #     w1 = 1.0
            #     w2 = 0.0
            #     w3 = 0.5
            #     freeze_layer_names = get_freeze_layer_names(part='encoder')
            #     set_freeze_layers(model, freeze_layer_names=freeze_layer_names)
            #     # set_train_layers(model, train_layer_names=['module.final.weight','module.final.bias'])
            #     print_model_summay(model)
            #elif epoch == 3:
            #     set_train_layers(model, train_layer_names=['module.dec5.block.0.conv.weight','module.dec5.block.0.conv.bias',
            #                                                'module.dec5.block.1.weight','module.dec5.block.1.bias',
            #                                                'module.dec4.block.0.conv.weight','module.dec4.block.0.conv.bias',
            #                                                'module.dec4.block.1.weight','module.dec4.block.1.bias',
            #                                                'module.dec3.block.0.conv.weight','module.dec3.block.0.conv.bias',
            #                                                'module.dec3.block.1.weight','module.dec3.block.1.bias',
            #                                                'module.dec2.block.0.conv.weight','module.dec2.block.0.conv.bias',
            #                                                'module.dec2.block.1.weight','module.dec2.block.1.bias',
            #                                                'module.dec1.conv.weight','module.dec1.conv.bias',
            #                                                'module.final.weight','module.final.bias'])
            #     print_model_summay(model)
            # elif epoch == 50:
            #     set_freeze_layers(model, freeze_layer_names=None)
            #     print_model_summay(model)
            for i, (train_image, train_mask,
                    train_mask_ind) in enumerate(train_loader):
                # inputs, targets = variable(inputs), variable(targets)

                train_image = train_image.permute(0, 3, 1, 2)
                train_mask = train_mask.permute(0, 3, 1, 2)
                train_image = train_image.to(device)
                train_mask = train_mask.to(device).type(torch.cuda.FloatTensor)
                train_mask_ind = train_mask_ind.to(device).type(
                    torch.cuda.FloatTensor)
                # if args.problem_type == 'binary':
                #     train_mask = train_mask.to(device).type(torch.cuda.FloatTensor)
                # else:
                #     #train_mask = train_mask.to(device).type(torch.cuda.LongTensor)
                #     train_mask = train_mask.to(device).type(torch.cuda.FloatTensor)

                outputs, outputs_mask_ind1, outputs_mask_ind2 = model(
                    train_image)
                #print(outputs.size())
                #print(outputs_mask_ind1.size())
                #print(outputs_mask_ind2.size())
                ### note that the last layer in the model is defined differently
                # if args.problem_type == 'binary':
                #     train_prob = F.sigmoid(outputs)
                #     loss = criterion(outputs, train_mask)
                # else:
                #     #train_prob = outputs
                #     train_prob = F.sigmoid(outputs)
                #     loss = torch.tensor(0).type(train_mask.type())
                #     for feat_inx in range(train_mask.shape[1]):
                #         loss += criterion(outputs, train_mask)
                train_prob = torch.sigmoid(outputs)
                train_mask_ind_prob1 = torch.sigmoid(outputs_mask_ind1)
                train_mask_ind_prob2 = torch.sigmoid(outputs_mask_ind2)
                loss1 = criterion(outputs, train_mask)
                #loss1 = F.binary_cross_entropy_with_logits(outputs, train_mask)
                #loss2 = nn.BCEWithLogitsLoss()(outputs_mask_ind1, train_mask_ind)
                #print(train_mask_ind.size())
                #weight = torch.ones_like(train_mask_ind)
                #weight[:, 0] = weight[:, 0] * 1
                #weight[:, 1] = weight[:, 1] * 14
                #weight[:, 2] = weight[:, 2] * 14
                #weight[:, 3] = weight[:, 3] * 4
                #weight[:, 4] = weight[:, 4] * 4
                #weight = weight * train_mask_ind + 1
                #weight = weight.to(device).type(torch.cuda.FloatTensor)
                loss2 = F.binary_cross_entropy_with_logits(
                    outputs_mask_ind1, train_mask_ind)
                loss3 = F.binary_cross_entropy_with_logits(
                    outputs_mask_ind2, train_mask_ind)
                #loss3 = criterion(outputs_mask_ind2, train_mask_ind)
                loss = loss1 * w1 + loss2 * w2 + loss3 * w3
                #print(loss1.item(), loss2.item(), loss.item())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                step += 1
                #jaccard += [get_jaccard(train_mask, (train_prob > 0).float()).item()]
                meter.add(train_prob, train_mask, train_mask_ind_prob1,
                          train_mask_ind_prob2, train_mask_ind, loss1.item(),
                          loss2.item(), loss3.item(), loss.item())
                # print(train_mask.data.shape)
                # print(train_mask.data.sum(dim=-2).shape)
                # print(train_mask.data.sum(dim=-2).sum(dim=-1).shape)
                # print(train_mask.data.sum(dim=-2).sum(dim=-1).sum(dim=0).shape)
                # intersection = train_mask.data.sum(dim=-2).sum(dim=-1)
                # print(intersection.shape)
                # print(intersection.dtype)
                # print(train_mask.data.shape[0])
                #torch.zeros([2, 4], dtype=torch.float32)
            #########################
            ## at the end of each epoch, evaluate the metrics
            epoch_time = time.time() - start_time
            train_metrics = meter.value()
            train_metrics['epoch_time'] = epoch_time
            train_metrics['image'] = train_image.data
            train_metrics['mask'] = train_mask.data
            train_metrics['prob'] = train_prob.data

            #train_jaccard = np.mean(jaccard)
            #train_auc = str(round(mtr1.value()[0],2))+' '+str(round(mtr2.value()[0],2))+' '+str(round(mtr3.value()[0],2))+' '+str(round(mtr4.value()[0],2))+' '+str(round(mtr5.value()[0],2))
            valid_metrics = valid_fn(model, criterion, valid_loader, device,
                                     num_classes)
            ##############
            ## write events
            write_event(log,
                        step,
                        epoch=epoch,
                        train_metrics=train_metrics,
                        valid_metrics=valid_metrics)
            #save_weights(model, model_path, epoch + 1, step)
            #########################
            ## tensorboard
            write_tensorboard(writer,
                              model,
                              epoch,
                              train_metrics=train_metrics,
                              valid_metrics=valid_metrics)
            #########################
            ## save the best model
            valid_loss = valid_metrics['loss1']
            valid_jaccard = valid_metrics['jaccard']
            if valid_loss < previous_valid_loss:
                save_weights(model, model_path, epoch + 1, step, train_metrics,
                             valid_metrics)
                previous_valid_loss = valid_loss
                print('Save best model by loss')
            if valid_jaccard > previous_valid_jaccard:
                save_weights(model, model_path, epoch + 1, step, train_metrics,
                             valid_metrics)
                previous_valid_jaccard = valid_jaccard
                print('Save best model by jaccard')
            #########################
            ## change learning rate
            scheduler.step(valid_metrics['loss1'])

        except KeyboardInterrupt:
            # print('--' * 10)
            # print('Ctrl+C, saving snapshot')
            # save_weights(model, model_path, epoch, step)
            # print('done.')
            # print('--' * 10)
            writer.close()
            #return
    writer.close()
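
Example #9 never shows save_weights, but the keys it reads back through torch.load ('model', 'epoch', 'step', 'valid_loss') pin down what the helper must write. A sketch consistent with those call sites (assumed; the real helper may store more):

import torch


def save_weights(model, model_path, epoch, step, train_metrics, valid_metrics):
    """Sketch only: checkpoint layout inferred from how Example #9 restores."""
    # Keep only scalar metrics; train_metrics also carries image tensors.
    scalars = {k: v for k, v in valid_metrics.items() if not torch.is_tensor(v)}
    torch.save({
        'model': model.state_dict(),
        'epoch': epoch,
        'step': step,
        'valid_loss': valid_metrics['loss1'],
        'valid_metrics': scalars,
    }, str(model_path))
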
Example #10
            outputs = model(inputs)

            for j in range(inputs.size()[0]):

                ax = plt.subplot(num_images, 2, images_so_far + 1)
                ax.axis('off')
                imshow(inputs.cpu().data[j], gray=False)
                ax = plt.subplot(num_images, 2, images_so_far + 2)
                ax.axis('off')
                imshow(outputs.cpu().data[j], gray=True)

                images_so_far += 2

                if images_so_far == num_images * 2:
                    model.train(mode=was_training)
                    return

        model.train(mode=was_training)


model_ft = TernausNet16()
model_ft = model_ft.to(device)

criterion = LossBinary(jaccard_weight=1)

model_ft = test(model_ft, criterion)

visualize_model(model_ft)
plt.ioff()
plt.show()
Example #11
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.5, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=12)
    arg('--type',
        type=str,
        default='binary',
        choices=['binary', 'parts', 'instruments'])
    arg('--model',
        type=str,
        default='UNet',
        choices=['UNet', 'UNet11', 'LinkNet34', 'AlbuNet'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    if args.type == 'parts':
        num_classes = 4
    elif args.type == 'instruments':
        num_classes = 8
    else:
        num_classes = 1

    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained=True)
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained=True)
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'AlbuNet':
        model = AlbuNet(num_classes=num_classes, pretrained=True)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    device_ids = None
    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.type == 'binary':
        loss = LossBinary(jaccard_weight=args.jaccard_weight)
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    problem_type='binary',
                    batch_size=1):
        return DataLoader(dataset=CustomDataset(file_names,
                                                transform=transform),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=batch_size,
                          pin_memory=torch.cuda.is_available())

    train_file_names, val_file_names = get_split()

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    def train_transform(p=1):
        return Compose(
            [
                #            Rescale(SIZE),
                RandomCrop(SIZE),
                RandomBrightness(0.2),
                OneOf([
                    IAAAdditiveGaussianNoise(),
                    GaussNoise(),
                ], p=0.15),
                #            OneOf([
                #                OpticalDistortion(p=0.3),
                #                GridDistortion(p=.1),
                #                IAAPiecewiseAffine(p=0.3),
                #            ], p=0.1),
                #            OneOf([
                #                IAASharpen(),
                #                IAAEmboss(),
                #                RandomContrast(),
                #                RandomBrightness(),
                #            ], p=0.15),
                HueSaturationValue(p=0.15),
                HorizontalFlip(p=0.5),
                Normalize(p=1),
            ],
            p=p)

    def val_transform(p=1):
        return Compose(
            [
                #            Rescale(256),
                RandomCrop(SIZE),
                Normalize(p=1)
            ],
            p=p)

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform(p=1),
                               problem_type=args.type,
                               batch_size=args.batch_size)
    valid_loader = make_loader(val_file_names,
                               transform=val_transform(p=1),
                               problem_type=args.type,
                               batch_size=len(device_ids) if device_ids else 1)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=valid,
                fold=args.fold,
                num_classes=num_classes)
Example #12
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.5, type=float)
    arg('--device-ids', type=str, default='0', help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.001)
    arg('--workers', type=int, default=8)
    arg('--loss', type=str, default='BCE', choices=['BCE', 'StableBCE', 'Lovasz'])
    arg('--model', type=str, default='ResNext', choices=['WideResnet', 'WideResnetShort', 'ResNext'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    if args.model == 'ResNext':
        model = ResNext(pretrained=True)
    elif args.model == 'WideResnet':
        model = WideResnet(pretrained=True)
    elif args.model == 'WideResnetShort':
        model = WideResnetShort(pretrained=True)

    print('CUDA: {}'.format(torch.cuda.is_available()))
    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.loss == "Lovasz":
        loss = LossLovasz()
    elif args.loss == 'StableBCE':
        loss = LossStableBCE(jaccard_weight=args.jaccard_weight)
    else:
        loss = LossBinary(jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names, shuffle=False, transform=None, batch_size=1):
        return DataLoader(
            dataset=SaltDataset(file_names, transform=transform),
            shuffle=shuffle,
            num_workers=args.workers,
            batch_size=batch_size,
            pin_memory=torch.cuda.is_available()
        )

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names), len(val_file_names)))

    def train_transform(p=1):
        return Compose([
            # ShiftScaleRotate(p=0.5),
            HorizontalFlip(p=0.5),
            Blur(blur_limit=3, p=.5),
            RandomContrast(p=0.3),
            RandomBrightness(p=0.3),
            ElasticTransform(p=0.3),
            Resize(202, 202, interpolation=cv2.INTER_NEAREST),
            PadIfNeeded(256, 256),
            Normalize(p=1)
        ], p=p)

    def val_transform(p=1):
        return Compose([
            Resize(202, 202, interpolation=cv2.INTER_NEAREST),
            PadIfNeeded(256, 256),
            Normalize(p=1)
        ], p=p)

    train_loader = make_loader(train_file_names, shuffle=True, transform=train_transform(p=1),
                               batch_size=args.batch_size)
    valid_loader = make_loader(val_file_names, transform=val_transform(p=1),
                               batch_size=args.batch_size)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    valid = validation_binary

    utils.train(
        init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
        args=args,
        model=model,
        criterion=loss,
        train_loader=train_loader,
        valid_loader=valid_loader,
        validation=valid,
        fold=args.fold
    )
Example #13
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    server = False
    path_default = "./data" if server else "e:/diploma"
    arg('--jaccard-weight', type=float, default=1)
    arg('--t', type=float, default=0.07)
    arg('--pretrain-epochs', type=int, default=100)
    arg('--train-epochs', type=int, default=100)
    arg('--train-test-split-file',
        type=str,
        default='./data/train_test_id.pickle',
        help='train test split file path')
    arg('--pretrain-image-path',
        type=str,
        default=f'{path_default}/ham10000_resized/',
        help="images path for pretraining")
    arg('--pretrain-mask-image-path',
        type=str,
        default=f'{path_default}/ham_clusters_20/lab/20/',
        help="cluster masks path for pretraining")
    arg('--image-path',
        type=str,
        default=f'{path_default}/task2_h5/',
        help="h5 images path for training")
    arg('--batch-size', type=int, default=8, help="n batches")
    arg('--workers', type=int, default=6, help="n workers")
    arg('--cuda-driver', type=int, default=1, help="cuda device index")
    arg('--resume-path', type=str, default=None)
    arg('--lr', type=float, default=0.001, help="lr")
    arg('--wandb', type=bool, default=True, help="wandb log")
    args = parser.parse_args()

    cudnn.benchmark = True
    torch.backends.cudnn.enabled = True
    device = torch.device(
        f'cuda:{args.cuda_driver}' if torch.cuda.is_available() else 'cpu')
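    # NOTE: despite its name, --cuda-driver is the index of the CUDA device
    # to train on (also used as the sole DataParallel device id below).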

    num_classes = 5
    args.num_classes = num_classes
    model = UNet16(num_classes=num_classes, pretrained="vgg")
    model = nn.DataParallel(model, device_ids=[args.cuda_driver])
    model.to(device)

    # Freeze the bottleneck ("center") block during contrastive pretraining;
    # it is unfrozen again before fine-tuning below.
    center_layer = model.module.center_Conv2d
    for p in center_layer.parameters():
        p.requires_grad = False
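    # NOTE (hedged): Adam below still receives the frozen parameters; they
    # just get no gradients, so the optimizer skips them. A common
    # alternative is to hand it only the trainable ones, e.g.
    #   Adam((p for p in model.parameters() if p.requires_grad), lr=args.lr)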

    pretrain_mask_image_path = args.pretrain_mask_image_path
    pretrain_image_path = args.pretrain_image_path
    pretrain_loader = make_pretrain_loader(pretrain_image_path,
                                           pretrain_mask_image_path,
                                           args,
                                           shuffle=True)
    epoch = 1
    optimizer = Adam(model.parameters(), lr=args.lr)
    # Cut the LR by 0.8x whenever the tracked loss fails to improve for
    # 5 consecutive epochs.
    scheduler = ReduceLROnPlateau(optimizer,
                                  'min',
                                  factor=0.8,
                                  patience=5,
                                  verbose=True)
    if args.resume_path is not None:
        # Map to the active device in case the checkpoint was saved elsewhere.
        state = torch.load(str(args.resume_path), map_location=device)
        epoch = state['epoch'] + 1
        model.load_state_dict(state['model'])
        print('--' * 10)
        print('Restored previous model, epoch {}'.format(epoch))
        print('--' * 10)
    print(model)
    print('Start pretraining')
    criterion = ContrastiveLoss(args.t, device)
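    # NOTE (hedged): ContrastiveLoss is defined elsewhere in this repo. Given
    # the temperature args.t, it is presumably an InfoNCE-style objective
    # over pixel embeddings of the two views, roughly
    #   sim = (z_orig @ z_trans.T) / t
    #   loss = -log(exp(sim_pos) / sum_k exp(sim_k))
    # with positive pairs picked via mask_original / mask_transformed.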
    wandb.init(project="pipeline")
    wandb.run.name = (f"pipeline lr={args.lr} "
                      f"pretrain_epochs={args.pretrain_epochs} "
                      f"train_epochs={args.train_epochs}")
    wandb.run.save()
    wandb.watch(model)
    for epoch in range(epoch, args.pretrain_epochs + 1):
        model.train()
        start_time = time.time()
        losses = []
        for ind, (sample_id, image_original, image_transformed, mask_original,
                  mask_transformed) in enumerate(pretrain_loader):
            start_step = time.time()
            # The dataset yields NHWC tensors; the model expects NCHW.
            train_image_original = image_original.permute(0, 3, 1, 2).to(
                device, non_blocking=True)
            train_image_transformed = image_transformed.permute(0, 3, 1, 2).to(
                device, non_blocking=True)
            mask_original = mask_original.to(device,
                                             dtype=torch.uint8,
                                             non_blocking=True)
            mask_transformed = mask_transformed.to(device,
                                                   dtype=torch.uint8,
                                                   non_blocking=True)
            # Embed both views; the masks tell the contrastive loss which
            # pixel embeddings should agree across the two views.
            original_result, _ = model(train_image_original)
            transformed_result, _ = model(train_image_transformed)
            loss = criterion(original_result, transformed_result,
                             mask_original, mask_transformed)
            losses.append(loss.item())
            print(f'epoch={epoch:3d}, iter={ind:3d}, loss={loss.item():.4g}')
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print(f"step time: {time.time() - start_step:.3f}s")

        avg_loss = np.mean(losses)
        wandb.log({"pretrain/loss": avg_loss})
        epoch_time = time.time() - start_time
        print(f"epoch time:{epoch_time}")
        scheduler.step(avg_loss)
        # torch.save fails if the target directory is missing, so make sure
        # it exists first (assumes pathlib.Path is imported, as elsewhere in
        # this repo).
        Path("checkpoint").mkdir(exist_ok=True)
        model_path = f"checkpoint/model_epoch_{epoch}.pt"
        torch.save(
            {
                'model': model.state_dict(),
                'epoch': epoch,
                'loss': avg_loss
            }, model_path)
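    # Usage note (hypothetical invocation): a run can be resumed from one of
    # these checkpoints via --resume-path, e.g.
    #   python pipeline.py --resume-path checkpoint/model_epoch_50.pt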
    print("Pretraining ended")
    epoch = 1
    ## get train_test_id
    train_test_id = get_split(args.train_test_split_file)

    ## train vs. val
    print('--' * 10)
    print('num train = {}, num_val = {}'.format(
        (train_test_id['Split'] == 'train').sum(),
        (train_test_id['Split'] != 'train').sum()))
    print('--' * 10)
    image_path = args.image_path
    train_loader = make_loader(
        train_test_id,
        image_path,
        args,
        train=True,
        shuffle=True,
        train_test_split_file=args.train_test_split_file)
    valid_loader = make_loader(
        train_test_id,
        image_path,
        args,
        train=False,
        shuffle=False,  # no need to shuffle the validation set
        train_test_split_file=args.train_test_split_file)
    # Sanity-check one training batch.
    print('--' * 10)
    print('check data')
    train_image, train_mask, train_mask_ind = next(iter(train_loader))
    print('train_image.shape', train_image.shape)
    print('train_mask.shape', train_mask.shape)
    print('train_mask_ind.shape', train_mask_ind.shape)
    print('train_image.min', train_image.min().item())
    print('train_image.max', train_image.max().item())
    print('train_mask.min', train_mask.min().item())
    print('train_mask.max', train_mask.max().item())
    print('train_mask_ind.min', train_mask_ind.min().item())
    print('train_mask_ind.max', train_mask_ind.max().item())
    print('--' * 10)
    valid_fn = validation_binary
    criterion = LossBinary(jaccard_weight=args.jaccard_weight)
    meter = AllInOneMeter(device)

    # Swap in a fresh projection head sized for the five attribute classes
    # and unfreeze the bottleneck for fine-tuning. The new Conv2d is created
    # on CPU, so move it to the model's device explicitly.
    model.module.projection_head = nn.Conv2d(32, num_classes, 1).to(device)
    center_layer = model.module.center_Conv2d
    for p in center_layer.parameters():
        p.requires_grad = True
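    # A quick illustrative sanity check (not in the original) that the
    # unfreeze took effect:
    #   n_trainable = sum(p.numel() for p in model.parameters()
    #                     if p.requires_grad)
    #   print(f'trainable params: {n_trainable}')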

    print(model)
    # Re-create the optimizer so it picks up the new projection head.
    optimizer = Adam(model.parameters(), lr=args.lr)
    scheduler = ReduceLROnPlateau(optimizer,
                                  'min',
                                  factor=0.8,
                                  patience=5,
                                  verbose=True)
    print("Start fine tuning")
    for epoch in range(epoch, args.train_epochs + 1):
        model.train()
        start_time = time.time()
        meter.reset()
        # Weights for the three loss terms: pixel-wise segmentation (w1) and
        # the two image-level attribute-classification heads (w2, w3).
        w1 = 1.0
        w2 = 0.5
        w3 = 0.5
        for i, (train_image, train_mask,
                train_mask_ind) in enumerate(train_loader):
            # NHWC -> NCHW, then move everything to the training device.
            train_image = train_image.permute(0, 3, 1, 2).to(device)
            train_mask = train_mask.permute(0, 3, 1, 2).to(
                device, dtype=torch.float32)
            train_mask_ind = train_mask_ind.to(device, dtype=torch.float32)
            outputs, outputs_mask_ind1 = model(train_image)
            # Global max-pool the pixel logits into image-level per-class
            # logits (a second classification signal next to
            # outputs_mask_ind1).
            outputs_mask_ind2 = torch.amax(outputs, dim=(2, 3))
            train_prob = torch.sigmoid(outputs)
            train_mask_ind_prob1 = torch.sigmoid(outputs_mask_ind1)
            train_mask_ind_prob2 = torch.sigmoid(outputs_mask_ind2)
            loss1 = criterion(outputs, train_mask)
            loss2 = F.binary_cross_entropy_with_logits(outputs_mask_ind1,
                                                       train_mask_ind)
            loss3 = F.binary_cross_entropy_with_logits(outputs_mask_ind2,
                                                       train_mask_ind)
            loss = loss1 * w1 + loss2 * w2 + loss3 * w3
            print(f'epoch={epoch:3d}, iter={i:3d}, '
                  f'loss1={loss1.item():.4g}, loss2={loss2.item():.4g}, '
                  f'loss3={loss3.item():.4g}, loss={loss.item():.4g}')
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            meter.add(train_prob, train_mask, train_mask_ind_prob1,
                      train_mask_ind_prob2, train_mask_ind, loss1.item(),
                      loss2.item(), loss3.item(), loss.item())
        epoch_time = time.time() - start_time
        train_metrics = meter.value()
        train_metrics['epoch_time'] = epoch_time
        train_metrics['image'] = train_image.data
        train_metrics['mask'] = train_mask.data
        train_metrics['prob'] = train_prob.data
        valid_metrics = valid_fn(model, criterion, valid_loader, device)

        wandb.log({
            "loss/loss": valid_metrics["loss"],
            "loss/loss1": valid_metrics["loss1"],
            "loss/loss2": valid_metrics["loss2"],
            "jaccard_mean/jaccard_mean": valid_metrics["jaccard"],
            "jaccard_class/jaccard_pigment_network": valid_metrics["jaccard1"],
            "jaccard_class/jaccard_negative_network": valid_metrics["jaccard2"],
            "jaccard_class/jaccard_streaks": valid_metrics["jaccard3"],
            "jaccard_class/jaccard_milia_like_cyst": valid_metrics["jaccard4"],
            "jaccard_class/jaccard_globules": valid_metrics["jaccard5"]
        })
        # Drive the LR schedule from the validation segmentation loss.
        scheduler.step(valid_metrics['loss1'])