Example #1
def main():
    # Load the run configuration and the raw Titanic training data, then fix the random seed
    config = load_config(config_file_name="lgb_titanic_config.ini")
    data = load_data(data_file_name="titanic_training.csv")
    set_randomseed()

    transformer = TransformerCsv(data_df=data)

    lgb_model = LgbClassifier(model_params_path=config.get('model_params', 'lgb_params_path'),
                              target_col=config.get('data_columns', 'target_col'))

    train_data, test_data = titanic_data_transform(transformer)

    # Fit the model with the generic Trainer
    lgb_trainer = Trainer()
    lgb_model = lgb_trainer.fit(model_class=lgb_model, train_data_df=train_data)

    eval_dict = lgb_model.eval(test_data=test_data)

    print("test acc:", eval_dict["acc"])
    print("conf_matrix", eval_dict["conf_matrix"])
    result_ploter = PlotClassificationResult()
    result_ploter.plot_roc_curve(roc_tapple=eval_dict["roc_curve"], plot_show=False)
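A minimal entry point for this example might look like the following; the __main__ guard is an assumption about how the script is launched and is not part of the original snippet.

if __name__ == "__main__":
    main()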
Example #2
def main(args):

    # region ----------------------------------------- Training log setup -----------------------------------------

    if 0 == len(args.resume):
        logger = Logger('./logs/' + args.model + '.log')
    else:
        logger = Logger('./logs/' + args.model + '.log', True)

    logger.append(vars(args))

    # if args.display:
    #     writer = SummaryWriter()
    # else:
    #     writer = None
    writer = SummaryWriter()

    # endregion

    # region ---------------------------------------- Data preprocessing configuration ----------------------------------------
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val':
        transforms.Compose([
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }
    # endregion

    # region ------------------------------------------- Data loading -------------------------------------------

    train_datasets = datasets.CIFAR10(root="data",
                                      train=True,
                                      download=True,
                                      transform=transforms.ToTensor())
    val_datasets = datasets.CIFAR10(root="data",
                                    train=False,
                                    download=True,
                                    transform=transforms.ToTensor())

    # train_datasets = datasets.ImageFolder(os.path.join(args.data_root, 't256'), data_transforms['train'])
    # val_datasets   = datasets.ImageFolder(os.path.join(args.data_root, 'v256'), data_transforms['val'])
    train_dataloaders = torch.utils.data.DataLoader(train_datasets,
                                                    batch_size=args.batch_size,
                                                    shuffle=True,
                                                    num_workers=4)
    val_dataloaders = torch.utils.data.DataLoader(val_datasets,
                                                  batch_size=1024,
                                                  shuffle=False,
                                                  num_workers=4)
    # endregion

    # region -------------------------------------- General (network-independent) setup ----------------------------------------
    # Log one sample batch when debugging
    if args.debug:
        x, y = next(iter(train_dataloaders))
        logger.append([x, y])

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    is_use_cuda = torch.cuda.is_available()
    cudnn.benchmark = True

    # Backbone network selection
    if 'resnet50' == args.model.split('_')[0]:
        my_model = models.resnet50(pretrained=False)
    elif 'resnet50-cbam' == args.model.split('_')[0]:
        my_model = resnet_cbam.resnet50_cbam(pretrained=False)
    elif 'resnet101' == args.model.split('_')[0]:
        my_model = models.resnet101(pretrained=False)
    else:
        raise ModuleNotFoundError(f'Unknown model: {args.model}')

    # endregion

    # region -------------------------------------- Training configuration ----------------------------------------
    # Loss function
    loss_fn = [nn.CrossEntropyLoss()]
    # Optimizer
    optimizer = optim.SGD(my_model.parameters(),
                          lr=0.1,
                          momentum=0.9,
                          weight_decay=1e-4)
    # Learning-rate scheduler
    lr_schedule = lr_scheduler.MultiStepLR(optimizer,
                                           milestones=[30, 60],
                                           gamma=0.1)
    # Top-1 / top-5 classification error meter
    metric = [ClassErrorMeter([1, 5], True)]
    # Number of training epochs
    epoch = int(args.epoch)
    # Hand everything to the trainer
    my_trainer = Trainer(my_model, args.model, loss_fn, optimizer, lr_schedule,
                         500, is_use_cuda, train_dataloaders, val_dataloaders,
                         metric, 0, epoch, args.debug, logger, writer)
    # Train
    my_trainer.fit()
    logger.append('Training finished')
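The argument parser this example expects is not shown. A plausible sketch, with attribute names copied from what main() reads above but flags and defaults chosen purely for illustration, could be:

import argparse

def parse_args():
    # Hypothetical CLI definition; only the attribute names come from main(), everything else is assumed.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='resnet50_baseline')  # prefix before '_' selects the backbone
    parser.add_argument('--resume', default='')                  # non-empty value appends to the existing log file
    parser.add_argument('--gpu', default='0')                    # value written to CUDA_VISIBLE_DEVICES
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--epoch', type=int, default=90)
    parser.add_argument('--debug', action='store_true')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())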
Example #3

def main(args):
    if 0 == len(args.resume):
        logger = Logger('./logs/' + args.model + '.log')
    else:
        logger = Logger('./logs/' + args.model + '.log', True)

    logger.append(vars(args))

    if args.display:
        writer = SummaryWriter()
    else:
        writer = None

    gpus = args.gpu.split(',')
    data_transforms = {
        'train':
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val':
        transforms.Compose([
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }

    train_datasets = datasets.ImageFolder(os.path.join(args.data_root, 't256'),
                                          data_transforms['train'])
    val_datasets = datasets.ImageFolder(os.path.join(args.data_root, 'v256'),
                                        data_transforms['val'])
    train_dataloaders = torch.utils.data.DataLoader(
        train_datasets,
        batch_size=args.batch_size * len(gpus),
        shuffle=True,
        num_workers=8)
    val_dataloaders = torch.utils.data.DataLoader(val_datasets,
                                                  batch_size=1024,
                                                  shuffle=False,
                                                  num_workers=8)

    if args.debug:
        x, y = next(iter(train_dataloaders))
        logger.append([x, y])

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    is_use_cuda = torch.cuda.is_available()
    cudnn.benchmark = True

    if 'resnet50' == args.model.split('_')[0]:
        my_model = models.resnet50(pretrained=False)
    elif 'resnet101' == args.model.split('_')[0]:
        my_model = models.resnet101(pretrained=False)
    elif 'mnasnet' == args.model.split('_')[0]:
        my_model = mnasnet.MnasNet()
    elif 'shufflenetv2' == args.model.split('_')[0]:
        my_model = shufflenetv2.ShuffleNetV2()
    else:
        raise ModuleNotFoundError(f'Unknown model: {args.model}')

    #my_model.apply(fc_init)
    if is_use_cuda and 1 == len(gpus):
        my_model = my_model.cuda()
    elif is_use_cuda and 1 < len(gpus):
        my_model = nn.DataParallel(my_model.cuda())

    loss_fn = [nn.CrossEntropyLoss()]
    optimizer = optim.SGD(my_model.parameters(),
                          lr=0.1,
                          momentum=0.9,
                          weight_decay=1e-4)
    lr_schedule = lr_scheduler.MultiStepLR(optimizer,
                                           milestones=[30, 60],
                                           gamma=0.1)

    metric = [ClassErrorMeter([1, 5], True)]
    start_epoch = 0
    num_epochs = 90

    my_trainer = Trainer(my_model, args.model, loss_fn, optimizer, lr_schedule,
                         500, is_use_cuda, train_dataloaders, val_dataloaders,
                         metric, start_epoch, num_epochs, args.debug, logger, writer)
    my_trainer.fit()
    logger.append('Optimize Done!')
Example #4
def main(cfg):
    """Runs main training procedure."""

    print('Starting training...')
    print('Current working directory is:', os.getcwd())

    # fix random seeds for reproducibility
    seed_everything(seed=cfg['seed'])

    # neptune logging
    neptune.init(project_qualified_name=cfg['neptune_project_name'],
                 api_token=cfg['neptune_api_token'])

    neptune.create_experiment(name=cfg['neptune_experiment'], params=cfg)

    num_classes = 1 if len(cfg['classes']) == 1 else (len(cfg['classes']) + 1)
    activation = 'sigmoid' if num_classes == 1 else 'softmax2d'
    background = not cfg['ignore_channels']

    aux_params = dict(
        pooling=cfg['pooling'],  # one of 'avg', 'max'
        dropout=cfg['dropout'],  # dropout ratio, default is None
        activation='sigmoid',  # activation function, default is None
        classes=num_classes)  # define number of output labels

    # configure model
    models = {
        'unet':
        Unet(encoder_name=cfg['encoder_name'],
             encoder_weights=cfg['encoder_weights'],
             decoder_use_batchnorm=cfg['use_batchnorm'],
             classes=num_classes,
             activation=activation,
             aux_params=aux_params),
        'unetplusplus':
        UnetPlusPlus(encoder_name=cfg['encoder_name'],
                     encoder_weights=cfg['encoder_weights'],
                     decoder_use_batchnorm=cfg['use_batchnorm'],
                     classes=num_classes,
                     activation=activation,
                     aux_params=aux_params),
        'deeplabv3plus':
        DeepLabV3Plus(encoder_name=cfg['encoder_name'],
                      encoder_weights=cfg['encoder_weights'],
                      classes=num_classes,
                      activation=activation,
                      aux_params=aux_params)
    }

    assert cfg['architecture'] in models.keys()
    model = models[cfg['architecture']]

    # configure loss
    losses = {
        'dice_loss':
        DiceLoss(include_background=background,
                 softmax=False,
                 batch=cfg['combine']),
        'generalized_dice':
        GeneralizedDiceLoss(include_background=background,
                            softmax=False,
                            batch=cfg['combine'])
    }

    assert cfg['loss'] in losses.keys()
    loss = losses[cfg['loss']]

    # configure optimizer
    optimizers = {
        'adam': Adam([dict(params=model.parameters(), lr=cfg['lr'])]),
        'adamw': AdamW([dict(params=model.parameters(), lr=cfg['lr'])]),
        'rmsprop': RMSprop([dict(params=model.parameters(), lr=cfg['lr'])])
    }

    assert cfg['optimizer'] in optimizers.keys()
    optimizer = optimizers[cfg['optimizer']]

    # configure metrics
    metrics = {
        'dice_score':
        DiceMetric(include_background=background, reduction='mean'),
        'dice_smp':
        Fscore(threshold=cfg['rounding'],
               ignore_channels=cfg['ignore_channels']),
        'iou_smp':
        IoU(threshold=cfg['rounding'], ignore_channels=cfg['ignore_channels']),
        'generalized_dice':
        GeneralizedDiceLoss(include_background=background,
                            softmax=False,
                            batch=cfg['combine']),
        'dice_loss':
        DiceLoss(include_background=background,
                 softmax=False,
                 batch=cfg['combine']),
        'cross_entropy':
        BCELoss(reduction='mean'),
        'accuracy':
        Accuracy(ignore_channels=cfg['ignore_channels'])
    }

    assert all(m['name'] in metrics.keys() for m in cfg['metrics'])
    metrics = [(metrics[m['name']], m['name'], m['type'])
               for m in cfg['metrics']]  # tuple of (metric, name, type)

    # configure scheduler
    schedulers = {
        'steplr':
        StepLR(optimizer, step_size=cfg['step_size'], gamma=0.5),
        'cosine':
        CosineAnnealingLR(optimizer,
                          cfg['epochs'],
                          eta_min=cfg['eta_min'],
                          last_epoch=-1)
    }

    assert cfg['scheduler'] in schedulers.keys()
    scheduler = schedulers[cfg['scheduler']]

    # configure augmentations
    train_transform = load_train_transform(transform_type=cfg['transform'],
                                           patch_size=cfg['patch_size'])
    valid_transform = load_valid_transform(patch_size=cfg['patch_size'])

    train_dataset = SegmentationDataset(df_path=cfg['train_data'],
                                        transform=train_transform,
                                        normalize=cfg['normalize'],
                                        tissuemix=cfg['tissuemix'],
                                        probability=cfg['probability'],
                                        blending=cfg['blending'],
                                        warping=cfg['warping'],
                                        color=cfg['color'])

    valid_dataset = SegmentationDataset(df_path=cfg['valid_data'],
                                        transform=valid_transform,
                                        normalize=cfg['normalize'])

    # save intermediate augmentations
    if cfg['eval_dir']:
        default_dataset = SegmentationDataset(df_path=cfg['train_data'],
                                              transform=None,
                                              normalize=None)

        transform_dataset = SegmentationDataset(df_path=cfg['train_data'],
                                                transform=None,
                                                normalize=None,
                                                tissuemix=cfg['tissuemix'],
                                                probability=cfg['probability'],
                                                blending=cfg['blending'],
                                                warping=cfg['warping'],
                                                color=cfg['color'])

        for idx in range(0, min(500, len(default_dataset)), 10):
            image_input, image_mask = default_dataset[idx]
            image_input = image_input.transpose((1, 2, 0))
            image_input = image_input.astype(np.uint8)

            image_mask = image_mask.transpose(
                1, 2, 0)  # move the channel axis last: (C, H, W) -> (H, W, C)
            image_mask = image_mask.astype(np.uint8)
            image_mask = image_mask.squeeze()
            image_mask = image_mask * 255

            image_transform, _ = transform_dataset[idx]
            image_transform = image_transform.transpose(
                (1, 2, 0)).astype(np.uint8)

            idx_str = str(idx).zfill(3)
            skimage.io.imsave(os.path.join(cfg['eval_dir'],
                                           f'{idx_str}a_image_input.png'),
                              image_input,
                              check_contrast=False)
            plt.imsave(os.path.join(cfg['eval_dir'],
                                    f'{idx_str}b_image_mask.png'),
                       image_mask,
                       vmin=0,
                       vmax=1)
            skimage.io.imsave(os.path.join(cfg['eval_dir'],
                                           f'{idx_str}c_image_transform.png'),
                              image_transform,
                              check_contrast=False)

        del transform_dataset

    train_loader = DataLoader(train_dataset,
                              batch_size=cfg['batch_size'],
                              num_workers=cfg['workers'],
                              shuffle=True)

    valid_loader = DataLoader(valid_dataset,
                              batch_size=cfg['batch_size'],
                              num_workers=cfg['workers'],
                              shuffle=False)

    trainer = Trainer(model=model,
                      device=cfg['device'],
                      save_checkpoints=cfg['save_checkpoints'],
                      checkpoint_dir=cfg['checkpoint_dir'],
                      checkpoint_name=cfg['checkpoint_name'])

    trainer.compile(optimizer=optimizer,
                    loss=loss,
                    metrics=metrics,
                    num_classes=num_classes)

    trainer.fit(train_loader,
                valid_loader,
                epochs=cfg['epochs'],
                scheduler=scheduler,
                verbose=cfg['verbose'],
                loss_weight=cfg['loss_weight'])

    # validation inference
    best_model = model
    best_model.load_state_dict(
        torch.load(os.path.join(cfg['checkpoint_dir'],
                                cfg['checkpoint_name'])))
    best_model.to(cfg['device'])
    best_model.eval()

    # setup directory to save plots
    if os.path.isdir(cfg['plot_dir']):
        # remove existing dir and content
        shutil.rmtree(cfg['plot_dir'])
    # create absolute destination
    os.makedirs(cfg['plot_dir'])

    # valid dataset without transformations and normalization for image visualization
    valid_dataset_vis = SegmentationDataset(df_path=cfg['valid_data'],
                                            transform=valid_transform,
                                            normalize=None)

    if cfg['save_checkpoints']:
        for n in range(len(valid_dataset)):
            image_vis = valid_dataset_vis[n][0].astype('uint8')
            image_vis = image_vis.transpose((1, 2, 0))

            image, gt_mask = valid_dataset[n]
            gt_mask = gt_mask.transpose((1, 2, 0))
            gt_mask = gt_mask.squeeze()

            x_tensor = torch.from_numpy(image).to(cfg['device']).unsqueeze(0)
            pr_mask, _ = best_model.predict(x_tensor)
            pr_mask = pr_mask.cpu().numpy().round()
            pr_mask = pr_mask.squeeze()

            save_predictions(out_path=cfg['plot_dir'],
                             index=n,
                             image=image_vis,
                             ground_truth_mask=gt_mask,
                             predicted_mask=pr_mask,
                             average='macro')
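How the cfg dictionary is produced lies outside this snippet. A minimal sketch, assuming the settings live in a YAML file named config.yaml with the keys used above, might be:

import yaml

if __name__ == '__main__':
    # Hypothetical config loading; the file name and YAML format are assumptions.
    with open('config.yaml') as f:
        cfg = yaml.safe_load(f)
    main(cfg)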
Example #5

def main():
    global args

    # Reproducibility and (optional) distributed setup
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
    if args.distributed:
        init_distributed_mode(args)
    else:
        args.local_rank = 0

    if args.local_rank == 0 and not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)

    model, criterion, optimizer, lr_scheduler = initialize_setting()

    train_dataset, val_dataset, test_dataset, num_classes, in_shape = \
        data_loader.__dict__[args.dataset](
            args.data, augmentation=True, cutout=args.cutout)

    num_train = len(train_dataset)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))

    train_sampler = DistributedSampler(train_dataset) \
        if args.distributed else torch.utils.data.sampler.SubsetRandomSampler(indices[:split])

    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch_size,
                              shuffle=(train_sampler is None),
                              num_workers=args.workers,
                              pin_memory=True,
                              sampler=train_sampler)

    if args.train_portion < 1:
        valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(
            indices[split:num_train])
        val_loader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                shuffle=False,
                                sampler=valid_sampler,
                                num_workers=args.workers,
                                pin_memory=True)
    else:
        val_loader = None

    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=True)

    cudnn.benchmark = True
    writer = SummaryWriter(args.model_dir) if args.local_rank == 0 else None
    best_acc1 = 0.0

    # Hand everything to the trainer and run the fit loop
    trainer = Trainer(model, criterion, optimizer, train_sampler, lr_scheduler,
                      train_loader, val_loader, test_loader, writer, best_acc1,
                      args)

    acc1, best_acc1 = trainer.fit(init=True)

    if args.local_rank == 0:
        save_result(model, in_shape, acc1, best_acc1, args)
        writer.close()
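This example reads a module-level args object (hence the global statement). A hedged sketch of that setup, with argument names taken from the attributes used above and defaults chosen only for illustration:

import argparse

# Hypothetical module-level argument parsing; not part of the original source.
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--distributed', action='store_true')
parser.add_argument('--model_dir', default='./checkpoints')
parser.add_argument('--dataset', default='cifar10')        # key into data_loader.__dict__
parser.add_argument('--data', default='./data')
parser.add_argument('--cutout', action='store_true')
parser.add_argument('--train_portion', type=float, default=0.9)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--workers', type=int, default=4)
args = parser.parse_args()

if __name__ == '__main__':
    main()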