Example #1
def train(ds, folds, config, num_workers=0, transforms=None, skip_folds=None):
    os.makedirs(os.path.join('..', 'weights'), exist_ok=True)
    os.makedirs(os.path.join('..', 'logs'), exist_ok=True)

    for fold, (train_idx, val_idx) in enumerate(folds):
        if skip_folds and fold in skip_folds:
            continue

        tr = TrainDataset(ds, train_idx, config, transform=transforms)
        val = ValDataset(ds, val_idx, config, transform=None)
        train_loader = PytorchDataLoader(tr,
                                         batch_size=config.batch_size,
                                         shuffle=True,
                                         drop_last=True,
                                         num_workers=num_workers,
                                         pin_memory=True)
        val_loader = PytorchDataLoader(val,
                                       batch_size=config.batch_size,
                                       shuffle=False,
                                       drop_last=False,
                                       num_workers=num_workers,
                                       pin_memory=True)
        trainer = PytorchTrain(fold=fold,
                               config=config,
                               metrics=[('soft dice', dice_loss),
                                        ('hard dice', dice_clamp),
                                         ('bce', nn.BCELoss())])
        trainer.fit(train_loader, val_loader)
        trainer.writer.close()
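
The folds argument is expected to be an iterable of (train_idx, val_idx) index pairs; PytorchDataLoader is presumably torch.utils.data.DataLoader under an alias, and TrainDataset, ValDataset, PytorchTrain come from the surrounding project. A minimal sketch of producing folds with scikit-learn's KFold — an assumption, since any generator of index pairs works; the split parameters are placeholders:

import numpy as np
from sklearn.model_selection import KFold

# KFold yields (train_idx, val_idx) pairs over the dataset indices.
kf = KFold(n_splits=5, shuffle=True, random_state=42)
folds = list(kf.split(np.arange(len(ds))))
# train(ds, folds, config, num_workers=4, transforms=transforms)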
Example #2
def train(ds,
          fold,
          train_idx,
          val_idx,
          config,
          val_ds=None,
          num_workers=0,
          transforms=None,
          val_transforms=None):
    os.makedirs(os.path.join(config.results_dir, 'weights'), exist_ok=True)
    os.makedirs(os.path.join(config.results_dir, 'logs'), exist_ok=True)

    save_path = os.path.join(config.results_dir, 'weights', config.folder)
    model = models[config.network](num_classes=config.num_classes,
                                   num_channels=config.num_channels)
    estimator = Estimator(model,
                          optimizers[config.optimizer],
                          save_path,
                          config=config)

    estimator.lr_scheduler = MultiStepLR(estimator.optimizer,
                                         config.lr_steps,
                                         gamma=config.lr_gamma)
    callbacks = [
        ModelSaver(1, ("fold" + str(fold) + "_best.pth"), best_only=True),
        ModelSaver(1, ("fold" + str(fold) + "_last.pth"), best_only=False),
        CheckpointSaver(1, ("fold" + str(fold) + "_checkpoint.pth")),
        # LRDropCheckpointSaver(("fold"+str(fold)+"_checkpoint_e{epoch}.pth")),
        # ModelFreezer(),
        TensorBoard(
            os.path.join(config.results_dir, 'logs', config.folder,
                         'fold{}'.format(fold)))
    ]

    trainer = PytorchTrain(estimator,
                           fold=fold,
                           callbacks=callbacks,
                           hard_negative_miner=None)

    train_loader = PytorchDataLoader(TrainDataset(ds,
                                                  train_idx,
                                                  config,
                                                  transforms=transforms),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     drop_last=True,
                                     num_workers=num_workers,
                                     pin_memory=True)
    val_loader = PytorchDataLoader(
        ValDataset(val_ds if val_ds is not None else ds,
                   val_idx,
                   config,
                   transforms=val_transforms),
        batch_size=config.batch_size if not config.ignore_target_size else 1,
        shuffle=False,
        drop_last=False,
        num_workers=num_workers,
        pin_memory=True)

    trainer.fit(train_loader, val_loader, config.nb_epoch)
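
Example #2 reads a number of attributes from config. A minimal stand-in built with types.SimpleNamespace; the field names mirror what the function above actually accesses, while all values are placeholder assumptions:

from types import SimpleNamespace

config = SimpleNamespace(
    results_dir='results',       # weights/ and logs/ are created under this
    folder='unet_baseline',      # hypothetical run name
    network='resnet34',          # key into the project's models dict
    optimizer='adam',            # key into the project's optimizers dict
    num_classes=1,
    num_channels=3,
    batch_size=8,
    lr_steps=[20, 40],           # MultiStepLR milestones
    lr_gamma=0.5,
    nb_epoch=50,
    ignore_target_size=False,    # when True, validation batch_size is forced to 1
)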
Example #3
def train(ds, val_ds, fold, train_idx, val_idx, config,
          num_workers=0, transforms=None, val_transforms=None,
          num_channels_changed=False, final_changed=False, cycle=False):
    os.makedirs(os.path.join('..', 'weights'), exist_ok=True)
    os.makedirs(os.path.join('..', 'logs'), exist_ok=True)

    save_path = os.path.join('..', 'weights', config.folder)
    model = models[config.network](num_classes=config.num_classes, num_channels=config.num_channels)
    estimator = Estimator(model, optimizers[config.optimizer], save_path,
                          config=config, num_channels_changed=num_channels_changed, final_changed=final_changed)

    estimator.lr_scheduler = ExponentialLR(estimator.optimizer, config.lr_gamma)
    # estimator.lr_scheduler = LRStepScheduler(estimator.optimizer, config.lr_steps)
    callbacks = [
        ModelSaver(1, ("fold"+str(fold)+"_best.pth"), best_only=True),
        ModelSaver(1, ("fold"+str(fold)+"_last.pth"), best_only=False),
        CheckpointSaver(1, ("fold"+str(fold)+"_checkpoint.pth")),
        # LRDropCheckpointSaver(("fold"+str(fold)+"_checkpoint_e{epoch}.pth")),
        ModelFreezer(),
        # EarlyStopper(10),
        TensorBoard(os.path.join('..', 'logs', config.folder, 'fold{}'.format(fold)))
    ]
    # if not num_channels_changed:
    #     callbacks.append(LastCheckpointSaver("fold"+str(fold)+"_checkpoint_rgb.pth", config.nb_epoch))

    hard_neg_miner = None  # HardNegativeMiner(rate=10)
    # metrics = [('dr', dice_round)]

    trainer = PytorchTrain(estimator,
                           fold=fold,
                           callbacks=callbacks,
                           hard_negative_miner=hard_neg_miner)

    train_loader = PytorchDataLoader(TrainDataset(ds, train_idx, config, transforms=transforms),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     drop_last=True,
                                     num_workers=num_workers,
                                     pin_memory=True)
    val_loader = PytorchDataLoader(ValDataset(val_ds, val_idx, config, transforms=val_transforms),
                                   batch_size=1,
                                   shuffle=False,
                                   drop_last=False,
                                   num_workers=num_workers,
                                   pin_memory=True)

    trainer.fit(train_loader, val_loader, config.nb_epoch)
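
Example #3 differs from #2 mainly in the scheduler: ExponentialLR multiplies the learning rate by gamma after every step, whereas MultiStepLR drops it only at the given milestones. A self-contained comparison with plain torch.optim (the single parameter is a stand-in for a real model):

import torch
from torch.optim.lr_scheduler import ExponentialLR, MultiStepLR

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
sched = ExponentialLR(opt, gamma=0.9)  # lr *= 0.9 on every sched.step()
# sched = MultiStepLR(opt, milestones=[20, 40], gamma=0.5)  # lr *= 0.5 at epochs 20 and 40

for epoch in range(3):
    opt.step()    # stand-in for one training epoch
    sched.step()
    print(epoch, opt.param_groups[0]['lr'])  # 0.09, 0.081, 0.0729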
Example #4
def train(ds,
          fold,
          train_idx,
          val_idx,
          config,
          save_path,
          log_path,
          val_ds=None,
          num_workers=0,
          transforms=None,
          val_transforms=None,
          logger=None):
    #os.makedirs(os.path.join(config.results_dir, 'weights'), exist_ok=True)
    #os.makedirs(os.path.join(config.results_dir, 'logs'), exist_ok=True)
    #save_path = os.path.join(config.results_dir, 'weights', config.folder)
    model = models[config.network](num_classes=config.num_classes,
                                   num_channels=config.num_channels)
    print("model:", model)
    if logger:
        logger.info("pytorch_utils train.py config.num_channels: {}".format(
            config.num_channels))
        logger.info(
            "pytorch_utils train.py function train(),  model: {}".format(
                model))
    else:
        print("pytorch_utils train.py config.num_channels:",
              config.num_channels)
        print("pytorch_utils train.py function train(),  model:", model)
    estimator = Estimator(model,
                          optimizers[config.optimizer],
                          save_path,
                          config=config)
    #print("pytorch_utils train.py estimator:", estimator)

    estimator.lr_scheduler = MultiStepLR(estimator.optimizer,
                                         config.lr_steps,
                                         gamma=config.lr_gamma)
    callbacks = [
        ModelSaver(1, ("fold" + str(fold) + "_best.pth"), best_only=True),
        ModelSaver(1, ("fold" + str(fold) + "_last.pth"), best_only=False),
        CheckpointSaver(1, ("fold" + str(fold) + "_checkpoint.pth")),
        # LRDropCheckpointSaver(("fold"+str(fold)+"_checkpoint_e{epoch}.pth")),
        # ModelFreezer(),
        TensorBoard(
            os.path.join(log_path, config.save_weights_dir,
                         'fold{}'.format(fold))),
        #TensorBoard(os.path.join(config.results_dir, 'logs', config.save_weights_name, 'fold{}'.format(fold)))
        # AVE edit:
        EarlyStopper(config.early_stopper_patience)
    ]

    #print ("pytorch_utils.train.py test0")
    trainer = PytorchTrain(estimator,
                           fold=fold,
                           callbacks=callbacks,
                           hard_negative_miner=None)
    #print ("pytorch_utils.train.py test1")

    #z = TrainDataset(ds, train_idx, config, transforms=transforms)
    #print ("TrainDataSet:", z)
    #print ("len TrainDataSet:", len(z))
    print("pytorch_utils.train.py len train_idx", len(train_idx))
    train_loader = PytorchDataLoader(TrainDataset(ds,
                                                  train_idx,
                                                  config,
                                                  transforms=transforms),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     drop_last=True,
                                     num_workers=num_workers,
                                     pin_memory=True)
    print("pytorch_utils.train.py len train_loader", len(train_loader))
    print("  (len train_loader is num images * 8 / batch_size)")
    val_loader = PytorchDataLoader(
        ValDataset(val_ds if val_ds is not None else ds,
                   val_idx,
                   config,
                   transforms=val_transforms),
        batch_size=config.batch_size if not config.ignore_target_size else 1,
        shuffle=False,
        drop_last=False,
        num_workers=num_workers,
        pin_memory=True)
    print("pytorch_utils.train.py len val_loader:", len(val_loader))
    print("Run trainer.fit in pytorch_utils.train.py...")
    trainer.fit(train_loader, val_loader, config.nb_epoch, logger=logger)
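
Example #4 threads an optional logger through and falls back to print when it is absent; anything exposing an .info() method fits. A standard-library setup that would work here (the logger name is illustrative):

import logging

logger = logging.getLogger('pytorch_utils.train')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# train(ds, fold, train_idx, val_idx, config,
#       save_path='weights', log_path='logs', logger=logger)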
Example #5
def mix_train(datasets,
              config,
              num_workers=0,
              transforms=None,
              skip_folds=None,
              num_channels_changed=False):
    """
    Here we construct all the needed structures and specify the parameters.
    """
    os.makedirs(os.path.join(config.results_dir, 'weights'), exist_ok=True)
    os.makedirs(os.path.join(config.results_dir, 'logs'), exist_ok=True)

    # Create the model
    save_path = os.path.join(config.results_dir, 'weights', config.folder)
    model = models[config.network](num_classes=1,
                                   num_channels=config.num_channels)
    estimator = Estimator(model,
                          optimizers[config.optimizer],
                          losses[config.loss],
                          save_path,
                          iter_size=config.iter_size,
                          lr=config.lr,
                          num_channels_changed=num_channels_changed)

    callbacks = [
        StepLR(config.lr, num_epochs_per_decay=30, lr_decay_factor=0.1),
        ModelSaver(1, ("fold_best.pth"), best_only=True),
        CheckpointSaver(1, ("fold_checkpoint.pth")),
        # EarlyStopper(10),
        TensorBoard(
            os.path.join(config.results_dir, 'logs', config.folder, 'fold'))
    ]
    metrics = [('dice', dice), ('bce', binary_cross_entropy),
               ('dice round', dice_round)]
    trainer = PytorchTrain(
        estimator,
        fold='*',  # * is just for fun, since we don't use folds with mixed :P
        metrics=metrics,
        callbacks=callbacks,
        hard_negative_miner=None)
    # Create the datasets
    train_loaders = []
    val_loaders = []
    for i, ds in enumerate(datasets):
        # actually create the datasets
        if transforms:
            tfs = transforms[i]
        else:
            tfs = None
        train_loader = PytorchDataLoader(TrainDataset(ds,
                                                      list(range(len(ds))),
                                                      config,
                                                      transforms=tfs),
                                         batch_size=config.batch_size,
                                         shuffle=True,
                                         drop_last=True,
                                         num_workers=num_workers,
                                         pin_memory=False)
        val_loader = PytorchDataLoader(ValDataset(ds,
                                                  list(range(len(ds))),
                                                  config,
                                                  transforms=None),
                                       batch_size=config.batch_size,
                                       shuffle=True,  # note: validation is shuffled here, unlike the other examples
                                       drop_last=False,
                                       num_workers=num_workers,
                                       pin_memory=False)
        train_loaders.append(train_loader)
        val_loaders.append(val_loader)

    trainer.fit_mixed(train_loaders, val_loaders, config.nb_epoch)
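
fit_mixed is handed one loader per dataset; how it interleaves them is project-specific. A generic round-robin sketch of the idea, not this project's implementation:

def round_robin(loaders):
    """Yield batches alternately from each loader until all are exhausted."""
    iterators = [iter(ld) for ld in loaders]
    while iterators:
        for it in list(iterators):
            try:
                yield next(it)
            except StopIteration:
                iterators.remove(it)

# for batch in round_robin(train_loaders):
#     ...one optimization step per batch...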
Example #6
def train(ds,
          folds,
          config,
          num_workers=0,
          transforms=None,
          skip_folds=None,
          num_channels_changed=False):
    """
    Here we construct all the needed structures and specify the parameters.
    """
    os.makedirs(os.path.join(config.results_dir, 'weights'), exist_ok=True)
    os.makedirs(os.path.join(config.results_dir, 'logs'), exist_ok=True)

    for fold, (train_idx, val_idx) in enumerate(folds):
        # train_idx = [train_idx[0]]
        if skip_folds and fold in skip_folds:
            continue

        save_path = os.path.join(config.results_dir, 'weights', config.folder)
        model = models[config.network](num_classes=1,
                                       num_channels=config.num_channels)
        estimator = Estimator(model,
                              optimizers[config.optimizer],
                              losses[config.loss],
                              save_path,
                              iter_size=config.iter_size,
                              lr=config.lr,
                              num_channels_changed=num_channels_changed)

        callbacks = [
            StepLR(config.lr, num_epochs_per_decay=30, lr_decay_factor=0.1),
            ModelSaver(1, ("fold" + str(fold) + "_best.pth"), best_only=True),
            CheckpointSaver(1, ("fold" + str(fold) + "_checkpoint.pth")),
            # EarlyStopper(10),
            TensorBoard(
                os.path.join(config.results_dir, 'logs', config.folder,
                             'fold{}'.format(fold)))
        ]

        # hard_neg_miner = HardNegativeMiner(rate=10)
        metrics = [('dice', dice), ('bce', binary_cross_entropy),
                   ('dice round', dice_round)]
        # metrics = []
        trainer = PytorchTrain(estimator,
                               fold=fold,
                               metrics=metrics,
                               callbacks=callbacks,
                               hard_negative_miner=None)

        train_loader = PytorchDataLoader(TrainDataset(ds,
                                                      train_idx,
                                                      config,
                                                      transforms=transforms),
                                         batch_size=config.batch_size,
                                         shuffle=True,
                                         drop_last=True,
                                         num_workers=num_workers,
                                         pin_memory=True)
        val_loader = PytorchDataLoader(ValDataset(ds,
                                                  val_idx,
                                                  config,
                                                  transforms=None),
                                       batch_size=config.batch_size,
                                       shuffle=False,
                                       drop_last=False,
                                       num_workers=num_workers,
                                       pin_memory=True)

        trainer.fit(train_loader, val_loader, config.nb_epoch)
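
Examples #2 through #7 all resolve the network, optimizer, and loss through registry dicts keyed by config strings. A minimal sketch of that pattern with a stand-in network; the dict entries are hypothetical:

import torch.nn as nn
import torch.optim as optim

def tiny_net(num_classes, num_channels):
    # Stand-in for a real segmentation backbone.
    return nn.Conv2d(num_channels, num_classes, kernel_size=1)

models = {'resnet34': tiny_net}
optimizers = {'adam': optim.Adam, 'sgd': optim.SGD}
losses = {'bce': nn.BCEWithLogitsLoss()}

model = models['resnet34'](num_classes=1, num_channels=3)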
Example #7
def onetrain(trainds, valds, ntrain, nval, config, num_workers=0, transforms=None,
             num_channels_changed=False):
    """
    Here we construct all the needed structures and specify the parameters.
    """
    os.makedirs(os.path.join(config.results_dir, 'weights'), exist_ok=True)
    os.makedirs(os.path.join(config.results_dir, 'logs'), exist_ok=True)

    train_idx = list(range(ntrain))
    val_idx = list(range(nval))

    print('config.network: {}'.format(config.network))

    save_path = os.path.join(config.results_dir, 'weights', config.folder)
    model = None
    if config.network == 'extensionunet':
        model = models[config.network](in_channels=config.num_channels, n_classes=1,
                                       nonlinearity='leaky_relu')
    elif config.network == 'resnet34':
        model = models[config.network](num_classes=1, num_channels=config.num_channels)
    elif config.network == 'denseunet':
        model = models[config.network](in_channels=config.num_channels, n_classes=1)

    estimator = Estimator(model, optimizers[config.optimizer], losses[config.loss], save_path,
                          iter_size=config.iter_size, test_iter_size=config.test_iter_size,
                          lr=config.lr, num_channels_changed=num_channels_changed)

    callbacks = [
        StepLR(config.lr, num_epochs_per_decay=30, lr_decay_factor=0.1),
        # InverseLR(config.lr, decay_rate=0.95, decay_steps=500),
        ModelSaver(1, 20, ("onetrain_best.pth"), best_only=True),
        CheckpointSaver(1, 20, ("onetrain_checkpoint.pth")),
        # EarlyStopper(10),
        TensorBoard(os.path.join(config.results_dir, 'logs', config.folder, 'onetrain'))
    ]

    # hard_neg_miner = HardNegativeMiner(rate=10)
    metrics = [('dice', dice), ('bce', binary_cross_entropy), ('dice round', dice_round)]
    # metrics = []
    trainer = PytorchTrain(estimator,
                           fold=-1,
                           metrics=metrics,
                           callbacks=callbacks,
                           hard_negative_miner=None)

    train_loader = PytorchDataLoader(TrainDataset(trainds, train_idx, config,
                                     transforms=transforms),
                                     batch_size=config.batch_size,
                                     shuffle=True,
                                     drop_last=True,
                                     num_workers=num_workers,
                                     pin_memory=True)
    val_loader = PytorchDataLoader(ValDataset(valds, val_idx, config, transforms=None),
                                   batch_size=config.test_batch_size,
                                   shuffle=False,
                                   drop_last=False,
                                   num_workers=num_workers,
                                   pin_memory=True)

    print('ntrain : nval = {} : {}'.format(ntrain, nval))
    print('len(train_loader) : len(val_loader) = {} : {}'.format(len(train_loader),
          len(val_loader)))

    trainer.fit(train_loader, val_loader, config.nb_epoch, config.folder)
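
The length prints in Examples #4 and #7 follow from how DataLoader computes its length: len(dataset) // batch_size with drop_last=True, and math.ceil(len(dataset) / batch_size) otherwise. A quick check:

import math
import torch
from torch.utils.data import DataLoader, TensorDataset

ds10 = TensorDataset(torch.zeros(10, 1))
assert len(DataLoader(ds10, batch_size=4, drop_last=True)) == 10 // 4             # 2
assert len(DataLoader(ds10, batch_size=4, drop_last=False)) == math.ceil(10 / 4)  # 3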