Example #1
def WNet(feature_maps=None, nb_classes=6, dropout=0.65, model_name_suffix=""):
    if feature_maps is None:
        feature_maps = [64, 128, 256, 512, 1024]

    encoder_input, encoder_output = UNet(input_size=(256, 256, 3),
                                         feature_maps=feature_maps,
                                         nb_classes=nb_classes,
                                         dropout=dropout,
                                         conv_padding="same",
                                         build_model=False)
    encoder_output = K.one_hot(K.argmax(encoder_output), nb_classes)
    _, decoder_output = UNet(input_layer=encoder_output,
                             nb_classes=3,
                             output_layer_activation="sigmoid",
                             dropout=dropout,
                             feature_maps=feature_maps,
                             conv_padding="same",
                             build_model=False)

    nb_conv_layers = 6 + 10 * (len(feature_maps) - 1)
    dropout_suffix = "D" if dropout > 0.0 else ""
    model_name = f"WNet-{nb_conv_layers}{dropout_suffix}-{nb_classes}{model_name_suffix}"
    full_model = Model(name=model_name,
                       inputs=encoder_input,
                       outputs=decoder_output)
    encoder_model = Model(name=f"{model_name}-Encoder",
                          inputs=encoder_input,
                          outputs=encoder_output)
    return full_model, encoder_model
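A minimal usage sketch for the builder above (assumption: the returned Keras models are compiled with an illustrative optimizer and loss, not the ones from the original project):

full_model, encoder_model = WNet(nb_classes=6, dropout=0.65)
full_model.compile(optimizer="adam", loss="mse")  # illustrative reconstruction-style loss
full_model.summary()
encoder_model.summary()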
Example #2
def main(params):
    config = get_config(**vars(params))

    checkpoint_dir = os.path.join(params.checkpoint, 'checkpoints')
    figures_dir = os.path.join(params.checkpoint, 'figures')
    checkpoint = [os.path.join(checkpoint_dir, x) for x in os.listdir(checkpoint_dir)][0]

    model = UNet.load_from_checkpoint(checkpoint_path=checkpoint)
    model.freeze()
    model.hparams.dataset_folder = '/media/nvm/itype_/pth_snt/2019'
    model.hparams.batch_size = 1

    if params.metrics:
        trainer = Trainer(
            precision=16,
            gpus=config.device_ct,
            num_nodes=config.node_ct,
            log_every_n_steps=5)

        trainer.test(model)

    loader = model.val_dataloader()
    for i, (x, y) in enumerate(loader):
        out = model(x)
        pred = out.argmax(1)
        x, y, pred = x.squeeze().numpy(), y.squeeze().numpy(), pred.squeeze().numpy()
        fig = os.path.join(figures_dir, '{}.png'.format(i))
        plot_prediction(x, y, pred, model.mode, out_file=fig)
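A hedged sketch of the command-line entry point this main() implies; the argument names mirror the attributes accessed above (params.checkpoint, params.metrics), but the original project's CLI is not shown here:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', required=True,
                        help='run directory containing checkpoints/ and figures/')
    parser.add_argument('--metrics', action='store_true',
                        help='also run trainer.test() before plotting predictions')
    main(parser.parse_args())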
Example #3
def main(params):

    config = get_config(**vars(params))

    model = UNet(**vars(config))

    log_dir = prepare_output(config)
    logger = TensorBoardLogger(log_dir, name='log')

    checkpoint_callback = ModelCheckpoint(dirpath=os.path.join(
        log_dir, 'checkpoints'),
                                          save_top_k=1,
                                          save_last=True,
                                          monitor='val_acc',
                                          verbose=True)

    lr_monitor = LearningRateMonitor(logging_interval='epoch')

    accelerator = 'ddp' if config.device_ct > 1 else None

    trainer = Trainer(precision=16,
                      min_epochs=50,
                      accelerator=accelerator,
                      gpus=config.device_ct,
                      num_nodes=config.node_ct,
                      callbacks=[checkpoint_callback, lr_monitor],
                      progress_bar_refresh_rate=params.progress,
                      log_every_n_steps=5,
                      logger=logger)

    trainer.fit(model)
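prepare_output() is not shown in this example; a hypothetical sketch that satisfies how it is used above (return a writable directory for the logger and checkpoints) could be:

import os
from datetime import datetime

def prepare_output(config):
    # Hypothetical helper: create and return a timestamped run directory.
    base = getattr(config, 'output_dir', 'runs')
    log_dir = os.path.join(base, datetime.now().strftime('%Y%m%d-%H%M%S'))
    os.makedirs(log_dir, exist_ok=True)
    return log_dir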
Example #4
def get_model(config):
    model = None

    if config['model'] == 'unet':
        model = UNet(config)

    return model
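An illustrative call for this factory; every key other than 'model' is a placeholder, since the project's UNet(config) constructor defines what the config must contain:

config = {'model': 'unet'}  # plus whatever keys UNet(config) expects
model = get_model(config)
assert model is not None, "unsupported value for config['model']"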
Example #5
def get_model(params):
    arch = params.arch_name
    if arch.lower() == "unet":
        model = UNet(in_channels=3, out_channels=params.num_classes, init_features=32,
                     pretrained=params.pretrained, weights=params.model_weights)
    elif arch.lower() == "unet_resnet34":
        weights = "imagenet" if params.pretrained else None
        model = smp.Unet(encoder_name="resnet34", encoder_depth=5, in_channels=3, classes=1, encoder_weights=weights,
                         activation="sigmoid")
    elif arch.lower() == "unet_resnet34_scse":
        weights = "imagenet" if params.pretrained else None
        model = smp.Unet(encoder_name="resnet34", encoder_depth=5, in_channels=3, classes=1, encoder_weights=weights,
                         activation="sigmoid", decoder_attention_type="scse")
    elif arch.lower() == "unet_resnet50":
        weights = "imagenet" if params.pretrained else None
        model = smp.Unet(encoder_name="resnet50", encoder_depth=5, in_channels=3, classes=1, encoder_weights=weights,
                         activation="sigmoid")
    elif arch.lower() == "unet_resnet50_scse":
        weights = "imagenet" if params.pretrained else None
        model = smp.Unet(encoder_name="resnet50", encoder_depth=5, in_channels=3, classes=1, encoder_weights=weights,
                         activation="sigmoid", decoder_attention_type="scse")
    else:
        raise NotImplementedError("Unknown architecture: '{}'".format(arch))
    return model
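The four smp.Unet branches above differ only in encoder name and decoder attention type; a hedged, table-driven rewrite under the same assumptions (3 input channels, single sigmoid output) might look like this:

SMP_VARIANTS = {
    "unet_resnet34": ("resnet34", None),
    "unet_resnet34_scse": ("resnet34", "scse"),
    "unet_resnet50": ("resnet50", None),
    "unet_resnet50_scse": ("resnet50", "scse"),
}

def build_smp_unet(arch, pretrained):
    # Sketch only: same keyword arguments as in get_model(), parameterized by a lookup table.
    encoder_name, attention = SMP_VARIANTS[arch.lower()]
    return smp.Unet(encoder_name=encoder_name,
                    encoder_depth=5,
                    in_channels=3,
                    classes=1,
                    encoder_weights="imagenet" if pretrained else None,
                    activation="sigmoid",
                    decoder_attention_type=attention)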
Example #6
def do_training(initial_learning_rate=0.1):
    gids = get_gids_from_database("unet")
    training_gen, validation_gen = initialize_train_and_validation_generators(
        "unet", gids, batch_size=4, label_target_size=388)
    steps_per_epoch = next(training_gen)
    validation_steps = next(validation_gen)

    model = UNet(input_size=(572, 572, 3))
    metrics = [
        Accuracy(),
        CategoricalAccuracy(),
        CategoricalCrossentropy(),
        ArgmaxMeanIoU(num_classes=6, name="mean_iou")
    ]
    optimizer = SGD(learning_rate=initial_learning_rate,
                    momentum=0.99,
                    nesterov=True)
    model.compile(optimizer=optimizer,
                  loss=categorical_crossentropy,
                  metrics=metrics)

    start_time = int(time.time())
    model_path = f"weights/{start_time}_{model.name}/"
    os.mkdir(model_path)

    metrics_to_log = [
        "loss", "accuracy", "categorical_accuracy", "mean_iou",
        "categorical_crossentropy"
    ]
    callbacks = [
        save_model_on_epoch_end(model.name, model, model_path),
        metrics_to_csv_logger(model_path + "batch.csv", metrics_to_log),
        CSVLogger(model_path + "epoch.csv", separator=";"),
        LearningRateScheduler(lr_schedule(initial_lr=initial_learning_rate)),
    ]

    model.fit(training_gen,
              epochs=20,
              steps_per_epoch=steps_per_epoch,
              validation_data=validation_gen,
              validation_steps=validation_steps,
              callbacks=callbacks)
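lr_schedule() is a project helper that is not shown here; a hypothetical step-decay factory compatible with Keras' LearningRateScheduler (which expects a function mapping the epoch index to a learning rate) could be:

def lr_schedule(initial_lr=0.1, drop_factor=0.5, epochs_per_drop=5):
    # Hypothetical sketch: cut the learning rate by drop_factor every few epochs.
    def schedule(epoch, lr=None):
        return initial_lr * (drop_factor ** (epoch // epochs_per_drop))
    return schedule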
Example #7
    arg = parser.parse_args()

    num_classes = 16
    model_name = arg.model
    learning_rate = arg.l_rate
    num_epochs = arg.n_epoch
    batch_size = arg.batch_size

    history = collections.defaultdict(list)
    model_dict = {
        'unet': UNet(num_classes=num_classes).train().to(device),
        'segnet': segnet(n_classes=num_classes).train().to(device),
        'pspnet': smp.PSPNet(classes=num_classes).train().to(device),
    }

    net = model_dict[model_name]
    if torch.cuda.device_count() > 1:
        print("using multi gpu")
        net = torch.nn.DataParallel(net, device_ids=[0, 1, 2, 3])
    else:
        print('using one gpu')

    # if True:
    #     print("The checkpoint has been loaded successfully")
    #     net = torch.load("./model/unet_2019-07-23.pth")  # load the pretrained model
    criterion = FocalLoss2d().to(device)
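FocalLoss2d is project-specific and not defined in this excerpt; a minimal, hypothetical sketch of a multi-class focal loss for dense prediction (logits of shape (N, C, H, W), integer targets of shape (N, H, W)) is:

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss2dSketch(nn.Module):
    # Hypothetical stand-in for the FocalLoss2d used above, not the project's implementation.
    def __init__(self, gamma=2.0):
        super().__init__()
        self.gamma = gamma

    def forward(self, logits, target):
        ce = F.cross_entropy(logits, target, reduction='none')  # per-pixel cross-entropy
        pt = torch.exp(-ce)                                      # probability of the true class
        return ((1.0 - pt) ** self.gamma * ce).mean()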
Example #8
def main(params):
    config = get_config(**vars(params))

    model = UNet(config)
    for k, v in model.hparams.items():
        print(k, v)
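The model.hparams loop above implies that UNet is a PyTorch Lightning module that stores its constructor arguments; a minimal sketch of that assumption (not the project's class):

import pytorch_lightning as pl
import torch.nn as nn

class UNetLikeSketch(pl.LightningModule):
    # save_hyperparameters() is what populates self.hparams for the loop above.
    def __init__(self, config):
        super().__init__()
        self.save_hyperparameters(vars(config))
        self.body = nn.Identity()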