Example #1
import cv2
import numpy as np

def unet_masking(gray_image):
    # IMAGE_SIZE, normalize_x and denormalize_y are assumed to be defined
    # elsewhere in the source module
    from src.unet import UNet
    size = (gray_image.shape[1], gray_image.shape[0])  # (width, height)
    images = np.zeros((1, IMAGE_SIZE, IMAGE_SIZE, 1), np.float32)
    image = cv2.resize(gray_image, (IMAGE_SIZE, IMAGE_SIZE))
    image = image[:, :, np.newaxis]
    #images[0] = normalize_x(image)
    images[0] = image

    input_channel_count = 1
    output_channel_count = 1
    first_layer_filter_count = 64
    network = UNet(input_channel_count, output_channel_count,
                   first_layer_filter_count)
    model = network.get_model()
    model.load_weights('unet_weights.hdf5')
    BATCH_SIZE = 1

    Y_pred = model.predict(images, BATCH_SIZE)
    y = cv2.resize(Y_pred[0], size)
    y_dn = denormalize_y(y)
    y_dn = np.uint8(y_dn)
    ret, mask = cv2.threshold(y_dn, 0, 255, cv2.THRESH_OTSU)
    masked = cv2.bitwise_and(gray_image, mask)
    mask_rest = cv2.bitwise_not(mask)
    masked = cv2.bitwise_or(masked, mask_rest)

    return masked
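A minimal usage sketch for the function above, assuming a grayscale image on disk (the file names are placeholders):

import cv2

gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)  # placeholder file name
masked = unet_masking(gray)
cv2.imwrite('masked.png', masked)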
Example #2
def train_net(data_dir,
              cross_valid_ind=1,
              epochs=400,
              batch_size=16,
              lr=0.0001,
              run_distribute=False,
              cfg=None):

    if run_distribute:
        init()
        group_size = get_group_size()
        parallel_mode = ParallelMode.DATA_PARALLEL
        context.set_auto_parallel_context(parallel_mode=parallel_mode,
                                          device_num=group_size,
                                          gradients_mean=False)
    net = UNet(n_channels=cfg['num_channels'], n_classes=cfg['num_classes'])

    if cfg['resume']:
        param_dict = load_checkpoint(cfg['resume_ckpt'])
        load_param_into_net(net, param_dict)

    criterion = CrossEntropyWithLogits()
    train_dataset, _ = create_dataset(data_dir, epochs, batch_size, True,
                                      cross_valid_ind, run_distribute)
    train_data_size = train_dataset.get_dataset_size()
    print("dataset length is:", train_data_size)
    ckpt_config = CheckpointConfig(
        save_checkpoint_steps=train_data_size,
        keep_checkpoint_max=cfg['keep_checkpoint_max'])
    # device_id is assumed to be defined at module level in the source
    # (e.g. read from the DEVICE_ID environment variable)
    ckpoint_cb = ModelCheckpoint(prefix='ckpt_unet_medical_adam',
                                 directory='./ckpt_{}/'.format(device_id),
                                 config=ckpt_config)

    optimizer = nn.Adam(params=net.trainable_params(),
                        learning_rate=lr,
                        weight_decay=cfg['weight_decay'],
                        loss_scale=cfg['loss_scale'])

    loss_scale_manager = mindspore.train.loss_scale_manager.FixedLossScaleManager(
        cfg['FixedLossScaleManager'], False)

    model = Model(net,
                  loss_fn=criterion,
                  loss_scale_manager=loss_scale_manager,
                  optimizer=optimizer,
                  amp_level="O3")

    print("============== Starting Training ==============")
    # the dataset is built with `epochs` repeats in create_dataset above,
    # so a single training pass here covers the whole schedule
    model.train(
        1,
        train_dataset,
        callbacks=[StepLossTimeMonitor(batch_size=batch_size), ckpoint_cb],
        dataset_sink_mode=False)
    print("============== End Training ==============")
Example #3
    def __init__(self, model_features, encoder_depth, padding_mode="zeros"):
        super(Net, self).__init__()
        self.stft = STFT()
        self.unet = UNet(model_features=model_features,
                         encoder_depth=encoder_depth,
                         padding_mode=padding_mode)
        self.masking = ComplexMaskOnPolarCoo()
        self.istft = ISTFT()
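The snippet shows only the constructor. A hypothetical forward pass consistent with the four modules (the real method is not part of the excerpt, and the call signatures are assumptions):

    def forward(self, x):
        spec = self.stft(x)                # waveform -> spectrogram (assumed)
        mask = self.unet(spec)             # U-Net estimates a mask (assumed)
        masked = self.masking(spec, mask)  # apply the complex mask (assumed)
        return self.istft(masked)          # back to the time domain (assumed)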
Example #4
class GAN(nn.Module):
    def __init__(self,
                 Cin,
                 Cout,
                 Cnoise,
                 n_blocks=5,
                 filter_factors=None,
                 kernel_size=3,
                 dropout=0.5,
                 disc_size=64,
                 bottleneck_dim=27,
                 device=None,
                 multi_disc=False,
                 use_leaky=False,
                 conditional_disc=False):
        super(GAN, self).__init__()
        self.bottleneck_dim = bottleneck_dim
        self.g = UNet(
            Cin + Cnoise,
            Cout,
            n_blocks,
            filter_factors,
            kernel_size,
            dropout,
            bottleneck_dim,
            device,
            use_leaky,
        )
        Cin_disc = Cin + Cout if conditional_disc else Cout
        self.d = (Discriminator(Cin_disc, disc_size, device=device)
                  if not multi_disc else MultiDiscriminator(Cin_disc,
                                                            device=device))

        self.g.apply(self.init_weights)
        # self.d.apply(self.init_weights) # d has own init

    def init_weights(self, m):
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)
            nn.init.uniform_(m.bias, -0.1, 0.1)
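A hypothetical instantiation of the class above; the channel counts and tensor shapes are illustrative and depend on the project's UNet implementation:

import torch

gan = GAN(Cin=3, Cout=3, Cnoise=8, conditional_disc=True)
x = torch.randn(1, 3, 64, 64)      # conditioning input (illustrative shape)
noise = torch.randn(1, 8, 64, 64)  # noise channels
fake = gan.g(torch.cat([x, noise], dim=1))  # generator forward pass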
Example #5
def test_net(data_dir, ckpt_path, cross_valid_ind=1, cfg=None):
    net = UNet(n_channels=cfg['num_channels'], n_classes=cfg['num_classes'])
    param_dict = load_checkpoint(ckpt_path)
    load_param_into_net(net, param_dict)

    criterion = CrossEntropyWithLogits()
    _, valid_dataset = create_dataset(data_dir, 1, 1, False, cross_valid_ind,
                                      False)
    model = Model(net, loss_fn=criterion, metrics={"dice_coeff": dice_coeff()})

    print("============== Starting Evaluating ============")
    dice_score = model.eval(valid_dataset, dataset_sink_mode=False)
    print("Cross valid dice coeff is:", dice_score)
Example #6
def load_model(use_cpu=False):
    """
    Loads the model.
    :param use_cpu: if True, use cpu and not cuda
    :return: the model, the device
    """

    device = torch.device('cpu' if use_cpu else 'cuda')
    model = UNet(1, 1, bilinear=False)
    model = model.to(device)
    # MODEL_PATH is assumed to be a module-level constant in the source
    model.load_state_dict(torch.load(MODEL_PATH, map_location=device))
    model.eval()
    return model, device
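A short usage sketch for the loader above (the tensor shape is illustrative):

import torch

model, device = load_model(use_cpu=True)
x = torch.randn(1, 1, 256, 256, device=device)  # (batch, channels, H, W)
with torch.no_grad():
    y = model(x)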
Example #7
def predict(tiles_dir, mask_dir, tile_size, device, chkpt):
    # load the network and restore the checkpointed weights
    net = UNet(2).to(device)
    net = nn.DataParallel(net)
    net.load_state_dict(chkpt["state_dict"])
    net.eval()

    # preprocessing transform (ImageNet mean and std)
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    transform = Compose([
        ConvertImageMode(mode="RGB"),
        ImageToTensor(),
        Normalize(mean=mean, std=std)
    ])

    # wrap the tile directory in a buffered dataset
    directory = BufferedSlippyMapDirectory(tiles_dir,
                                           transform=transform,
                                           size=tile_size)
    assert len(directory) > 0, "at least one tile in dataset"

    # loading data
    loader = DataLoader(directory, batch_size=1)

    # don't track tensors with autograd during prediction
    with torch.no_grad():
        for images, tiles in tqdm(loader,
                                  desc="Eval",
                                  unit="batch",
                                  ascii=True):
            images = images.to(device)
            outputs = net(images)

            # manually compute segmentation mask class probabilities per pixel
            probs = nn.functional.softmax(outputs, dim=1).data.cpu().numpy()

            for tile, prob in zip(tiles, probs):
                x, y, z = list(map(int, tile))

                prob = directory.unbuffer(prob)
                mask = np.argmax(prob, axis=0)
                mask = mask * 200
                mask = mask.astype(np.uint8)

                palette = make_palette("dark", "light")
                out = Image.fromarray(mask, mode="P")
                out.putpalette(palette)

                os.makedirs(os.path.join(mask_dir, str(z), str(x)),
                            exist_ok=True)
                path = os.path.join(mask_dir, str(z), str(x), str(y) + ".png")
                out.save(path, optimize=True)

    print("Prediction Done, saved masks to " + mask_dir)
Example #8
    # resize inputs to the network resolution; nearest-neighbour settings
    # keep the target label values intact
    images_res = [resize(img, (256, 256, 3)) for img in images]
    resize_kwargs = {'order': 0,
                     'anti_aliasing': False, 'preserve_range': True}
    targets_res = [resize(tar, (256, 256), **resize_kwargs) for tar in targets]

    # device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # model
    model = UNet(in_channels=3,
                 out_channels=3,
                 n_blocks=4,
                 start_filters=32,
                 activation='relu',
                 normalization='batch',
                 conv_mode='same',
                 dim=2).to(device)

    model_name = 'seg_unet_model.pt'
    model_weights = torch.load(pathlib.Path.cwd() / model_name)

    model.load_state_dict(model_weights)
    # predict the segmentation maps (preprocess and postprocess are assumed
    # helper functions from the source module)
    output = [predict(img, model, preprocess, postprocess, device)
              for img in images_res]

    # view predictions with napari
    # (the 't' key shortcut for stepping to the next image does not work as
    # expected yet)
    with napari.gui_qt():
        # assumed continuation: the source snippet is truncated here; showing
        # inputs alongside predictions is one plausible body
        viewer = napari.Viewer()
        for img, pred in zip(images_res, output):
            viewer.add_image(img)
            viewer.add_labels(pred)  # assumes predict() returns label maps
Example #9
def loop():
    # num_classes, lr, model_path, loss_func, target_size, batch_size,
    # dataset_path, num_epochs, target_type and checkpoint_path are assumed
    # to be module-level settings in the source
    if not torch.cuda.is_available():
        sys.exit("Error: CUDA requested but not available")

    device = torch.device("cuda")

    # class weights for the weighted loss functions (CrossEntropy, mIoU and
    # Focal below require them); hard-coded here, though they would normally
    # come from the dataset configuration
    weight = torch.Tensor([1.513212, 10.147043])

    # load the model
    net = UNet(num_classes)
    net = DataParallel(net)
    net = net.to(device)

    # define the optimizer
    optimizer = Adam(net.parameters(), lr=lr)

    # resume training
    if model_path:
        chkpt = torch.load(model_path, map_location=device)
        net.load_state_dict(chkpt["state_dict"])
        optimizer.load_state_dict(chkpt["optimizer"])

    # select the loss function
    if loss_func == "CrossEntropy":
        criterion = CrossEntropyLoss2d(weight=weight).to(device)
    elif loss_func == "mIoU":
        criterion = mIoULoss2d(weight=weight).to(device)
    elif loss_func == "Focal":
        criterion = FocalLoss2d(weight=weight).to(device)
    elif loss_func == "Lovasz":
        criterion = LovaszLoss2d().to(device)
    else:
        sys.exit("Error: unknown loss function value!")

    # loading data
    train_loader, val_loader = get_dataset_loaders(target_size, batch_size, dataset_path)
    history = collections.defaultdict(list)

    # training loop
    for epoch in range(num_epochs):

        print("Epoch: " + str(epoch + 1))
        train_hist = train(train_loader, num_classes, device, net, optimizer, criterion)
        
        val_hist = validate(val_loader, num_classes, device, net, criterion)
        
        print("Train loss: {:.4f}, mIoU: {:.3f}, {} IoU: {:.3f}, MCC: {:.3f}".format(
                train_hist["loss"], train_hist["miou"], target_type, train_hist["fg_iou"], train_hist["mcc"]))
        
        print("Validation loss: {:.4f}, mIoU: {:.3f}, {} IoU: {:.3f}, MCC: {:.3f}".format(
                 val_hist["loss"], val_hist["miou"], target_type, val_hist["fg_iou"], val_hist["mcc"]))
        
        for key, value in train_hist.items():
            history["train " + key].append(value)

        for key, value in val_hist.items():
            history["val " + key].append(value)

        if (epoch + 1) % 5 == 0:
            # the plotter uses the history values, so no separate log is needed
            visual = "history-{:05d}-of-{:05d}.png".format(epoch + 1, num_epochs)
            plot(os.path.join(checkpoint_path, visual), history)
        
        if (epoch + 1) % 20 == 0:
            checkpoint = target_type + "-checkpoint-{:03d}-of-{:03d}.pth".format(epoch + 1, num_epochs)
            states = {"epoch": epoch + 1, "state_dict": net.state_dict(), "optimizer": optimizer.state_dict()}
            torch.save(states, os.path.join(checkpoint_path, checkpoint))
Example #10
# Model path
UNET_MODEL_PATH = os.environ.get("UNET_MODEL_PATH")


if __name__ == '__main__':
    is_generator = False
    test_frames = None
    test_masks = None

    # guard against the environment variable being unset (None would make
    # os.path.isfile raise a TypeError)
    if not UNET_MODEL_PATH or not os.path.isfile(UNET_MODEL_PATH):
        quit(f"Model file not found: {UNET_MODEL_PATH}")
    if not os.path.isdir(FRAMES_TEST_IN_PATH) or not os.path.isdir(MASKS_TEST_IN_PATH):
        quit(f"Directory not found: {FRAMES_TEST_IN_PATH} or {MASKS_TEST_IN_PATH}")

    # Create model
    model = UNet.build(pre_trained=True, model_path=UNET_MODEL_PATH, n_classes=3, input_h=320, input_w=320)

    # If images are loaded from generator
    if is_generator:
        test_data_generated = data_generator(frames_path=FRAMES_TEST_IN_PATH,
                                             masks_path=MASKS_TEST_IN_PATH,
                                             fnames=os.listdir(MASKS_TEST_IN_PATH),
                                             input_h=320,
                                             input_w=320,
                                             n_classes=3,
                                             batch_size=10,
                                             is_resizable=True)
        predicted_masks = model.predict_generator(test_data_generated, steps=15)

    # If images are loaded from a builder (no generator)
    else:
        # the snippet is truncated here in the source; presumably the test
        # frames and masks are loaded as arrays and passed to model.predict()
        pass
Example #11
        f"unet_log_on_{date.today().strftime('%m_%d_%y')}.csv"),
                             append=True)
    # callback 2: saving model
    cb_checkpoint = ModelCheckpoint(model_checkpoint_path,
                                    monitor='dice',
                                    verbose=1,
                                    mode='max',
                                    save_best_only=True,
                                    save_weights_only=False,
                                    period=2)  # save at most every 2 epochs

    callback_list = [cb_checkpoint, cb_csvlogger]

    # Create model
    model = UNet.build(pre_trained=False,
                       model_path=MODELS_OUT_PATH,
                       n_classes=3,
                       input_h=320,
                       input_w=320)

    history = model.fit_generator(
        generator=train_generator,
        # train_len (850 images) = batch_size * steps_per_epoch
        steps_per_epoch=32,
        validation_data=val_generator,
        # val_len (150 images) = batch_size * validation_steps
        validation_steps=20,
        epochs=200,
        verbose=2,
        callbacks=callback_list)
Example #12
def create_network(name, *args, **kwargs):
    if name == "unet2d":
        unet = UNet(*args, **kwargs)
        return unet
    raise NotImplementedError(f"{name} is not implemented in the repo")
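The keyword arguments are forwarded straight to UNet, so they are project-specific; the ones below are hypothetical:

net = create_network("unet2d", in_channels=1, n_classes=2)  # hypothetical kwargs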
Example #13
    ]

nl = NeuronLoader(url, data, train_opts, test_opts)

if test:
    print('Beginning Test Suite...')
    subprocess.call('python -m pytest', shell=True)
else:
    if model == 'nmf':
        if preprocess:
            print('Preprocessing techniques have not been tested with NMF.')
            print('Proceeding with the regular NMF technique.')
        model = Nmf(nl.test_files)
        model.get_output()
    elif model == 'unet':
        # preprocessing is optional; the model is built the same way either way
        if preprocess:
            Preprocessing(nl.data, transform, _filter)
        model = UNet(data=nl.data)
        model.run()
    elif model == 'sparsepca':
        if preprocess:
            print('Preprocessing techniques have not been tested with '
                  'Sparse PCA.')
            print('Proceeding with the regular Sparse PCA technique.')
        sparse_pca(nl.test_files, nl.data)