Example #1
def main(unused_argv):
    tf.logging.set_verbosity(3)

    try:
        config = process_config()

    except Exception:  # avoid a bare except; abort if the config cannot be processed
        exit(1)

    create_dirs(config, [config.checkpoint_dir, config.evaluate_dir, config.presentation_dir, config.summary_dir])

    session = tf.Session()
    K.set_session(session)

    if config.mode == "evaluate":
        model = Unet(config, is_evaluating=True)
        trainer = Trainer(config, None, None, model, session)

        sat_data = [Aracati.load_data(file, is_grayscale=False) for file in
                    sorted(glob.glob("./datasets/aracati/test/input/*.png"))]
    # wrap each test image in its own batch of size 1
    sat_data = [sat_data[i:i+1] for i in range(len(sat_data))]

        model.load(session)
        trainer.evaluate_data(sat_data, Aracati.save_data)

    else:
        data = Aracati(config)
        model = Unet(config)
        logger = Logger(config, session)
        trainer = Trainer(config, data, logger, model, session)

        if config.mode == "restore":
            model.load(session)

        trainer.train()
Example #2
def main():
    json_path = os.path.join(args.model_dir)
    params = utils.Params(json_path)

    net = Unet(params.model).cuda()
    # TODO - check exists
    #checkpoint = torch.load('./final.pth.tar')
    #net.load_state_dict(checkpoint)

    train_dataset = AudioDataset(data_type='train')
    test_dataset = AudioDataset(data_type='val')
    train_data_loader = DataLoader(dataset=train_dataset,
                                   batch_size=args.batch_size,
                                   collate_fn=train_dataset.collate,
                                   shuffle=True,
                                   num_workers=4)
    test_data_loader = DataLoader(dataset=test_dataset,
                                  batch_size=args.batch_size,
                                  collate_fn=test_dataset.collate,
                                  shuffle=False,
                                  num_workers=4)

    torch.set_printoptions(precision=10, profile="full")

    # Optimizer
    optimizer = optim.Adam(net.parameters(), lr=1e-3)
    # Learning rate scheduler
    scheduler = ExponentialLR(optimizer, 0.95)

    for epoch in range(args.num_epochs):
        train_bar = tqdm(train_data_loader)
        for input_ in train_bar:  # avoid shadowing the builtin input()
            train_mixed, train_clean, seq_len = map(lambda x: x.cuda(), input_)
            mixed = stft(train_mixed).unsqueeze(dim=1)
            real, imag = mixed[..., 0], mixed[..., 1]
            out_real, out_imag = net(real, imag)
            out_real, out_imag = torch.squeeze(out_real, 1), torch.squeeze(out_imag, 1)
            out_audio = istft(out_real, out_imag, train_mixed.size(1))
            out_audio = torch.squeeze(out_audio, dim=1)
            for i, l in enumerate(seq_len):
                out_audio[i, l:] = 0
            librosa.output.write_wav(
                'mixed.wav', train_mixed[0].cpu().data.numpy()
                [:seq_len[0].cpu().data.numpy()], 16000)
            librosa.output.write_wav(
                'clean.wav', train_clean[0].cpu().data.numpy()
                [:seq_len[0].cpu().data.numpy()], 16000)
            librosa.output.write_wav(
                'out.wav', out_audio[0].cpu().data.numpy()
                [:seq_len[0].cpu().data.numpy()], 16000)
            loss = wSDRLoss(train_mixed, train_clean, out_audio)
            print(epoch, loss)
            optimizer.zero_grad()
            loss.backward()

            optimizer.step()
        scheduler.step()
    torch.save(net.state_dict(), './final.pth.tar')
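Note: `librosa.output.write_wav` was removed in librosa 0.8, so the write_wav calls above fail on current librosa. Example #15 below writes audio with the `soundfile` package instead; the equivalent drop-in here would be:

import soundfile as sf

# same argument order as the removed API: path, waveform, sample rate
sf.write('out.wav', out_audio[0].cpu().data.numpy()[:seq_len[0].item()], 16000)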
Example #3
 def test_init(self):
     unet_config = UnetConfig(input_size=(16, 16, 3),
                              filters=10,
                              dropout=0.6,
                              batchnorm=False)
     unet = Unet(config=unet_config)
     unet.compile(loss="binary_crossentropy", metrics=["accuracy"])
     unet.summary()
Example #4
def main():
    json_path = os.path.join(args.conf)
    params = utils.Params(json_path)

    net = Unet(params.model).cuda()
    # TODO - check exists
    # if os.path.exists('./ckpt/final.pth.tar'):
    #     checkpoint = torch.load('./ckpt/final.pth.tar')
    #     net.load_state_dict(checkpoint)

    train_dataset = AudioDataset(data_type='train')
    # test_dataset = AudioDataset(data_type='val')
    train_data_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size,
                                   collate_fn=train_dataset.collate, shuffle=True, num_workers=0)
    # test_data_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size,
    #                               collate_fn=test_dataset.collate, shuffle=False, num_workers=4)

    # torch.set_printoptions(precision=10, profile="full")

    # Optimizer
    optimizer = optim.Adam(net.parameters(), lr=1e-2)
    # Learning rate scheduler
    scheduler = ExponentialLR(optimizer, 0.996)

    if not os.path.exists('ckpt'): # model save dir
        os.mkdir('ckpt')

    for epoch in range(1, args.num_epochs+1):
        train_bar = tqdm(train_data_loader, ncols=60)
        loss_sum = 0.0
        step_cnt = 0
        for input_ in train_bar:
            train_mixed, train_clean, seq_len = map(lambda x: x.cuda(), input_)
            mixed = stft(train_mixed).unsqueeze(dim=1)
            real, imag = mixed[..., 0], mixed[..., 1]
            out_real, out_imag = net(real, imag)
            out_real, out_imag = torch.squeeze(out_real, 1), torch.squeeze(out_imag, 1)
            out_audio = istft(out_real, out_imag, train_mixed.size(1))
            out_audio = torch.squeeze(out_audio, dim=1)
            for i, l in enumerate(seq_len):
                out_audio[i, l:] = 0
            # librosa.output.write_wav('mixed.wav', train_mixed[0].cpu().data.numpy()[:seq_len[0].cpu().data.numpy()], 16000)
            # librosa.output.write_wav('clean.wav', train_clean[0].cpu().data.numpy()[:seq_len[0].cpu().data.numpy()], 16000)
            # librosa.output.write_wav('out.wav', out_audio[0].cpu().data.numpy()[:seq_len[0].cpu().data.numpy()], 16000)
            loss = wSDRLoss(train_mixed, train_clean, out_audio)
            # print(epoch, loss.item(), end='', flush=True)
            loss_sum += loss.item()
            step_cnt += 1
            optimizer.zero_grad()
            loss.backward()

            optimizer.step()

        avg_loss = loss_sum / step_cnt
        print('epoch %d> Avg_loss: %.6f.\n' % (epoch, avg_loss))
        scheduler.step()
        if epoch % 20 == 0:
            torch.save(net.state_dict(), './ckpt/step%05d.pth.tar' % epoch)
Example #5
def get_model(name, in_channels, out_channels, **kwargs):
    if name == "unet":
        return Unet(in_channels, out_channels)
    if name == "baby-unet":
        return BabyUnet(in_channels, out_channels)
    if name == "dncnn":
        return DnCNN(in_channels, out_channels)
    if name == "convolution":
        return SingleConvolution(in_channels, out_channels, kwargs["width"])
Example #6
def get_model(name, in_channels, out_channels, **kwargs):
    if name == 'unet':
        return Unet(in_channels, out_channels)
    if name == 'baby-unet':
        return BabyUnet(in_channels, out_channels)
    if name == 'dncnn':
        return DnCNN(in_channels, out_channels)
    if name == 'convolution':
        return SingleConvolution(in_channels, out_channels, kwargs['width'])
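For reference, a call into either factory might look like the following sketch; only the 'convolution' branch consumes the `width` kwarg, and unknown names fall through and return None since there is no else branch:

model = get_model('unet', in_channels=1, out_channels=1)
conv = get_model('convolution', in_channels=1, out_channels=1, width=3)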
Example #7
    def __init__(self,
                 input_channels=1,
                 num_classes=1,
                 num_filters=None,
                 latent_levels=1,
                 latent_dim=2,
                 initializers=None,
                 no_convs_fcomb=4,
                 image_size=(1, 128, 128),
                 beta=10.0,
                 reversible=False):
        super(ProbabilisticUnet, self).__init__()
        self.input_channels = input_channels
        self.num_classes = num_classes
        self.num_filters = num_filters
        self.latent_dim = latent_dim
        self.no_convs_per_block = 3
        self.no_convs_fcomb = no_convs_fcomb
        self.initializers = {'w': 'he_normal', 'b': 'normal'}
        self.z_prior_sample = 0

        self.unet = Unet(self.input_channels,
                         self.num_classes,
                         self.num_filters,
                         initializers=self.initializers,
                         apply_last_layer=False,
                         padding=True,
                         reversible=reversible).to(device)
        self.prior = AxisAlignedConvGaussian(
            self.input_channels,
            self.num_filters,
            self.no_convs_per_block,
            self.latent_dim,
            initializers=self.initializers).to(device)
        self.posterior = AxisAlignedConvGaussian(
            self.input_channels,
            self.num_filters,
            self.no_convs_per_block,
            self.latent_dim,
            initializers=self.initializers,
            posterior=True).to(device)
        self.fcomb = Fcomb(self.num_filters,
                           self.latent_dim,
                           self.input_channels,
                           self.num_classes,
                           self.no_convs_fcomb,
                           initializers={
                               'w': 'orthogonal',
                               'b': 'normal'
                           },
                           use_tile=True).to(device)

        self.last_conv = Conv2D(32,
                                num_classes,
                                kernel_size=1,
                                activation=torch.nn.Identity,
                                norm=torch.nn.Identity)
Example #8
def unet_tf_pth(checkpoint_path, pth_output_path):
    model = Unet().eval()
    state_dict = model.state_dict()

    reader = tf.train.NewCheckpointReader(checkpoint_path)

    pth_keys = state_dict.keys()
    keys = sorted(reader.get_variable_to_shape_map().keys())
    print(keys)
    print(pth_keys)
Example #9
def test_unet_gradients(basic_image):
    labels = basic_image
    model = Unet(num_classes=1, depth=1)
    loss_fn = tf.keras.losses.CategoricalCrossentropy()
    with tf.GradientTape() as tape:
        predictions = model(basic_image)
        loss = loss_fn(labels, predictions)

    gradients = tape.gradient(loss, model.trainable_variables)
    assert all([g is not None for g in gradients])
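The `basic_image` fixture is not shown in these snippets; a minimal sketch consistent with the (2, 128, 128, 1) shape asserted in Example #30 could be:

import pytest
import tensorflow as tf

@pytest.fixture
def basic_image():
    # batch of 2 single-channel 128x128 images
    return tf.random.uniform((2, 128, 128, 1))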
Example #10
def single_model_factory(model_name, C):
    name = model_name.strip().upper()
    if name == 'SIAMUNET_CONC':
        from models.siamunet_conc import SiamUnet_conc
        return SiamUnet_conc(C.num_feats_in, 2)
    elif name == 'SIAMUNET_DIFF':
        from models.siamunet_diff import SiamUnet_diff
        return SiamUnet_diff(C.num_feats_in, 2)
    elif name == 'EF':
        from models.unet import Unet
        return Unet(C.num_feats_in, 2)
    else:
        raise NotImplementedError("{} is not a supported architecture".format(model_name))
Example #11
 def _build_model(self, model_config: Bunch) -> None:
     assert model_config.type in (
         "care", "n2v",
         "pn2v"), "Model type must be either care, n2v, or pn2v"
     self.model_type = model_config.type
     noise_histogram = np.load(model_config.noise_model_path)
     self.noise_model = NoiseModel(noise_histogram)
     self.unet = Unet(model_config.num_classes, model_config.depth,
                      model_config.initial_filters)
     self.optimizer = Adam()
     self.loss_fn = self.loss_pn2v
     self.model_path = model_config.model_path
     os.makedirs(self.model_path, exist_ok=True)  # don't crash if the directory exists
     if self.model_type in ("care", "n2v"):
         self.noise_fn = self.loss_n2v
Example #12
def test(args):

    # Setup Dataloader
    data_json = json.load(open('config.json'))
    data_path = data_json[args.dataset]['data_path']

    t_loader = SaltLoader(data_path, split="test")
    test_df = t_loader.test_df
    test_loader = data.DataLoader(t_loader, batch_size=args.batch_size, num_workers=8)

    # load Model
    if args.arch == 'unet':
        model = Unet(start_fm=16)
    else:
        model = Unet_upsample(start_fm=16)
    model_path = data_json[args.model]['model_path']
    model.load_state_dict(torch.load(model_path)['model_state'])
    model.cuda()
    total = sum([param.nelement() for param in model.parameters()])
    print('Number of params: %.2fM' % (total / 1e6))

    # test
    pred_list = []
    for images in test_loader:
        images = Variable(images.cuda())
        y_preds = model(images)
        y_preds_shaped = y_preds.reshape(-1,  args.img_size_target, args.img_size_target)
        for idx in range(args.batch_size):
            y_pred = y_preds_shaped[idx]
            pred = torch.sigmoid(y_pred)
            pred = pred.cpu().data.numpy()
            pred_ori = resize(pred, (args.img_size_ori, args.img_size_ori), mode='constant', preserve_range=True)
            pred_list.append(pred_ori)

    # submit the test image predictions
    threshold_best = args.threshold
    pred_dict = {idx: RLenc(np.round(pred_list[i] > threshold_best)) for i, idx in
                 enumerate(tqdm_notebook(test_df.index.values))}
    sub = pd.DataFrame.from_dict(pred_dict, orient='index')
    sub.index.names = ['id']
    sub.columns = ['rle_mask']
    sub.to_csv('./results/{}_submission.csv'.format(args.model))
    print("The submission.csv saved in ./results")
Example #13
def main():

    videos, audios = get_data("v1", 1)
    print("Data Loaded")
    # if os.path.exists('./Unet.pt'):
    #     unet = torch.load('./Unet.pt')
    #     frame_discriminator = torch.load()
    unet = Unet(debug=False)
    frame_discriminator = FrameDiscriminator()
    sequence_discriminator = SequenceDiscriminator()
    if cuda:
        print('GOING WELL.......')
        unet = unet.cuda()
        frame_discriminator = frame_discriminator.cuda()
        sequence_discriminator = sequence_discriminator.cuda()
    # if torch.cuda.device_count() > 1:
        # print("Using ", torch.cuda.device_count(), " GPUs!")
        # unet = nn.DataParallel(unet)
        # frame_discriminator = nn.DataParallel(frame_discriminator)
        # sequence_discriminator = nn.DataParallel(sequence_discriminator)
    train(audios, videos, unet, frame_discriminator, sequence_discriminator)
Example #14
def test(args):

    # Setup Data
    data_json = json.load(open('config.json'))
    x = Variable(torch.randn(32, 1, 128, 128))
    x = x.cuda()

    # load Model
    if args.arch == 'unet':
        model = Unet(start_fm=16)
    else:
        model = Unet_upsample(start_fm=16)
    model_path = data_json[args.model]['model_path']
    model.load_state_dict(torch.load(model_path)['model_state'])
    model.cuda()
    total = sum([param.nelement() for param in model.parameters()])
    print('Number of params: %.2fM' % (total / 1e6))

    #visualize
    y = model(x)
    g = make_dot(y)
    g.render('k')
Example #15
def main():
    json_path = os.path.join(args.model_dir)
    params = utils.Params(json_path)

    net = Unet(params.model).cuda()
    # TODO - check exists
    checkpoint = torch.load(args.ckpt)
    net.load_state_dict(checkpoint)

    # train_dataset = AudioDataset(data_type='train')
    test_dataset = AudioDataset(data_type='val')
    # train_data_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size,
    #                                collate_fn=train_dataset.collate, shuffle=True, num_workers=0)
    test_data_loader = DataLoader(dataset=test_dataset, batch_size=1,
                                  collate_fn=test_dataset.collate, shuffle=False, num_workers=0)

    torch.set_printoptions(precision=10, profile="full")


    test_bar = tqdm(test_data_loader, ncols=60)  # iterates the validation set
    cnt = 1
    with torch.no_grad():
        for input_ in test_bar:
            train_mixed, train_clean, seq_len = map(lambda x: x.cuda(), input_)
            mixed = stft(train_mixed).unsqueeze(dim=1)
            real, imag = mixed[..., 0], mixed[..., 1]
            out_real, out_imag = net(real, imag)
            out_real, out_imag = torch.squeeze(out_real, 1), torch.squeeze(out_imag, 1)
            out_audio = istft(out_real, out_imag, train_mixed.size(1))
            out_audio = torch.squeeze(out_audio, dim=1)
            for i, l in enumerate(seq_len):
                out_audio[i, l:] = 0
            # librosa.output.write_wav('mixed.wav', train_mixed[0].cpu().data.numpy()[:seq_len[0].cpu().data.numpy()], 16000)
            # librosa.output.write_wav('clean.wav', train_clean[0].cpu().data.numpy()[:seq_len[0].cpu().data.numpy()], 16000)
            sf.write('enhanced_testset/enhanced_%03d.wav' % cnt, np.array(out_audio[0].cpu().data.numpy()[:seq_len[0].cpu().data.numpy()], dtype=np.float32), 16000,)
            cnt += 1
Example #16
    fName = fName + '_probUnet'
    net = cFlowNet(input_channels=1,
                   num_classes=1,
                   num_filters=[32, 64, 128, 256],
                   latent_dim=6,
                   no_convs_fcomb=4,
                   norm=True,
                   flow=args.flow)
elif args.unet:
    print("Using Det. Unet")
    fName = fName + '_Unet'
    net = Unet(input_channels=1,
               num_classes=1,
               num_filters=[32, 64, 128, 256],
               apply_last_layer=True,
               padding=True,
               norm=True,
               initializers={
                   'w': 'he_normal',
                   'b': 'normal'
               })
    criterion = nn.BCELoss(reduction='sum')  # size_average=False is deprecated
else:
    print("Choose a model.\nAborting....")
    sys.exit()

if not os.path.exists('logs'):
    os.mkdir('logs')

logFile = 'logs/' + fName + '.txt'
makeLogFile(logFile)
Example #17
import tensorflow as tf
import cv2
from models.unet import Unet
from data_augmentation.data_augmentation import DataAugmentation
import numpy as np

gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:  # guard against CPU-only machines
    tf.config.experimental.set_memory_growth(gpus[0], True)

# Initialize
IMAGE_PATH = "dataset/Original/Testing/"
MASK_PATH = "dataset/MASKS/Testing/"
IMAGE_FILE = "Frame00314-org"

model = Unet(input_shape=(224, 224, 1)).build()
model.load_weights("models/model_weight.h5")
model.summary()
print("yeah")


def convert_to_tensor(numpy_image):
    numpy_image = np.expand_dims(numpy_image, axis=2)
    numpy_image = np.expand_dims(numpy_image, axis=0)
    tensor_image = tf.convert_to_tensor(numpy_image)
    return tensor_image


def predict(image):
    process_obj = DataAugmentation(input_size=224, output_size=224)
    image_processed = process_obj.data_process_test(image)
    tensor_image = convert_to_tensor(image_processed)
Example #18
def build_Unet_model(C):
    from models.unet import Unet
    return Unet(6, 2)
Example #19
def get_model(model_name, in_channel, n_classes):
    return {
        'unetresnet': UNetResNet(in_channel, n_classes),
        'unet': Unet(in_channel, n_classes),
        'unetplus': UnetPlus(in_channel, n_classes)
    }[model_name]
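Note that the dict above constructs all three models before one is selected. A lazier sketch with the same call signature keeps the classes and instantiates only the requested one:

def get_model(model_name, in_channel, n_classes):
    models = {
        'unetresnet': UNetResNet,
        'unet': Unet,
        'unetplus': UnetPlus,
    }
    # still raises KeyError on unknown names, like the original
    return models[model_name](in_channel, n_classes)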
Example #20
def train(fold, train_case_indexes, val_case_indexes, args):
    tf.keras.backend.clear_session()
    log_dir = os.path.join(args.target_path, 'fold_' + str(fold) + '/')
    if not os.path.isdir(log_dir):
        os.mkdir(log_dir)

    model = Unet(n_class=4)

    # get loss and op
    loss_object = dice_loss
    optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)

    # get metrics
    loss_metric = tf.keras.metrics.Mean(name='train_loss')
    iou_1 = tf.keras.metrics.MeanIoU(num_classes=2, name='train_iou_1')
    iou_2 = tf.keras.metrics.MeanIoU(num_classes=2, name='train_iou_2')
    iou_3 = tf.keras.metrics.MeanIoU(num_classes=2, name='train_iou_3')
    dice_1 = tf.keras.metrics.Mean(name='train_dice_1')
    dice_2 = tf.keras.metrics.Mean(name='train_dice_2')
    dice_3 = tf.keras.metrics.Mean(name='train_dice_3')

    step = 0

    record_loss, record_iou_1, record_iou_2, record_iou_3, record_dice_1, record_dice_2, record_dice_3 = 0, 0, 0, 0, 0, 0, 0

    summary_writer = tf.summary.create_file_writer(log_dir)  # create_file_writer takes no graph argument

    train_dataset = get_dataset_hdr(args.data_path,
                                    case_indexes=train_case_indexes,
                                    batch_size=args.batch_size,
                                    is_training=True)
    val_dataset = get_dataset_hdr(args.data_path,
                                  case_indexes=val_case_indexes,
                                  batch_size=args.batch_size,
                                  is_training=False)

    template_1 = 'Epoch {}, Loss: {}, IoU_1: {}, IoU_2: {}, IoU_3: {}, Dice_1: {}, Dice_2: {}, Dice_3: {} '
    template_2 = 'Val Loss: {}, Val_IoU_1: {}, Val_IoU_2: {}, Val_IoU_3: {}, Val_Dice_1: {}, Val_Dice_2: {}, Val_Dice_3: {} '
    print('-----------------------------------------------------------')
    print('fold ', fold)

    lr = args.lr
    for epoch in range(args.epochs):
        if (epoch + 1) % 5 == 0:
            lr = lr * 0.2
            optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
        # training
        loss_metric.reset_states()
        iou_1.reset_states()
        iou_2.reset_states()
        iou_3.reset_states()
        dice_1.reset_states()
        dice_2.reset_states()
        dice_3.reset_states()
        for images, labels in train_dataset:
            with tf.GradientTape() as tape:
                predictions = model(images, training=True)
                train_loss = loss_object(labels, predictions)
            gradients = tape.gradient(train_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(gradients,
                                          model.trainable_variables))

            loss_metric.update_state(train_loss)
            iou_1.update_state(labels[..., 1], predictions[..., 1])
            iou_2.update_state(labels[..., 2], predictions[..., 2])
            iou_3.update_state(labels[..., 3], predictions[..., 3])
            dice_1.update_state(
                dice(labels[:, :, :, :, 1], predictions[:, :, :, :, 1]))
            dice_2.update_state(
                dice(labels[:, :, :, :, 2], predictions[:, :, :, :, 2]))
            dice_3.update_state(
                dice(labels[:, :, :, :, 3], predictions[:, :, :, :, 3]))
        print(
            template_1.format(epoch + 1,
                              np.round(loss_metric.result().numpy(), 5),
                              np.round(iou_1.result().numpy() * 100, 5),
                              np.round(iou_2.result().numpy() * 100, 5),
                              np.round(iou_3.result().numpy() * 100, 5),
                              np.round(dice_1.result().numpy() * 100, 5),
                              np.round(dice_2.result().numpy() * 100, 5),
                              np.round(dice_3.result().numpy() * 100, 5)))
        with summary_writer.as_default():
            tf.summary.scalar('train_loss', loss_metric.result(), step=epoch)
            tf.summary.scalar('train_iou_1', iou_1.result(), step=epoch)
            tf.summary.scalar('train_iou_2', iou_2.result(), step=epoch)
            tf.summary.scalar('train_iou_3', iou_3.result(), step=epoch)
            tf.summary.scalar('train_dice_1', dice_1.result(), step=epoch)
            tf.summary.scalar('train_dice_2', dice_2.result(), step=epoch)
            tf.summary.scalar('train_dice_3', dice_3.result(), step=epoch)
            tf.summary.image('prediction',
                             predictions[:, :, :, 32, :],
                             step=epoch)
            tf.summary.image('label', labels[:, :, :, 32, :], step=epoch)

        # validation
        loss_metric.reset_states()
        iou_1.reset_states()
        iou_2.reset_states()
        iou_3.reset_states()
        dice_1.reset_states()
        dice_2.reset_states()
        dice_3.reset_states()
        for images, labels in val_dataset:
            predictions = model(images)
            val_loss = loss_object(labels, predictions)
            loss_metric.update_state(val_loss)
            iou_1.update_state(labels[..., 1], predictions[..., 1])
            iou_2.update_state(labels[..., 2], predictions[..., 2])
            iou_3.update_state(labels[..., 3], predictions[..., 3])
            dice_1.update_state(
                dice(labels[:, :, :, :, 1], predictions[:, :, :, :, 1]))
            dice_2.update_state(
                dice(labels[:, :, :, :, 2], predictions[:, :, :, :, 2]))
            dice_3.update_state(
                dice(labels[:, :, :, :, 3], predictions[:, :, :, :, 3]))

        print(
            template_2.format(np.round(loss_metric.result().numpy(), 5),
                              np.round(iou_1.result().numpy() * 100, 5),
                              np.round(iou_2.result().numpy() * 100, 5),
                              np.round(iou_3.result().numpy() * 100, 5),
                              np.round(dice_1.result().numpy() * 100, 5),
                              np.round(dice_2.result().numpy() * 100, 5),
                              np.round(dice_3.result().numpy() * 100, 5)))
        with summary_writer.as_default():
            tf.summary.scalar('val_loss', loss_metric.result(), step=epoch)
            tf.summary.scalar('val_iou_1', iou_1.result(), step=epoch)
            tf.summary.scalar('val_iou_2', iou_2.result(), step=epoch)
            tf.summary.scalar('val_iou_3', iou_3.result(), step=epoch)
            tf.summary.scalar('val_dice_1', dice_1.result(), step=epoch)
            tf.summary.scalar('val_dice_2', dice_2.result(), step=epoch)
            tf.summary.scalar('val_dice_3', dice_3.result(), step=epoch)
            tf.summary.image('val_prediction',
                             predictions[:, :, :, 32, :],
                             step=epoch)
            tf.summary.image('val_label', labels[:, :, :, 32, :], step=epoch)

        record_loss, record_iou_1, record_iou_2, record_iou_3, record_dice_1, record_dice_2, record_dice_3 = \
            loss_metric.result().numpy(), \
            iou_1.result().numpy() * 100, \
            iou_2.result().numpy() * 100,\
            iou_3.result().numpy() * 100, \
            dice_1.result().numpy() * 100,\
            dice_2.result().numpy() * 100, \
            dice_3.result().numpy() * 100,

        model.save_weights(os.path.join(log_dir, 'ep=' + str(epoch) + '.h5'))

    model.save_weights(os.path.join(log_dir, 'final_weights.h5'))
    return record_loss, record_iou_1, record_iou_2, record_iou_3, record_dice_1, record_dice_2, record_dice_3
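Recreating the Adam optimizer every five epochs, as above, discards its moment estimates. A schedule-based sketch (with a hypothetical `steps_per_epoch`) achieves the same staircase decay while keeping optimizer state:

steps_per_epoch = 100  # hypothetical; derive from the dataset size
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=args.lr,
    decay_steps=5 * steps_per_epoch,  # decay once every 5 epochs
    decay_rate=0.2,
    staircase=True)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)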
Example #21
    radius = 5
    sigma_sq = (3*10**(-5))**2
    Lambda_tv = 10
    D = dipole_kernel(patchSize, voxel_size, B0_dir)
    S = SMV_kernel(patchSize, voxel_size, radius)
    D = S*D

    GPU = 2

    if GPU == 1:
        rootDir = '/home/sdc/Jinwei/BayesianQSM'
    elif GPU == 2:
        rootDir = '/data/Jinwei/Bayesian_QSM/weight'

    # network
    unet3d = Unet(input_channels=1, output_channels=2, num_filters=[2**i for i in range(5, 10)])
    unet3d.to(device)
    # optimizer
    optimizer = optim.Adam(unet3d.parameters(), lr=lr, betas=(0.5, 0.999))
    # dataloader
    dataLoader = QSM_data_loader2(GPU=GPU, patchSize=patchSize)
    trainLoader = data.DataLoader(dataLoader, batch_size=1, shuffle=False)

    epoch = 0
    while epoch < niter:

        epoch += 1
        for idx, (input_RDFs, in_loss_RDFs, QSMs, Masks, \
            fidelity_Ws, gradient_Ws, flag_COSMOS) in enumerate(trainLoader):

            input_RDFs = input_RDFs[0, ...].to(device, dtype=torch.float)
Example #22
def unet():
    model = Unet(num_classes=2, depth=3)
    return model
Example #23
 def test_default_init(self):
     unet = Unet()
     unet.compile()
     unet.summary()
Example #24
        print('Using simulated RDF')
    # parameters
    lr = 7e-4  # 7e-4 for HOBIT
    batch_size = 1
    B0_dir = (0, 0, 1)
    voxel_size = dataLoader_train.voxel_size
    volume_size = dataLoader_train.volume_size

    trainLoader = data.DataLoader(dataLoader_train,
                                  batch_size=batch_size,
                                  shuffle=True)

    # network of HOBIT
    unet3d = Unet(input_channels=1,
                  output_channels=1,
                  num_filters=[2**i for i in range(5, 10)],
                  use_deconv=1,
                  flag_rsa=0)
    unet3d.to(device0)
    weights_dict = torch.load(rootDir + '/weight_2nets/unet3d_fine.pt')
    # weights_dict = torch.load(rootDir+'/weight_2nets/linear_factor=1_validation=6_test=7_unet3d.pt')
    unet3d.load_state_dict(weights_dict)

    # QSMnet
    unet3d_ = Unet(input_channels=1,
                   output_channels=1,
                   num_filters=[2**i for i in range(5, 10)],
                   use_deconv=1,
                   flag_rsa=0)
    unet3d_.to(device0)
    weights_dict = torch.load(rootDir +
Example #25
def validate(args):

    # Setup Dataloader
    data_json = json.load(open('config.json'))
    data_path = data_json[args.dataset]['data_path']

    v_loader = SaltLoader(data_path, split='val')
    train_df = v_loader.train_df

    val_loader = data.DataLoader(v_loader,
                                 batch_size=args.batch_size,
                                 num_workers=8)

    # load Model
    if args.arch == 'unet':
        model = Unet(start_fm=16)
    else:
        model = Unet_upsample(start_fm=16)
    model_path = data_json[args.model]['model_path']
    model.load_state_dict(torch.load(model_path)['model_state'])
    model.cuda()
    total = sum([param.nelement() for param in model.parameters()])
    print('Number of params: %.2fM' % (total / 1e6))

    # validate
    pred_list = []
    for images, masks in val_loader:
        images = Variable(images.cuda())
        y_preds = model(images)
        # print(y_preds.shape)
        y_preds_shaped = y_preds.reshape(-1, args.img_size_target,
                                         args.img_size_target)
        for idx in range(args.batch_size):
            y_pred = y_preds_shaped[idx]
            pred = torch.sigmoid(y_pred)
            pred = pred.cpu().data.numpy()
            pred_ori = resize(pred, (args.img_size_ori, args.img_size_ori),
                              mode='constant',
                              preserve_range=True)
            pred_list.append(pred_ori)

    preds_valid = np.array(pred_list)
    y_valid_ori = np.array(
        [train_df.loc[idx].masks for idx in v_loader.ids_valid])

    # jaccard score
    accuracies_best = 0.0
    threshold_best = 0.0  # fallback in case no threshold beats the initial best
    for threshold in np.linspace(0, 1, 11):
        ious = []
        for y_pred, mask in zip(preds_valid, y_valid_ori):
            prediction = (y_pred > threshold).astype(int)
            iou = jaccard_similarity_score(mask.flatten(),
                                           prediction.flatten())
            ious.append(iou)

        ious = np.array(ious)  # list -> array so the comparison below broadcasts
        accuracies = [
            np.mean(ious > iou_threshold)
            for iou_threshold in np.linspace(0.5, 0.95, 10)
        ]
        if accuracies_best < np.mean(accuracies):
            accuracies_best = np.mean(accuracies)
            threshold_best = threshold
        print('Threshold: %.1f, Metric: %.3f' %
              (threshold, np.mean(accuracies)))
    print("jaccard score gets threshold_best=", threshold_best)

    # other scoring approach
    thresholds = np.linspace(0, 1, 50)
    ious = np.array([
        iou_metric_batch(y_valid_ori, np.int32(preds_valid > threshold))
        for threshold in tqdm_notebook(thresholds)
    ])
    # restrict the argmax to mid-range thresholds (skip the 9 lowest and
    # 10 highest) to avoid degenerate thresholds near 0 and 1
    threshold_best_index = np.argmax(ious[9:-10]) + 9
    iou_best = ious[threshold_best_index]
    threshold_best = thresholds[threshold_best_index]
    print("other way gets iou_best=", iou_best, "threshold_best=",
          threshold_best)
Example #26
print(data.shape)

####################################################
#           PREPARE Noise Model
####################################################

histogram = np.load(path + args.histogram)

# Create a NoiseModel object from the histogram.
noiseModel = hist_noise_model.NoiseModel(histogram)

####################################################
#           CREATE AND TRAIN NETWORK
####################################################

net = Unet(800, depth=args.netDepth)

# Split training and validation image.
my_train_data = data[:-5].copy()
np.random.shuffle(my_train_data)
my_val_data = data[-5:].copy()
np.random.shuffle(my_val_data)

# Start training.
train_hist, val_hist = training.train_network(
    net=net,
    train_data=my_train_data,
    val_data=my_val_data,
    postfix=args.name,
    directory=path,
    noise_model=noiseModel,
Example #27
    voxel_size = dataLoader_train.voxel_size
    volume_size = dataLoader_train.volume_size
    S = SMV_kernel(volume_size, voxel_size, radius=5)
    D = dipole_kernel(volume_size, voxel_size, B0_dir)
    D = np.real(S * D)

    trainLoader = data.DataLoader(dataLoader_train,
                                  batch_size=batch_size,
                                  shuffle=True)

    # network
    unet3d = Unet(input_channels=1,
                  output_channels=2,
                  num_filters=[2**i for i in range(3, 8)],
                  bilateral=1,
                  use_deconv=1,
                  use_deconv2=1,
                  renorm=1,
                  flag_r_train=flag_r_train)
    # unet3d = Unet(
    #     input_channels=1,
    #     output_channels=2,
    #     num_filters=[2**i for i in range(3, 8)],
    #     flag_rsa=2
    # )
    unet3d.to(device)
    if flag_init == 0:
        weights_dict = torch.load(
            rootDir +
            '/weight/weights_sigma={0}_smv={1}_mv8'.format(sigma, 1) + '.pt')
    else:
Example #28
    image = cv2.resize(image, (cfg.input_shape[0], cfg.input_shape[1]))
    image = np.expand_dims(image, axis=0)
    # 3. GPU setup
    session_config = tf.ConfigProto(
        device_count={'GPU': 0},
        gpu_options={
            'allow_growth': 1,
            # 'per_process_gpu_memory_fraction': 0.1,
            'visible_device_list': '0'
        },
        allow_soft_placement=True)  # required: without it a cuDNN mismatch error is raised no matter what, and the bug is very hard to trace

    with tf.Session(config=session_config) as sess:
        # 1. Define the model
        model = Unet(sess, cfg, is_train=is_train)

        # 2. Restore the model
        model.restore(model_restore_name)

        # 3. Predict
        since = time.time()
        pre = model.predict(image)
        seconds = time.time() - since

        # 4. Adjust dimensions
        pre_list = np.split(pre[0], batch_size, axis=0)
        image = np.squeeze(image, axis=0)
        pres = np.squeeze(pre_list, axis=0)
        pres = np.expand_dims(pres, axis=-1)
        result = np.multiply(pres, image)
Example #29
    parser.add_argument('--case_test', type=int, default=7)
    parser.add_argument('--linear_factor', type=int, default=1)
    parser.add_argument('--weight_dir', type=str, default='weight_2nets')
    opt = {**vars(parser.parse_args())}

    os.environ['CUDA_VISIBLE_DEVICES'] = opt['gpu_id']
    t0 = time.time()

    device0 = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    device1 = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
    rootDir = '/data/Jinwei/Bayesian_QSM/' + opt['weight_dir']

    # network
    unet3d = Unet(
        input_channels=1,
        output_channels=1,
        num_filters=[2**i for i in range(5, 10)],  # or range(3, 8)
        use_deconv=1,
        flag_rsa=0)
    unet3d.to(device0)
    weights_dict = torch.load(rootDir +
                              '/linear_factor=1_validation=6_test=7_unet3d.pt')
    unet3d.load_state_dict(weights_dict)

    resnet = ResBlock(
        input_dim=2,
        filter_dim=32,
        output_dim=1,
    )
    resnet.to(device1)
    weights_dict = torch.load(rootDir +
                              '/linear_factor=1_validation=6_test=7_resnet.pt')
Example #30
def test_unet_various_depths(depth, basic_image):
    model = Unet(1, depth)
    prediction = model(basic_image)
    assert prediction.get_shape() == (2, 128, 128, 1)
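The parametrize decorator that supplies `depth` is cut off in this snippet; a plausible reconstruction (the exact depth values are an assumption) is:

import pytest

@pytest.mark.parametrize('depth', [1, 2, 3])  # assumed values
def test_unet_various_depths(depth, basic_image):
    ...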