Example #1
def inference_net():
    input_shape = (88, 808, 808)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_batched = tf.reshape(raw, (1, 1,) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(raw_batched, 12, 6, [[1, 3, 3], [1, 3, 3], [1, 3, 3]],
                                           [[(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)],
                                            [(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
                                           [[(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)],
                                            [(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
                                           voxel_size=(10, 1, 1), fov=(10, 1, 1))

    logits_batched, fov = unet.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=2,
        activation=None,
        fov=fov,
        voxel_size=anisotropy
    )

    output_shape_batched = logits_batched.get_shape().as_list()

    output_shape = output_shape_batched[1:]  # strip the batch dimension

    probabilities = tf.reshape(tf.nn.softmax(logits_batched, dim=1)[0], output_shape)
    predictions = tf.argmax(probabilities, axis=0)

    tf.train.export_meta_graph(filename='unet_inference.meta')
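
All of these snippets are shown without their import headers. A minimal sketch of what they assume (module names are taken from the calls themselves; unet, ops3d, and custom_ops are project-local helper modules, not pip packages):

# Assumed imports for the TF 1.x snippets in this collection; `unet`,
# `ops3d`, and `custom_ops` are project-local modules, inferred from use.
import json

import tensorflow as tf  # TF 1.x API: tf.placeholder, tf.train.export_meta_graph

import custom_ops  # provides tf_var_summary, ignore
import ops3d       # provides conv_pass, center_crop, crop_zyx
import unet        # provides unet, conv_pass
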
Example #2
def inference_net():
    input_shape = (400, 400, 400)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        6,
        [[2, 2, 2], [2, 2, 2], [3, 3, 3]],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(1, 1, 1),
        fov=(1, 1, 1),
    )
    pred_raw_bc, fov = ops3d.conv_pass(last_fmap,
                                       kernel_size=[[1, 1, 1]],
                                       num_fmaps=1,
                                       activation=None,
                                       fov=fov,
                                       voxel_size=anisotropy)
    output_shape_bc = pred_raw_bc.get_shape().as_list()
    output_shape_c = output_shape_bc[1:]
    output_shape = output_shape_c[1:]

    pred_raw = tf.reshape(pred_raw_bc, output_shape)

    tf.train.export_meta_graph(filename='unet_inference.meta')
Example #3
def inference_net():
    input_shape = (91, 862, 862)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1,) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(raw_bc, 12, 6, [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
                                           [[(1, 3, 3), (1, 3, 3)], [(1, 3, 3), (1, 3, 3)],
                                            [(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
                                           [[(1, 3, 3), (1, 3, 3)], [(1, 3, 3), (1, 3, 3)],
                                            [(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
                                           voxel_size=(10, 1, 1), fov=(10, 1, 1))

    dist_bc, fov = ops3d.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=3,
        activation=None,
        fov=fov,
        voxel_size=anisotropy,
    )
    output_shape_bc = dist_bc.get_shape().as_list()
    output_shape_c = output_shape_bc[1:]
    #output_shape = output_shape_c[1:]
    dist_c = tf.reshape(dist_bc, shape=output_shape_c)
    cleft_dist, pre_dist, post_dist = tf.unstack(dist_c, 3, axis=0)

    tf.train.export_meta_graph(filename='unet_inference.meta')
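
The inference_net() variants only export the graph; none of them records tensor names. A hypothetical restore-and-run sketch (the tensor and checkpoint names below are assumptions; in practice you would persist raw.name and the output names the way the train_net() examples write net_io_names.json):

# Hypothetical use of the exported meta graph; all names are assumptions.
import numpy as np
import tensorflow as tf

saver = tf.train.import_meta_graph('unet_inference.meta')
with tf.Session() as sess:
    saver.restore(sess, 'unet_checkpoint')  # assumed checkpoint prefix
    graph = tf.get_default_graph()
    raw = graph.get_tensor_by_name('Placeholder:0')  # assumed input name
    cleft = graph.get_tensor_by_name('unstack:0')    # assumed output name
    out = sess.run(cleft,
                   feed_dict={raw: np.zeros((91, 862, 862), np.float32)})
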
Example #4
def train_net():
    input_shape = (196, 196, 196)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        6,
        [[2, 2, 2], [2, 2, 2], [3, 3, 3]],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(1, 1, 1),
        fov=(1, 1, 1),
    )
    pred_raw_bc, fov = ops3d.conv_pass(last_fmap,
                                       kernel_size=[[1, 1, 1]],
                                       num_fmaps=1,
                                       activation=None,
                                       fov=fov,
                                       voxel_size=anisotropy)
    output_shape_bc = pred_raw_bc.get_shape().as_list()
    output_shape_c = output_shape_bc[1:]
    output_shape = output_shape_c[1:]

    pred_raw = tf.reshape(pred_raw_bc, output_shape)

    gt_raw_bc = ops3d.crop_zyx(raw_bc, output_shape_bc)
    gt_raw = tf.reshape(gt_raw_bc, output_shape)

    loss = tf.losses.mean_squared_error(gt_raw, pred_raw)
    tf.summary.scalar('loss', loss)

    opt = tf.train.AdamOptimizer(learning_rate=0.5e-4,
                                 beta1=0.95,
                                 beta2=0.999,
                                 epsilon=1e-8)
    optimizer = opt.minimize(loss)

    merged = tf.summary.merge_all()
    tf.train.export_meta_graph(filename='unet.meta')

    names = {
        'raw': raw.name,
        'pred_raw': pred_raw.name,
        'optimizer': optimizer.name,
        'summary': merged.name,
        'loss': loss.name
    }
    with open('net_io_names.json', 'w') as f:
        json.dump(names, f)
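
Because the target here is just a center crop of the input, a training step needs only the raw block. A minimal step sketch driven by the names file written above (session handling and the dummy batch are assumptions):

# Hypothetical training step via net_io_names.json; only the name lookup
# mirrors what train_net() exports, the rest is an assumption.
import json
import numpy as np
import tensorflow as tf

with open('net_io_names.json') as f:
    names = json.load(f)

saver = tf.train.import_meta_graph('unet.meta')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(196, 196, 196).astype(np.float32)  # dummy raw block
    _, loss_val = sess.run([names['optimizer'], names['loss']],
                           feed_dict={names['raw']: batch})
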
Example #5
def inference_net():
    input_shape = (91, 862, 862)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        6,
        [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1),
    )

    logits_bc, fov = ops3d.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=2,
        activation=None,
        fov=fov,
        voxel_size=anisotropy,
    )

    output_shape_bc = logits_bc.get_shape().as_list()

    output_shape_c = output_shape_bc[1:]  # strip the batch dimension
    output_shape = output_shape_c[1:]  # strip the channel dimension

    probabilities = tf.reshape(tf.nn.softmax(logits_bc, dim=1)[0], output_shape_c)
    predictions = tf.argmax(probabilities, axis=0)
    print(probabilities.name)

    tf.train.export_meta_graph(filename="unet_inference.meta")
Example #6
def inference_net():
    input_shape = (91, 862, 862)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        [3, 3, 6],
        [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1),
    )

    dist_bc, fov = ops3d.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=1,
        activation=None,
        fov=fov,
        voxel_size=anisotropy,
    )

    output_shape_bc = dist_bc.get_shape().as_list()

    output_shape_c = output_shape_bc[1:]
    output_shape = output_shape_c[1:]

    dist = tf.reshape(dist_bc, output_shape)

    tf.train.export_meta_graph(filename="unet_inference.meta")
Example #7
def inference_net():
    input_shape = (88, 808, 808)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_batched = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_batched,
        12,
        6,
        [[1, 3, 3], [1, 3, 3], [1, 3, 3]],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1),
    )

    dist_batched, fov = unet.conv_pass(last_fmap,
                                       kernel_size=[[1, 1, 1]],
                                       num_fmaps=1,
                                       activation=None,
                                       fov=fov,
                                       voxel_size=anisotropy)

    output_shape_batched = dist_batched.get_shape().as_list()

    output_shape = output_shape_batched[1:]

    dist = tf.reshape(dist_batched, output_shape)

    tf.train.export_meta_graph(filename='unet_inference.meta')
Example #8
def train_net():
    input_shape = (84, 268, 268)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_batched = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_batched,
        12,
        6,
        [[1, 3, 3], [1, 3, 3], [1, 3, 3]],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1),
    )

    dist_batched, fov = unet.conv_pass(last_fmap,
                                       kernel_size=[[1, 1, 1]],
                                       num_fmaps=1,
                                       activation=None,
                                       fov=fov,
                                       voxel_size=anisotropy)

    output_shape_batched = dist_batched.get_shape().as_list()

    output_shape = output_shape_batched[1:]  # strip the batch dimension

    dist = tf.reshape(dist_batched, output_shape)

    gt_dist = tf.placeholder(tf.float32, shape=output_shape)

    loss_weights = tf.placeholder(tf.float32, shape=output_shape[1:])
    loss_weights_batched = tf.reshape(loss_weights, shape=output_shape)

    loss_balanced = tf.losses.mean_squared_error(gt_dist, dist,
                                                 loss_weights_batched)
    tf.summary.scalar('loss_balanced_syn', loss_balanced)

    loss_unbalanced = tf.losses.mean_squared_error(gt_dist, dist)
    tf.summary.scalar('loss_unbalanced_syn', loss_unbalanced)

    opt = tf.train.AdamOptimizer(learning_rate=0.5e-4,
                                 beta1=0.95,
                                 beta2=0.999,
                                 epsilon=1e-8)

    optimizer = opt.minimize(loss_balanced)
    merged = tf.summary.merge_all()

    tf.train.export_meta_graph(filename='unet.meta')

    names = {
        'raw': raw.name,
        'dist': dist.name,
        'gt_dist': gt_dist.name,
        'loss_balanced_syn': loss_balanced.name,
        'loss_unbalanced_syn': loss_unbalanced.name,
        'loss_weights': loss_weights.name,
        'optimizer': optimizer.name,
        'summary': merged.name
    }

    with open('net_io_names.json', 'w') as f:
        json.dump(names, f)
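
The balanced loss takes loss_weights from a placeholder; how those weights are computed is outside this snippet. One common scheme, shown here as an assumption rather than the author's method, is inverse class-frequency weighting of a binary ground truth:

# Hypothetical inverse-frequency weights for the loss_weights placeholder.
import numpy as np

def balance_weights(gt_binary):
    # weight voxels so foreground and background contribute equally overall
    fg = gt_binary > 0
    w = np.empty(gt_binary.shape, dtype=np.float32)
    w[fg] = gt_binary.size / (2.0 * max(fg.sum(), 1))
    w[~fg] = gt_binary.size / (2.0 * max((~fg).sum(), 1))
    return w
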
Example #9
def train_net():
    input_shape = (43, 430, 430)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        [3, 3, 6],
        [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1),
    )

    dist_bc, fov = ops3d.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=1,
        activation=None,
        fov=fov,
        voxel_size=anisotropy,
    )

    output_shape_bc = dist_bc.get_shape().as_list()

    output_shape_c = output_shape_bc[1:]  # strip the batch dimension
    output_shape = output_shape_c[1:]  # strip the channel dimension

    dist = tf.reshape(dist_bc, output_shape)

    gt_dist = tf.placeholder(tf.float32, shape=output_shape)

    loss_weights = tf.placeholder(tf.float32, shape=output_shape)
    mask = tf.placeholder(tf.float32, shape=output_shape)

    loss_balanced = tf.losses.mean_squared_error(gt_dist, dist, loss_weights)
    tf.summary.scalar("loss_balanced_syn", loss_balanced)

    loss_unbalanced = tf.losses.mean_squared_error(gt_dist, dist, mask)
    tf.summary.scalar("loss_unbalanced_syn", loss_unbalanced)

    opt = tf.train.AdamOptimizer(learning_rate=0.5e-4,
                                 beta1=0.95,
                                 beta2=0.999,
                                 epsilon=1e-8)

    optimizer = opt.minimize(loss_balanced)
    merged = tf.summary.merge_all()

    tf.train.export_meta_graph(filename="unet.meta")

    names = {
        "raw": raw.name,
        "dist": dist.name,
        "gt_dist": gt_dist.name,
        "loss_balanced_syn": loss_balanced.name,
        "loss_unbalanced_syn": loss_unbalanced.name,
        "loss_weights": loss_weights.name,
        "mask": mask.name,
        "optimizer": optimizer.name,
        "summary": merged.name,
    }

    with open("net_io_names.json", "w") as f:
        json.dump(names, f)
Example #10
def train_net():

    # two autocontext passes with valid convolutions shrink each axis:
    # 220 -> 132 -> 44
    shape_0 = (220,) * 3
    shape_1 = (132,) * 3
    shape_2 = (44,) * 3

    affs_0_bc = tf.ones((1, 3) + shape_0) * 0.5

    with tf.variable_scope("autocontext") as scope:

        # phase 1
        raw_0 = tf.placeholder(tf.float32, shape=shape_0)
        raw_0_bc = tf.reshape(raw_0, (1, 1) + shape_0)

        input_0 = tf.concat([raw_0_bc, affs_0_bc], 1)

        out_bc, fov, anisotropy = unet.unet(
            input_0,
            24,
            3,
            [[2, 2, 2], [2, 2, 2], [2, 2, 2]],
            [
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
            ],
            [
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
            ],
        )

        affs_1_bc, fov = ops3d.conv_pass(
            out_bc,
            kernel_size=[[1, 1, 1]],
            num_fmaps=3,
            activation="sigmoid",
            fov=fov,
            voxel_size=anisotropy,
        )

        affs_1_c = tf.reshape(affs_1_bc, (3,) + shape_1)
        gt_affs_1_c = tf.placeholder(tf.float32, shape=(3,) + shape_1)
        loss_weights_1_c = tf.placeholder(tf.float32, shape=(3,) + shape_1)

        loss_1 = tf.losses.mean_squared_error(gt_affs_1_c, affs_1_c, loss_weights_1_c)

        # phase 2
        tf.summary.scalar("loss_pred0", loss_1)
        scope.reuse_variables()

        raw_1 = ops3d.center_crop(raw_0, shape_1)
        raw_1_bc = tf.reshape(raw_1, (1, 1) + shape_1)

        input_1 = tf.concat([raw_1_bc, affs_1_bc], 1)

        out_bc, fov, anisotropy = unet.unet(
            input_1,
            24,
            3,
            [[2, 2, 2], [2, 2, 2], [2, 2, 2]],
            [
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
            ],
            [
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
            ],
            fov=fov,
            voxel_size=anisotropy,
        )

        affs_2_bc, fov = ops3d.conv_pass(
            out_bc,
            kernel_size=[[1, 1, 1]],
            num_fmaps=3,
            activation="sigmoid",
            fov=fov,
            voxel_size=anisotropy,
        )

        affs_2_c = tf.reshape(affs_2_bc, (3,) + shape_2)
        gt_affs_2_c = ops3d.center_crop(gt_affs_1_c, (3,) + shape_2)
        loss_weights_2_c = ops3d.center_crop(loss_weights_1_c, (3,) + shape_2)

        loss_2 = tf.losses.mean_squared_error(gt_affs_2_c, affs_2_c, loss_weights_2_c)
        tf.summary.scalar("loss_pred1", loss_2)
    loss = loss_1 + loss_2
    tf.summary.scalar("loss_total", loss)
    tf.summary.scalar("loss_diff", loss_1 - loss_2)
    for trainable in tf.trainable_variables():
        custom_ops.tf_var_summary(trainable)
    merged = tf.summary.merge_all()

    opt = tf.train.AdamOptimizer(
        learning_rate=0.5e-4, beta1=0.95, beta2=0.999, epsilon=1e-8
    )
    optimizer = opt.minimize(loss)

    tf.train.export_meta_graph(filename="wnet.meta")

    names = {
        "raw": raw_0.name,
        "affs_1": affs_1_c.name,
        "affs_2": affs_2_c.name,
        "gt_affs": gt_affs_1_c.name,
        "loss_weights": loss_weights_1_c.name,
        "loss": loss.name,
        "optimizer": optimizer.name,
        "summary": merged.name,
    }
    with open("net_io_names.json", "w") as f:
        json.dump(names, f)
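
ops3d.center_crop is project-local; for reading these autocontext examples it is enough to know that it trims a tensor symmetrically down to a target shape. A numpy sketch of that presumed behavior:

# Presumed behavior of a center crop like ops3d.center_crop (assumption).
import numpy as np

def center_crop(arr, target_shape):
    # crop each axis symmetrically down to target_shape
    offsets = [(s - t) // 2 for s, t in zip(arr.shape, target_shape)]
    return arr[tuple(slice(o, o + t) for o, t in zip(offsets, target_shape))]

block = np.zeros((220, 220, 220), np.float32)
print(center_crop(block, (132, 132, 132)).shape)  # (132, 132, 132)
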
Example #11
    if args.network_arch == 'resnet':
        net = ResnetGenerator(args.bands, 6, n_blocks=args.resnet_blocks)
    elif args.network_arch == 'segnet':
        if args.mini:
            net = segnetm(args.bands, 6)
        else:
            net = segnet(args.bands, 6)
    elif args.network_arch == 'unet':
        if args.use_mini:
            net = unetm(args.bands,
                        6,
                        use_SE=args.use_SE,
                        use_PReLU=args.use_preluSE)
        else:
            net = unet(args.bands, 6)
    else:
        raise NotImplementedError('unknown network_arch: {}'.format(args.network_arch))

    init_weights(net, init_type=args.init_weights)
    if args.pretrained_weights is not None:
        load_weights(net, args.pretrained_weights)
        print('Completed loading pretrained network weights')

    net.to(device)

    optimizer = optim.Adam(net.parameters(), lr=args.learning_rate)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40, 50])

    trainloss = []
    valloss = []
Example #12
def train_net():
    input_shape = (84, 268, 268)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_batched = tf.reshape(raw, (1, 1,) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(raw_batched, 12, 6, [[1, 3, 3], [1, 3, 3], [1, 3, 3]],
                                           [[(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)],
                                            [(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
                                           [[(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)],
                                            [(3, 3, 3), (3, 3, 3)], [(3, 3, 3), (3, 3, 3)]],
                                           voxel_size=(10, 1, 1), fov=(10, 1, 1))

    logits_batched, fov = unet.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=2,
        activation=None,
        fov=fov,
        voxel_size=anisotropy
    )

    output_shape_batched = logits_batched.get_shape().as_list()

    output_shape = output_shape_batched[1:]  # strip the batch dimension
    flat_logits = tf.transpose(tf.reshape(tensor=logits_batched, shape=(2, -1)))

    gt_labels = tf.placeholder(tf.float32, shape=output_shape[1:])
    gt_labels_flat = tf.reshape(gt_labels, (-1,))

    gt_bg = tf.to_float(tf.not_equal(gt_labels_flat, 1))
    flat_ohe = tf.stack(values=[gt_labels_flat, gt_bg], axis=1)
    
    loss_weights = tf.placeholder(tf.float32, shape=output_shape[1:])
    loss_weights_flat = tf.reshape(loss_weights, (-1,))
    print(logits_batched.get_shape().as_list())
    probabilities = tf.reshape(tf.nn.softmax(logits_batched, dim=1)[0], output_shape)
    predictions = tf.argmax(probabilities, axis=0)

    ce_loss_balanced = tf.losses.softmax_cross_entropy(flat_ohe, flat_logits, weights=loss_weights_flat)
    ce_loss_unbalanced = tf.losses.softmax_cross_entropy(flat_ohe, flat_logits)
    tf.summary.scalar('loss_balanced_syn', ce_loss_balanced)
    tf.summary.scalar('loss_unbalanced_syn', ce_loss_unbalanced)

    opt = tf.train.AdamOptimizer(
        learning_rate=0.5e-4,
        beta1=0.95,
        beta2=0.999,
        epsilon=1e-8)

    optimizer = opt.minimize(ce_loss_balanced)
    merged = tf.summary.merge_all()

    tf.train.export_meta_graph(filename='unet.meta')

    names = {
        'raw': raw.name,
        'probabilities': probabilities.name,
        'predictions': predictions.name,
        'gt_labels': gt_labels.name,
        'loss_balanced_syn': ce_loss_balanced.name,
        'loss_unbalanced_syn': ce_loss_unbalanced.name,
        'loss_weights': loss_weights.name,
        'optimizer': optimizer.name,
        'summary': merged.name}

    with open('net_io_names.json', 'w') as f:
        json.dump(names, f)
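
The one-hot target above is built directly from the binary labels: column 0 is the foreground label itself, column 1 its complement. A tiny numpy check of that construction:

# Numpy check of the flat one-hot encoding fed to softmax_cross_entropy.
import numpy as np

gt = np.array([0., 1., 1., 0.], dtype=np.float32)
ohe = np.stack([gt, (gt != 1).astype(np.float32)], axis=1)
print(ohe)  # [[0. 1.] [1. 0.] [1. 0.] [0. 1.]]
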
Example #13
def train_net():

    # two autocontext passes with valid convolutions shrink each axis:
    # 220 -> 132 -> 44
    shape_0 = (220,) * 3
    shape_1 = (132,) * 3
    shape_2 = (44,) * 3
    ignore = False

    affs_0_bc = tf.ones((1, 3) + shape_0) * 0.5

    with tf.variable_scope('autocontext') as scope:

        # phase 1
        raw_0 = tf.placeholder(tf.float32, shape=shape_0)
        raw_0_bc = tf.reshape(raw_0, (1, 1) + shape_0)

        input_0_bc = tf.concat([raw_0_bc, affs_0_bc], 1)
        if ignore:
            keep_raw_bc = tf.ones_like(raw_0_bc)
            ignore_aff_bc = tf.zeros_like(affs_0_bc)
            ignore_mask_bc = tf.concat([keep_raw_bc, ignore_aff_bc], 1)
            input_0_bc = custom_ops.ignore(input_0_bc, ignore_mask_bc)

        out_bc, fov, anisotropy = unet.unet(
            input_0_bc,
            24,
            3,
            [[2, 2, 2], [2, 2, 2], [2, 2, 2]],
            [
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
            ],
            [
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
            ],
        )

        affs_1_bc, fov = ops3d.conv_pass(out_bc,
                                         kernel_size=[[1, 1, 1]],
                                         num_fmaps=3,
                                         activation='sigmoid',
                                         fov=fov,
                                         voxel_size=anisotropy)

        affs_1_c = tf.reshape(affs_1_bc, (3, ) + shape_1)
        gt_affs_1_c = tf.placeholder(tf.float32, shape=(3, ) + shape_1)
        loss_weights_1_c = tf.placeholder(tf.float32, shape=(3, ) + shape_1)

        loss_1 = tf.losses.mean_squared_error(gt_affs_1_c, affs_1_c,
                                              loss_weights_1_c)

        # phase 2
        tf.summary.scalar('loss_pred0', loss_1)
        scope.reuse_variables()
        affs_1_bc = tf.stop_gradient(affs_1_bc)  # assign the result: stop_gradient returns a new tensor, a bare call is a no-op
        raw_1 = ops3d.center_crop(raw_0, shape_1)
        raw_1_bc = tf.reshape(raw_1, (1, 1) + shape_1)

        input_1_bc = tf.concat([raw_1_bc, affs_1_bc], 1)

        out_bc, fov, anisotropy = unet.unet(
            input_1_bc,
            24,
            3,
            [[2, 2, 2], [2, 2, 2], [2, 2, 2]],
            [
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
            ],
            [
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
                [(3, 3, 3), (3, 3, 3)],
            ],
            fov=fov,
            voxel_size=anisotropy,
        )

        affs_2_bc, fov = ops3d.conv_pass(out_bc,
                                         kernel_size=[[1, 1, 1]],
                                         num_fmaps=3,
                                         activation='sigmoid',
                                         fov=fov,
                                         voxel_size=anisotropy)

        affs_2_c = tf.reshape(affs_2_bc, (3, ) + shape_2)
        gt_affs_2_c = ops3d.center_crop(gt_affs_1_c, (3, ) + shape_2)
        loss_weights_2_c = ops3d.center_crop(loss_weights_1_c, (3, ) + shape_2)

        loss_2 = tf.losses.mean_squared_error(gt_affs_2_c, affs_2_c,
                                              loss_weights_2_c)
        tf.summary.scalar('loss_pred1', loss_2)
    loss = loss_1 + loss_2
    tf.summary.scalar('loss_total', loss)
    tf.summary.scalar('loss_diff', loss_1 - loss_2)
    for trainable in tf.trainable_variables():
        custom_ops.tf_var_summary(trainable)
    merged = tf.summary.merge_all()

    opt = tf.train.AdamOptimizer(learning_rate=0.5e-4,
                                 beta1=0.95,
                                 beta2=0.999,
                                 epsilon=1e-8)
    optimizer = opt.minimize(loss)

    tf.train.export_meta_graph(filename='wnet.meta')

    names = {
        'raw': raw_0.name,
        'affs_1': affs_1_c.name,
        'affs_2': affs_2_c.name,
        'gt_affs': gt_affs_1_c.name,
        'loss_weights': loss_weights_1_c.name,
        'loss': loss.name,
        'optimizer': optimizer.name,
        'summary': merged.name
    }
    with open('net_io_names.json', 'w') as f:
        json.dump(names, f)
Example #14
def train_net():
    input_shape = (43, 430, 430)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        6,
        [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1),
    )

    dist_bc, fov = ops3d.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=3,
        activation=None,
        fov=fov,
        voxel_size=anisotropy,
    )
    output_shape_bc = dist_bc.get_shape().as_list()
    output_shape_c = output_shape_bc[1:]
    output_shape = output_shape_c[1:]
    dist_c = tf.reshape(dist_bc, shape=output_shape_c)
    cleft_dist, pre_dist, post_dist = tf.unstack(dist_c, 3, axis=0)

    gt_cleft_dist = tf.placeholder(tf.float32, shape=output_shape)
    gt_pre_dist = tf.placeholder(tf.float32, shape=output_shape)
    gt_post_dist = tf.placeholder(tf.float32, shape=output_shape)

    loss_weights_cleft = tf.placeholder(tf.float32, shape=output_shape)
    loss_weights_pre = tf.placeholder(tf.float32, shape=output_shape)
    loss_weights_post = tf.placeholder(tf.float32, shape=output_shape)
    mask = tf.placeholder(tf.float32, shape=output_shape)

    loss_balanced_cleft = tf.losses.mean_squared_error(gt_cleft_dist,
                                                       cleft_dist,
                                                       loss_weights_cleft)
    loss_balanced_pre = tf.losses.mean_squared_error(gt_pre_dist, pre_dist,
                                                     loss_weights_pre)
    loss_balanced_post = tf.losses.mean_squared_error(gt_post_dist, post_dist,
                                                      loss_weights_post)
    loss_unbalanced_cleft = tf.losses.mean_squared_error(
        gt_cleft_dist, cleft_dist, mask)
    loss_unbalanced_pre = tf.losses.mean_squared_error(gt_pre_dist, pre_dist,
                                                       mask)
    loss_unbalanced_post = tf.losses.mean_squared_error(
        gt_post_dist, post_dist, mask)

    loss_total = loss_balanced_cleft + loss_unbalanced_pre + loss_unbalanced_post
    tf.summary.scalar("loss_balanced_syn", loss_balanced_cleft)
    tf.summary.scalar("loss_balanced_pre", loss_balanced_pre)
    tf.summary.scalar("loss_balanced_post", loss_balanced_post)

    tf.summary.scalar("loss_unbalanced_syn", loss_unbalanced_cleft)
    tf.summary.scalar("loss_unbalanced_pre", loss_unbalanced_pre)
    tf.summary.scalar("loss_unbalanced_post", loss_unbalanced_post)
    tf.summary.scalar("loss_total", loss_total)

    opt = tf.train.AdamOptimizer(learning_rate=0.5e-4,
                                 beta1=0.95,
                                 beta2=0.999,
                                 epsilon=1e-8)

    optimizer = opt.minimize(loss_total)
    merged = tf.summary.merge_all()

    tf.train.export_meta_graph(filename="unet.meta")

    names = {
        "raw": raw.name,
        "cleft_dist": cleft_dist.name,
        "pre_dist": pre_dist.name,
        "post_dist": post_dist.name,
        "gt_cleft_dist": gt_cleft_dist.name,
        "gt_pre_dist": gt_pre_dist.name,
        "gt_post_dist": gt_post_dist.name,
        "loss_balanced_cleft": loss_balanced_cleft.name,
        "loss_balanced_pre": loss_balanced_pre.name,
        "loss_balanced_post": loss_balanced_post.name,
        "loss_unbalanced_cleft": loss_unbalanced_cleft.name,
        "loss_unbalanced_pre": loss_unbalanced_pre.name,
        "loss_unbalanced_post": loss_unbalanced_post.name,
        "loss_total": loss_total.name,
        "loss_weights_cleft": loss_weights_cleft.name,
        "loss_weights_pre": loss_weights_pre.name,
        "loss_weights_post": loss_weights_post.name,
        "mask": mask.name,
        "optimizer": optimizer.name,
        "summary": merged.name,
    }

    with open("net_io_names.json", "w") as f:
        json.dump(names, f)
Example #15
if __name__ == "__main__":

    raw = tf.placeholder(tf.float32, shape=(196, ) * 3)
    raw_bc = tf.reshape(raw, (1, 1) + (196, ) * 3)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        6,
        [[2, 2, 2], [2, 2, 2], [3, 3, 3]],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(1, 1, 1),
        fov=(1, 1, 1),
    )

    dist_bc, fov = ops3d.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=1,
        activation=None,
        fov=fov,
        voxel_size=anisotropy,
    )
Example #16
                              model_config['resize_dim'])

        if args.verbose:
            print("{} Creating Data Generator...".format(current_time))

        train_generator = data.data_generator(
            train_X,
            train_y,
            resize_dim=model_config['resize_dim'],
            batch_size=model_config['batch_size'])
        validation_data = data.preprocess_data(val_X, val_y)

        if args.verbose:
            print("{} Setting up Model...\n".format(current_time))

        model = unet.unet(batchnorm=model_config['batchnorm'],
                          dropout=model_config['dropout'])

        # print a model summary
        if args.verbose:
            print("Model Summary:")
            model.summary()  # summary() prints itself and returns None

        if args.verbose:
            print("\n{} Compiling Model...".format(current_time))

        optimizer = Adam(lr=0.01)
        model.compile(optimizer=optimizer,
                      loss='binary_crossentropy',
                      metrics=[MeanIoU(num_classes=2)])

    if args.model == 'alexnet':
Example #17
def train_net():
    input_shape = (43, 430, 430)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        6,
        [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1),
    )

    dist_bc, fov = ops3d.conv_pass(last_fmap,
                                   kernel_size=[[1, 1, 1]],
                                   num_fmaps=2,
                                   activation=None,
                                   fov=fov,
                                   voxel_size=anisotropy)

    output_shape_bc = dist_bc.get_shape().as_list()
    output_shape_c = output_shape_bc[1:]
    output_shape = output_shape_c[1:]
    dist_c = tf.reshape(dist_bc, output_shape_c)
    syn_dist, bdy_dist = tf.unstack(dist_c, 2, axis=0)

    gt_syn_dist = tf.placeholder(tf.float32, shape=output_shape)
    gt_bdy_dist = tf.placeholder(tf.float32, shape=output_shape)

    loss_weights = tf.placeholder(tf.float32, shape=output_shape)
    mask = tf.placeholder(tf.float32, shape=output_shape)

    loss_balanced_syn = tf.losses.mean_squared_error(gt_syn_dist, syn_dist,
                                                     loss_weights)
    loss_bdy = tf.losses.mean_squared_error(gt_bdy_dist, bdy_dist)
    loss_total = loss_balanced_syn + loss_bdy
    tf.summary.scalar('loss_balanced_syn', loss_balanced_syn)
    tf.summary.scalar('loss_bdy', loss_bdy)
    tf.summary.scalar('loss_total', loss_total)

    loss_unbalanced = tf.losses.mean_squared_error(gt_syn_dist, syn_dist, mask)
    tf.summary.scalar('loss_unbalanced_syn', loss_unbalanced)

    opt = tf.train.AdamOptimizer(learning_rate=0.5e-4,
                                 beta1=0.95,
                                 beta2=0.999,
                                 epsilon=1e-8)

    optimizer = opt.minimize(loss_total)
    merged = tf.summary.merge_all()

    tf.train.export_meta_graph(filename='unet.meta')

    names = {
        'raw': raw.name,
        'syn_dist': syn_dist.name,
        'bdy_dist': bdy_dist.name,
        'gt_syn_dist': gt_syn_dist.name,
        'gt_bdy_dist': gt_bdy_dist.name,
        'loss_balanced_syn': loss_balanced_syn.name,
        'loss_unbalanced_syn': loss_unbalanced.name,
        'loss_bdy': loss_bdy.name,
        'loss_total': loss_total.name,
        'loss_weights': loss_weights.name,
        'mask': mask.name,
        'optimizer': optimizer.name,
        'summary': merged.name
    }

    with open('net_io_names.json', 'w') as f:
        json.dump(names, f)
Example #18
def train_net():
    input_shape = (43, 430, 430)
    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_bc = tf.reshape(raw, (1, 1) + input_shape)

    last_fmap, fov, anisotropy = unet.unet(
        raw_bc,
        12,
        6,
        [[1, 3, 3], [1, 3, 3], [3, 3, 3]],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        [
            [(1, 3, 3), (1, 3, 3)],
            [(1, 3, 3), (1, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
            [(3, 3, 3), (3, 3, 3)],
        ],
        voxel_size=(10, 1, 1),
        fov=(10, 1, 1),
    )

    logits_bc, fov = ops3d.conv_pass(
        last_fmap,
        kernel_size=[[1, 1, 1]],
        num_fmaps=2,
        activation=None,
        fov=fov,
        voxel_size=anisotropy,
    )

    output_shape_bc = logits_bc.get_shape().as_list()

    output_shape_c = output_shape_bc[1:]  # strip the batch dimension
    output_shape = output_shape_c[1:]  # strip the channel dimension
    flat_logits = tf.transpose(tf.reshape(tensor=logits_bc, shape=(2, -1)))

    gt_labels = tf.placeholder(tf.float32, shape=output_shape)
    gt_labels_flat = tf.reshape(gt_labels, (-1,))

    gt_bg = tf.to_float(tf.not_equal(gt_labels_flat, 1))
    flat_ohe = tf.stack(values=[gt_labels_flat, gt_bg], axis=1)

    loss_weights = tf.placeholder(tf.float32, shape=output_shape)
    loss_weights_flat = tf.reshape(loss_weights, (-1,))

    mask = tf.placeholder(tf.float32, shape=output_shape)
    mask_flat = tf.reshape(mask, (-1,))

    probabilities = tf.reshape(tf.nn.softmax(logits_bc, dim=1)[0], output_shape_c)
    predictions = tf.argmax(probabilities, axis=0)

    ce_loss_balanced = tf.losses.softmax_cross_entropy(
        flat_ohe, flat_logits, weights=loss_weights_flat
    )
    ce_loss_unbalanced = tf.losses.softmax_cross_entropy(
        flat_ohe, flat_logits, weights=mask_flat
    )
    tf.summary.scalar("loss_balanced_syn", ce_loss_balanced)
    tf.summary.scalar("loss_unbalanced_syn", ce_loss_unbalanced)

    opt = tf.train.AdamOptimizer(
        learning_rate=0.5e-4, beta1=0.95, beta2=0.999, epsilon=1e-8
    )

    optimizer = opt.minimize(ce_loss_balanced)
    merged = tf.summary.merge_all()

    tf.train.export_meta_graph(filename="unet.meta")

    names = {
        "raw": raw.name,
        "probabilities": probabilities.name,
        "predictions": predictions.name,
        "gt_labels": gt_labels.name,
        "loss_balanced_syn": ce_loss_balanced.name,
        "loss_unbalanced_syn": ce_loss_unbalanced.name,
        "loss_weights": loss_weights.name,
        "mask": mask.name,
        "optimizer": optimizer.name,
        "summary": merged.name,
    }

    with open("net_io_names.json", "w") as f:
        json.dump(names, f)
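
The flattening in this last example relies on the channel axis coming first once the size-1 batch axis collapses: reshape to (2, -1) keeps each class map as one row, and the transpose yields one (class0, class1) pair per voxel. A numpy sanity check of that reshape logic:

# Numpy sanity check of the (2, -1) reshape + transpose used for the loss.
import numpy as np

logits_bc = np.arange(48, dtype=np.float32).reshape(1, 2, 2, 3, 4)
flat = np.transpose(logits_bc.reshape(2, -1))
print(flat.shape)  # (24, 2): one logit pair per voxel
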