Example #1
def get_unet(input_shape, pool_cnt, filter_cnt):
    inputs = Input(shape=input_shape, name='input')
    layer = inputs

    # Contracting path: two conv_bn_relu blocks per level, then 2x2 max pooling
    skip_connections = []
    for _ in range(pool_cnt):
        for _ in range(2):
            layer = conv_bn_relu(layer, filter_cnt)
        skip_connections.append(layer)
        layer = MaxPooling2D(2, strides=2)(layer)
        filter_cnt *= 2

    # Bottleneck
    for _ in range(2):
        layer = conv_bn_relu(layer, filter_cnt)

    # Expanding path: upsample, concatenate the matching skip connection, then convolve
    for _ in range(pool_cnt):
        filter_cnt //= 2
        layer = UpSampling2D(2)(layer)
        layer = concatenate([layer, skip_connections.pop()], axis=3)
        for _ in range(3):
            layer = conv_bn_relu(layer, filter_cnt)

    output = Conv2D(1, 1, activation='sigmoid', name='output')(layer)

    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer=SGD(0.001, momentum=0.9, nesterov=True),
                  loss=bce_dice_loss,
                  metrics=[dice_coef, false_positive, false_negative])
    model.summary()

    return model
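A minimal usage sketch for the function above (hypothetical shapes and data; it assumes conv_bn_relu, bce_dice_loss, dice_coef, false_positive and false_negative are importable from the surrounding project, along with the Keras layers used in the snippet):

import numpy as np

# Build a 4-level U-Net for 256x256 RGB tiles; 256 is divisible by 2**4,
# so the decoder's upsampled maps line up with the stored skip connections.
model = get_unet(input_shape=(256, 256, 3), pool_cnt=4, filter_cnt=32)

# Tiny random batch just to exercise the graph (illustrative only).
x = np.random.rand(2, 256, 256, 3).astype("float32")
y = (np.random.rand(2, 256, 256, 1) > 0.5).astype("float32")
model.fit(x, y, batch_size=2, epochs=1)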
Example #2
def _get_model(self):
    x = tf.keras.Input(shape=(None, None, 3))
    a = InputNormalize()(x)
    #a = ReflectionPadding2D(padding=(40,40),input_shape=(img_width,img_height,3))(a)
    a = conv_bn_relu(8, 9, 9, stride=(1, 1))(a)
    a = conv_bn_relu(16, 3, 3, stride=(2, 2))(a)
    a = conv_bn_relu(32, 3, 3, stride=(2, 2))(a)
    for _ in range(2):
        a = res_conv(32, 3, 3)(a)
    a = dconv_bn_nolinear(16, 3, 3)(a)
    a = dconv_bn_nolinear(8, 3, 3)(a)
    a = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(a)
    # Scale output to range [0, 255] via custom Denormalize layer
    y = Denormalize(name='transform_output')(a)
    return tf.keras.Model(x, y, name="transformnet")
Example #3
def pt_regressor_conv(layer_in, flags):
    """ Return a standard convolutional polar-transform origin regressor. """
    nfilters = [int(x) for x in flags.ptreg_nfilters.split(",")]
    strides = [int(x) for x in flags.ptreg_strides.split(",")]
    weights_init = flags.weights_init

    # first block is always conv_bn_relu
    name1st = 'ptreg_conv0'
    first = layers.conv_bn_relu(layer_in,
                                nfilters[0],
                                3,
                                name1st,
                                strides=strides[0],
                                padding=flags.pad,
                                weight_decay=flags.weight_decay,
                                weights_init=weights_init,
                                activation=flags.activation)
    nfilters = nfilters[1:]
    strides = strides[1:]

    net, curr = layers.conv_sequence(first,
                                     nfilters,
                                     strides,
                                     block_fun=layers.conv_bn_relu,
                                     pad=flags.pad,
                                     weight_decay=flags.weight_decay,
                                     weights_init=weights_init,
                                     name_prefix='ptreg_',
                                     activation=flags.activation)

    net[name1st] = first
    return net, curr
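For reference, a hypothetical flags object showing the comma-separated format this regressor expects; the field names mirror the attributes read above, and all values are illustrative only:

from argparse import Namespace

# Illustrative flags: three conv_bn_relu blocks with 32, 64 and 128 filters,
# each downsampling by 2. The remaining fields are passed straight through
# to layers.conv_bn_relu / layers.conv_sequence.
flags = Namespace(ptreg_nfilters="32,64,128",
                  ptreg_strides="2,2,2",
                  pad="same",
                  weight_decay=1e-4,
                  weights_init="he_normal",
                  activation="relu")

net, curr = pt_regressor_conv(layer_in, flags)  # layer_in: some 4-D feature tensor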
Example #4
def image_transform_net(img_width, img_height, tv_weight=1):
    x = Input(shape=(img_width, img_height, 3), name="input")
    a = layers.InputNormalize()(x)
    a = layers.ReflectionPadding2D(padding=(40, 40),
                                   input_shape=(img_width, img_height, 3))(a)
    a = layers.conv_bn_relu(32, 9, 9, stride=(1, 1))(a)
    a = layers.conv_bn_relu(64, 9, 9, stride=(2, 2))(a)
    a = layers.conv_bn_relu(128, 3, 3, stride=(2, 2))(a)
    for _ in range(5):
        a = layers.res_conv(128, 3, 3)(a)
    a = layers.dconv_bn_nolinear(64, 3, 3)(a)
    a = layers.dconv_bn_nolinear(64, 3, 3)(a)
    a = layers.conv_bn_relu(3, 9, 9, stride=(1, 1), relu=False)(a)
    y = layers.Denormalize(name='transform_output')(a)
    model = Model(inputs=x, outputs=y)
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)
    return model
Example #5
def image_transform_net(img_width, img_height, tv_weight=1):
    x = Input(shape=(img_width, img_height, 3))
    a = InputNormalize()(x)
    a = ReflectionPadding2D(padding=(40, 40),
                            input_shape=(img_width, img_height, 3))(a)
    a = conv_bn_relu(32, 9, 9, stride=(1, 1))(a)
    a = conv_bn_relu(64, 9, 9, stride=(2, 2))(a)
    a = conv_bn_relu(128, 3, 3, stride=(2, 2))(a)
    for _ in range(5):
        a = res_conv(128, 3, 3)(a)
    a = dconv_bn_nolinear(64, 3, 3)(a)
    a = dconv_bn_nolinear(32, 3, 3)(a)
    a = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(a)
    # Scale output to range [0, 255] via custom Denormalize layer
    y = Denormalize(name='transform_output')(a)

    model = Model(inputs=x, outputs=y)

    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)

    return model
Example #6
def image_transform_net(img_width, img_height, tv_weight=1):
    """
    Image transform network model.
    """
    # Input layer as an RGB image
    x = Input(shape=(img_width, img_height, 3))

    # Normalize input image
    a = InputNormalize()(x)

    # Pad image
    a = ReflectionPadding2D(padding=(40, 40),
                            input_shape=(img_width, img_height, 3))(a)

    # Extract feature maps
    a = conv_bn_relu(32, 9, 9, stride=(1, 1))(a)
    a = conv_bn_relu(64, 3, 3,
                     stride=(2, 2))(a)  # The previous kernel size was 9x9
    a = conv_bn_relu(128, 3, 3, stride=(2, 2))(a)
    for _ in range(5):
        a = res_conv(128, 3, 3)(a)
    a = dconv_bn_nolinear(64, 3, 3)(a)
    a = dconv_bn_nolinear(32, 3, 3)(a)
    a = dconv_bn_nolinear(3, 9, 9, stride=(1, 1), activation="tanh")(a)

    # Scale output to range [0, 255] via custom Denormalize layer
    y = Denormalize(name='transform_output')(a)

    # Create model
    model = Model(inputs=x, outputs=y)

    # Total variation regularizer
    if tv_weight > 0:
        add_total_variation_loss(model.layers[-1], tv_weight)

    return model
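A hypothetical usage sketch for the transform network, assuming the custom layers (InputNormalize, ReflectionPadding2D, conv_bn_relu, res_conv, dconv_bn_nolinear, Denormalize) and add_total_variation_loss are importable from the style-transfer project these examples come from:

import numpy as np

# Build the transform net for 256x256 inputs and push one random image
# through it; the Denormalize layer scales the tanh output back to [0, 255].
net = image_transform_net(256, 256, tv_weight=1)
styled = net.predict(np.random.rand(1, 256, 256, 3).astype("float32") * 255.0)
print(styled.shape)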
Example #7
def get_tiramisu(input_shape, depth_list, growth_rate_list, wd=0.):
    inputs = Input(shape=input_shape, name='input')
    layer = inputs

    # Down path: dense block, stash the skip connection, then a strided 1x1 transition down
    skip_connections = []
    for depth, growth_rate in zip(depth_list[:-1], growth_rate_list[:-1]):
        layer, _ = dense_block(layer, depth, growth_rate, wd=wd)
        skip_connections.append(layer)
        layer = conv_bn_relu(layer,
                             layer.get_shape().as_list()[-1],
                             filter_size=1,
                             strides=2,
                             wd=wd)

    layer, layer_list = dense_block(layer,
                                    depth_list[-1],
                                    growth_rate_list[-1],
                                    wd=wd)

    for depth, growth_rate in zip(reversed(depth_list[:-1]),
                                  reversed(growth_rate_list[:-1])):
        # Upsample only the feature maps produced by the previous dense block
        layer = concatenate(layer_list)
        layer = Conv2DTranspose(layer.get_shape().as_list()[-1],
                                kernel_size=3,
                                strides=2,
                                padding='same',
                                kernel_initializer='he_uniform')(layer)
        # Reattach the matching skip connection from the down path
        layer = concatenate([layer, skip_connections.pop()], axis=-1)
        layer, layer_list = dense_block(layer, depth, growth_rate, wd=wd)

    output = Conv2D(1,
                    1,
                    activation='sigmoid',
                    name='output',
                    kernel_regularizer=l2(wd))(layer)

    model = Model(inputs=inputs, outputs=output)
    model.compile(optimizer=SGD(0.005, momentum=0.9, nesterov=True),
                  loss=bce_dice_loss,
                  metrics=[dice_coef, false_positive, false_negative])
    model.summary()

    return model
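A minimal call sketch for the Tiramisu builder (hypothetical depths and growth rates; dense_block, conv_bn_relu, bce_dice_loss and the metric helpers are assumed to come from the surrounding project):

# Three dense blocks on the way down, a bottleneck block, and three on the
# way up; 224 is divisible by 2**3, so the transposed convolutions restore
# the skip-connection resolutions exactly.
model = get_tiramisu(input_shape=(224, 224, 3),
                     depth_list=[4, 4, 4, 4],
                     growth_rate_list=[12, 12, 12, 12],
                     wd=1e-4)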
Example #8
def conv_from_flags(layer_in, flags):
    """ Build a series of strided convolutional layers from flags.nfilters and flags.strides. """
    nfilters = [int(x) for x in flags.nfilters.split(",")]
    strides = [int(x) for x in flags.strides.split(",")]
    block_fun = layers.conv_bn_relu
    net = {}

    # first block is always conv_bn_relu
    if flags.pad_wrap:
        pad = 'wrap'
    else:
        pad = flags.pad
    net['conv0'], curr = dup(
        layers.conv_bn_relu(layer_in,
                            nfilters[0],
                            flags.filter_size,
                            'conv0',
                            strides=strides[0],
                            padding=pad,
                            weight_decay=flags.weight_decay,
                            weights_init=flags.weights_init,
                            activation=flags.activation))
    nfilters = nfilters[1:]
    strides = strides[1:]

    seq, curr = layers.conv_sequence(curr,
                                     nfilters,
                                     strides,
                                     block_fun,
                                     pad=pad,
                                     weight_decay=flags.weight_decay,
                                     activation=flags.activation,
                                     filter_size=flags.filter_size)

    for k, v in seq.items():
        net[k] = v

    net = finalize_conv_from_flags(net, curr, flags)

    return net
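As with pt_regressor_conv above, a hypothetical flags namespace illustrating the comma-separated fields this function reads (finalize_conv_from_flags may consume additional attributes not shown here; values are illustrative only):

from argparse import Namespace

# Illustrative values only; pad_wrap=True would override flags.pad with 'wrap'.
flags = Namespace(nfilters="32,64,128",
                  strides="2,2,2",
                  filter_size=3,
                  pad="same",
                  pad_wrap=False,
                  weight_decay=1e-4,
                  weights_init="he_normal",
                  activation="relu")

net = conv_from_flags(layer_in, flags)  # layer_in: input feature tensor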