from keras import layers
from keras.layers import Input
from keras.models import Model
from keras.optimizers import Adam


def U_net(input_size=(572, 572, 1)):
    # The fixed crop sizes below (4, 16, 40, 88) only line up for a 572x572
    # input, as in the original U-Net paper; a 256x256 input would fail at the
    # second skip concatenation.
    inputs = Input(input_size)
    c0 = layers.Conv2D(64, activation='relu', kernel_size=3)(inputs)
    c1 = layers.Conv2D(64, activation='relu', kernel_size=3)(c0)  # kept for concatenation in the expansive path
    c2 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(c1)

    c3 = layers.Conv2D(128, activation='relu', kernel_size=3)(c2)
    c4 = layers.Conv2D(128, activation='relu', kernel_size=3)(c3)  # kept for concatenation in the expansive path
    c5 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(c4)

    c6 = layers.Conv2D(256, activation='relu', kernel_size=3)(c5)
    c7 = layers.Conv2D(256, activation='relu', kernel_size=3)(c6)  # kept for concatenation in the expansive path
    c8 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(c7)

    c9 = layers.Conv2D(512, activation='relu', kernel_size=3)(c8)
    c10 = layers.Conv2D(512, activation='relu', kernel_size=3)(c9)  # kept for concatenation in the expansive path
    c11 = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(c10)

    c12 = layers.Conv2D(1024, activation='relu', kernel_size=3)(c11)
    c13 = layers.Conv2D(1024, activation='relu', kernel_size=3, padding='valid')(c12)

    # Second half of the U: the expansive path
    t01 = layers.Conv2DTranspose(512, kernel_size=2, strides=(2, 2), activation='relu')(c13)
    crop01 = layers.Cropping2D(cropping=(4, 4))(c10)

    concat01 = layers.concatenate([t01, crop01], axis=-1)

    c14 = layers.Conv2D(512, activation='relu', kernel_size=3)(concat01)
    c15 = layers.Conv2D(512, activation='relu', kernel_size=3)(c14)

    t02 = layers.Conv2DTranspose(256, kernel_size=2, strides=(2, 2), activation='relu')(c15)
    crop02 = layers.Cropping2D(cropping=(16, 16))(c7)

    concat02 = layers.concatenate([t02, crop02], axis=-1)

    c16 = layers.Conv2D(256, activation='relu', kernel_size=3)(concat02)
    c17 = layers.Conv2D(256, activation='relu', kernel_size=3)(c16)

    t03 = layers.Conv2DTranspose(128, kernel_size=2, strides=(2, 2), activation='relu')(c17)
    crop03 = layers.Cropping2D(cropping=(40, 40))(c4)

    concat03 = layers.concatenate([t03, crop03], axis=-1)

    c18 = layers.Conv2D(128, activation='relu', kernel_size=3)(concat03)
    c19 = layers.Conv2D(128, activation='relu', kernel_size=3)(c18)

    t04 = layers.Conv2DTranspose(64, kernel_size=2, strides=(2, 2), activation='relu')(c19)
    crop04 = layers.Cropping2D(cropping=(88, 88))(c1)

    concat04 = layers.concatenate([t04, crop04], axis=-1)

    c20 = layers.Conv2D(64, activation='relu', kernel_size=3)(concat04)
    c21 = layers.Conv2D(64, activation='relu', kernel_size=3)(c20)

    # Sigmoid keeps the two per-pixel maps in [0, 1]; binary_crossentropy
    # below expects probabilities, not raw logits.
    outputs = layers.Conv2D(2, kernel_size=1, activation='sigmoid')(c21)

    model = Model(inputs, outputs)
    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    return model
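
# A quick shape check (an added sketch, not part of the original example):
# the valid-padding contracting path plus the fixed crops should map a
# 572x572x1 input to a 388x388x2 output, as in the original U-Net.
if __name__ == '__main__':
    m = U_net()
    assert m.output_shape == (None, 388, 388, 2)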
Example #2
    def center_crop(original_ts, target_ts):
        diff = K.int_shape(original_ts)[1] - K.int_shape(target_ts)[1]
        crop_no = diff // 2
        if diff % 2 == 0:
            cropped_ts = layers.Cropping2D(cropping=((crop_no, crop_no), (crop_no, crop_no)))(original_ts)
        else:
            # For an odd size difference, take the extra pixel off the top/left.
            cropped_ts = layers.Cropping2D(cropping=((crop_no + 1, crop_no), (crop_no + 1, crop_no)))(original_ts)

        return cropped_ts
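
# Worked example (added): cropping a 57-pixel map down to 24 leaves
# diff = 33, so crop_no = 16 and the odd branch emits
# Cropping2D(((17, 16), (17, 16))).
assert ((57 - 24) // 2, (57 - 24) % 2) == (16, 1)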
Example #3
def fusionnetPos(y):
    y, inTensor1, inTensor2 = y
    # crop the input images to the same size as network output.
    inCrop1 = layers.Cropping2D(cropping=((8, 8), (8, 8)))(inTensor1)
    inCrop2 = layers.Cropping2D(cropping=((8, 8), (8, 8)))(inTensor2)
    # y1, y2 = tf.split(y, [1, 1], axis = 3)
    # tile y1 & y2 across the channel axis so they are consistent with the 3 color channels
    y1 = y[:, :, :, :1]
    y2 = y[:, :, :, 1:]
    y1 = K.tile(y1, [1, 1, 1, 3])
    y2 = K.tile(y2, [1, 1, 1, 3])
    y1 = layers.Multiply()([inCrop1, y1])
    y2 = layers.Multiply()([inCrop2, y2])
    y = layers.Add()([y1, y2])
    return y
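
# What this does (added note): y carries two single-channel soft masks; each
# mask is tiled to 3 channels with K.tile so it can gate an RGB input
# elementwise, and the two gated images are summed into one fused output.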
Example #4
    def expanding_block(self, filters, input_layer, concat_layer, name):
        ups = self.upscaling(input_layer, name)
        conv = layers.Conv2D(filters=filters,
                             kernel_size=2,
                             activation='relu',
                             padding='same',
                             name=name + "_conv_0")(ups)
        # compute cropping
        size1 = conv.get_shape().as_list()[1]
        size2 = concat_layer.get_shape().as_list()[1]
        if size1 != size2:
            cropping_left = (size2 - size1) // 2
            cropping_right = size2 - size1 - cropping_left
            cropping = ((cropping_left, cropping_right),
                        (cropping_left, cropping_right))
            # apply cropping
            crop = layers.Cropping2D(cropping,
                                     name=name + "_crop")(concat_layer)
            comb = layers.Concatenate(axis=3,
                                      name=name + "_concat")([crop, conv])
        else:
            comb = layers.Concatenate(axis=3,
                                      name=name + "_concat1")([concat_layer, conv])
        conv = layers.Conv2D(filters=filters,
                             kernel_size=3,
                             activation='relu',
                             padding=self.padding,
                             name=name + "_conv_1")(comb)
        conv = layers.Conv2D(filters=filters,
                             kernel_size=3,
                             activation='relu',
                             padding=self.padding,
                             name=name + "_conv_2")(conv)
        return conv
Example #5
def cropping2d(cropping, suffix=None):
    """Keras Cropping2D wrapper with custom name convention."""
    (top, bottom), (left, right) = cropping
    name = 'cropping2d_t{}_b{}_l{}_r{}'.format(top, bottom, left, right)
    if suffix:
        name += '_{}'.format(suffix)
    return layers.Cropping2D(cropping=cropping, name=name)
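
# Usage sketch (added): the wrapper only bakes the crop amounts into the layer
# name; e.g. the layer below ends up named 'cropping2d_t1_b1_l2_r2_head'.
c2d = cropping2d(((1, 1), (2, 2)), suffix='head')
assert c2d.name == 'cropping2d_t1_b1_l2_r2_head'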
Example #6
def output_model_askbid(inp, params, output_shape, interpretable, **kwargs):
    if interpretable:
        out = inp
    else:
        out_inp = layers.InputLayer(input_shape=inp.get_shape().as_list()[1:],
                                    name='out_inp')
        out = out_inp.output
    out = layers.Cropping2D(cropping=((out.shape[1].value - 1, 0), (0, 0)),
                            name='out_cropping')(out)
    out = layers.Reshape(target_shape=[i.value for i in out.shape[2:]],
                         name='out_reshape')(out)
    out_ask = output_model_b(out,
                             params,
                             output_shape[0],
                             interpretable=kwargs.get('interpretable_nested',
                                                      True),
                             name='ask')
    out_bid = output_model_b(out,
                             params,
                             output_shape[0],
                             interpretable=kwargs.get('interpretable_nested',
                                                      True),
                             name='bid')
    out = layers.concatenate([out_ask, out_bid], name='out_concatenate')
    if interpretable:
        return out
    else:
        return models.Model(inputs=out_inp.input, outputs=out, name='out')(inp)
Example #7
def dense_net(input_shape, output_shape, \
        dropout, cropped_size=(31,31), weight_decay=1E-4, \
        merged_model=False):
    input_layer = layers.Input(shape=input_shape)
    c = (((input_layer._keras_shape[1]-cropped_size[0])//2),\
            ((input_layer._keras_shape[2]-cropped_size[1])//2))
    crop_layer = layers.Cropping2D(c)(input_layer)
    init_conv = layers.Conv2D(filters=16, kernel_size=(1,1), \
            padding='same', use_bias=False,
            kernel_regularizer=l2(weight_decay))(crop_layer)
    # dense/transition block 1
    dense = dense_block(init_conv, 12, dropout, weight_decay)
    trans = transistion_block(dense, 40, dropout, weight_decay)
    # dense/transition block 2
    dense = dense_block(trans, 12, dropout, weight_decay)
    trans = transistion_block(dense, 64, dropout, weight_decay)
    # dense/transition block 3
    dense = dense_block(trans, 12, dropout, weight_decay)
    # trans = transistion_block(dense, 78, dropout, weight_decay)
    # dense block 4
    # dense = dense_block(trans, 12, dropout, weight_decay)
    x = layers.BatchNormalization(gamma_regularizer=l2(weight_decay), \
            beta_regularizer=l2(weight_decay))(dense)
    x = layers.Activation('relu')(x)
    x = layers.GlobalAveragePooling2D()(x)
    if merged_model:
        model = models.Model(inputs=input_layer, outputs=x)
    else: 
        output_layer = layers.Dense(units=output_shape, \
                activation="softmax", kernel_regularizer=l2(weight_decay), \
                bias_regularizer=l2(weight_decay))(x)
        model = models.Model(inputs=input_layer, outputs=output_layer)
    return model
Example #8
def buildModel(shape, dr1=0.1, dr2=0.5):
    """ Build a keras model to be trained. This uses the architecture discussed in the lecture
  that is said to be published by the NVidia Autonomous Vehicle Team.

  'shape' is the input shape, assumed to be 3 dimensional.
  'dr1' is the drop out rate for the convolutional layers.
  'dr2' is the drop out rate for the fully connected layers.
  """
    assert len(shape) == 3

    # We import keras here to avoid importing it (and a ton of other stuff) when running
    # the 'show_gui.py' script (which imports this script).
    import keras.models as _kmod
    import keras.layers as _klay

    model = _kmod.Sequential()

    # First crop and normalize the image(s).
    # Note that this is part of the model, and not part of loading the data, since it
    # needs to be done when the model is invoked by the simulator (in drive.py), and I didn't
    # want to modify drive.py and try to keep it in sync with this.

    # Ignore the top 42% and the bottom 15%.
    cropTop = int(shape[0] * 0.42)
    cropBot = int(shape[0] * 0.15)
    model.add(
        _klay.Cropping2D(cropping=((cropTop, cropBot), (0, 0)),
                         input_shape=shape))

    # Use very basic image normalization to get values between -0.5 and 0.5.
    model.add(_klay.Lambda(lambda x: x / 255.0 - 0.5))

    # Do three 5x5 convolutions with stride 2.
    model.add(
        _klay.Convolution2D(24, 5, 5, subsample=(2, 2), activation='relu'))
    model.add(_klay.SpatialDropout2D(dr1))
    model.add(
        _klay.Convolution2D(36, 5, 5, subsample=(2, 2), activation='relu'))
    model.add(_klay.SpatialDropout2D(dr1))
    model.add(
        _klay.Convolution2D(48, 5, 5, subsample=(2, 2), activation='relu'))

    # Do two 3x3 convolutions with stride 1
    model.add(_klay.SpatialDropout2D(dr1))
    model.add(
        _klay.Convolution2D(64, 3, 3, subsample=(1, 1), activation='relu'))
    model.add(_klay.SpatialDropout2D(dr1))
    model.add(
        _klay.Convolution2D(64, 3, 3, subsample=(1, 1), activation='relu'))

    # Do three fully connected layers.
    model.add(_klay.Flatten())
    model.add(_klay.Dropout(dr2))
    model.add(_klay.Dense(100, activation='relu'))
    model.add(_klay.Dropout(dr2))
    model.add(_klay.Dense(50, activation='relu'))
    model.add(_klay.Dropout(dr2))
    model.add(_klay.Dense(1))

    return model
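
# Worked example (added): for the simulator's 160x320x3 frames, the crop above
# removes int(160 * 0.42) = 67 rows from the top and int(160 * 0.15) = 24 rows
# from the bottom, leaving a 69x320 strip of road.
assert (int(160 * 0.42), int(160 * 0.15)) == (67, 24)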
Example #9
    def build_discriminator(self, alpha_leak=0.2):
        if (self.presentation):
            print("Building Discriminator...")

        img_shape = self.img_shape

        model = models.Sequential()

        model.add(
            layers.Conv2D(32 * self.channels, [2, 9],
                          strides=[1, 1],
                          padding="valid",
                          input_shape=img_shape))
        model.add(layers.Cropping2D(cropping=[[0, 1], [4, 4]]))
        model.add(LeakyReLU(alpha=alpha_leak))
        model.add(
            layers.Conv2D(64 * self.channels, [2, 9],
                          strides=[2, 2],
                          padding="same"))
        model.add(LeakyReLU(alpha=alpha_leak))
        model.add(layers.Flatten())
        model.add(layers.Dense(1024))
        model.add(LeakyReLU(alpha=alpha_leak))
        model.add(layers.Dense(1, activation="sigmoid"))

        model.summary()

        img = layers.Input(shape=img_shape)
        validity = model(img)

        return models.Model(img, validity)
Example #10
def unet():
    input_img = layers.Input((256, 256, 3), name='RGB_Input')
    pp_in_layer = input_img
    if NET_SCALING is not None:
        pp_in_layer = layers.AvgPool2D(NET_SCALING)(pp_in_layer)

    pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(pp_in_layer)
    pp_in_layer = layers.BatchNormalization()(pp_in_layer)

    c1 = layers.Conv2D(8, (3, 3), activation='relu',
                       padding='same')(pp_in_layer)
    c1 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c1)
    p1 = layers.MaxPooling2D((2, 2))(c1)

    c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(p1)
    c2 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c2)
    p2 = layers.MaxPooling2D((2, 2))(c2)

    c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(p2)
    c3 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c3)
    p3 = layers.MaxPooling2D((2, 2))(c3)

    c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(p3)
    c4 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c4)
    p4 = layers.MaxPooling2D(pool_size=(2, 2))(c4)

    c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(p4)
    c5 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(c5)

    u6 = upsample(64, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = layers.concatenate([u6, c4])
    c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(u6)
    c6 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(c6)

    u7 = upsample(32, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = layers.concatenate([u7, c3])
    c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(u7)
    c7 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(c7)

    u8 = upsample(16, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = layers.concatenate([u8, c2])
    c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(u8)
    c8 = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(c8)

    u9 = upsample(8, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = layers.concatenate([u9, c1], axis=3)
    c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(u9)
    c9 = layers.Conv2D(8, (3, 3), activation='relu', padding='same')(c9)

    d = layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
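    # Crop EDGE_CROP pixels off every border, then zero-pad the same amount
    # back: the output size is unchanged, but the border of the mask is forced
    # to zero rather than trusting predictions at the image edge.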
    d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
    d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
    if NET_SCALING is not None:
        d = layers.UpSampling2D(NET_SCALING)(d)

    seg_model = models.Model(inputs=[input_img], outputs=[d])
    return seg_model
Example #11
def atr_tiny_top(buff, out_size, num_chans, n_cls):
    filts = 128
    n_post_block = 4

    xconv0, xconv1, x1, x2, x3, inputs = atr_tiny_bot(buff, out_size,
                                                      num_chans, n_post_block)

    xconv0 = conv_bn(xconv0, filts)
    xconv0 = conv_bn(xconv0, filts)

    xconv1 = conv_bn(xconv1, filts)
    xconv1 = conv_bn(xconv1, filts)

    x1 = conv_bn(x1, filts)
    x1 = conv_bn(x1, filts)

    x2 = conv_bn(x2, filts)
    x2 = conv_bn(x2, filts)

    x3 = conv_bn(x3, filts)
    x3 = conv_bn(x3, filts)

    x = l.concatenate([xconv0, xconv1, x1, x2, x3])
    x = conv_bn(x, filts * 2)
    x = conv_bn(x, filts * 2)

    x3 = l.Cropping2D(cropping=((2, 2), (2, 2)))(x3)
    x2 = l.Cropping2D(cropping=((2, 2), (2, 2)))(x2)
    x1 = l.Cropping2D(cropping=((2, 2), (2, 2)))(x1)
    xconv0 = l.Cropping2D(cropping=((2, 2), (2, 2)))(xconv0)
    xconv1 = l.Cropping2D(cropping=((2, 2), (2, 2)))(xconv1)

    x = l.concatenate([x, xconv0, xconv1, x1, x2, x3])
    x = l.Dropout(0.5)(x)
    x = conv1_bn(x, 8 * filts)
    x = l.Dropout(0.5)(x)
    x = l.Conv2D(n_cls, (1, 1), activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x)

    model.compile(optimizer=Adam(lr=0.00001),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #12
def pad_to_scale(x, scale, size=300):
	expected = int(np.ceil(size / (2. ** scale)))
	diff = expected - int(x.shape[1])
	if diff > 0:
		left = diff // 2
		right = diff - left
		x = reflectpad(x, (left, right))
	elif diff < 0:
		left = -diff // 2
		right = -diff - left
		x = layers.Cropping2D(((left, right), (left, right)))(x)
	return x
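
# Usage sketch (added): `reflectpad` is a project-local helper not shown on
# this page; a minimal stand-in under that assumption, wrapping tf.pad with
# mode='REFLECT' on both spatial dimensions:
import tensorflow as tf
from keras import layers

def reflectpad(x, lr):
	left, right = lr
	return layers.Lambda(
		lambda t: tf.pad(t, [[0, 0], [left, right], [left, right], [0, 0]],
		                 mode='REFLECT'))(x)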
Example #13
def crop(o1, o2, i):
    o_shape2 = Model(i, o2).output_shape

    if IMAGE_ORDERING == 'channels_first':
        output_height2 = o_shape2[2]
        output_width2 = o_shape2[3]
    else:
        output_height2 = o_shape2[1]
        output_width2 = o_shape2[2]

    o_shape1 = Model(i, o1).output_shape
    if IMAGE_ORDERING == 'channels_first':
        output_height1 = o_shape1[2]
        output_width1 = o_shape1[3]
    else:
        output_height1 = o_shape1[1]
        output_width1 = o_shape1[2]

    cx = abs(output_width1 - output_width2)
    cy = abs(output_height2 - output_height1)

    if output_width1 > output_width2:
        o1 = KL.Cropping2D(cropping=((0, 0), (0, cx)),
                           data_format=IMAGE_ORDERING)(o1)
    else:
        o2 = KL.Cropping2D(cropping=((0, 0), (0, cx)),
                           data_format=IMAGE_ORDERING)(o2)

    if output_height1 > output_height2:
        o1 = KL.Cropping2D(cropping=((0, cy), (0, 0)),
                           data_format=IMAGE_ORDERING)(o1)
    else:
        o2 = KL.Cropping2D(cropping=((0, cy), (0, 0)),
                           data_format=IMAGE_ORDERING)(o2)

    return o1, o2
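
# Usage sketch (added): typical FCN skip fusion, where whichever branch is
# larger gets trimmed so the two can be summed:
#   o1, o2 = crop(o1, o2, img_input)
#   o = KL.Add()([o1, o2])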
Example #14
def crop(o1, o2, img_input):
    o_shape1 = Model(img_input, o1).output_shape
    outputHeight1 = o_shape1[1]
    outputWidth1 = o_shape1[2]

    o_shape2 = Model(img_input, o2).output_shape
    outputHeight2 = o_shape2[1]
    outputWidth2 = o_shape2[2]

    cx = abs(outputWidth1 - outputWidth2)
    cy = abs(outputHeight1 - outputHeight2)

    # Cropping2D crops the 2D input (image) along the spatial dimensions,
    # i.e. along height and width.
    if outputWidth1 > outputWidth2:
        o1 = layers.Cropping2D(cropping=((0, 0), (0, cx)), data_format='channels_last')(o1)
    else:
        o2 = layers.Cropping2D(cropping=((0, 0), (0, cx)), data_format='channels_last')(o2)

    if outputHeight1 > outputHeight2:
        o1 = layers.Cropping2D(cropping=((0, cy), (0, 0)), data_format='channels_last')(o1)
    else:
        o2 = layers.Cropping2D(cropping=((0, cy), (0, 0)), data_format='channels_last')(o2)

    return o1, o2
Example #15
def build_vae(data, n_filters=[32, 64, 64], kernel_sizes=[8, 4, 3], strides=[4, 2, 1],
                    fc_sizes=[256], latent_size=32, batch_size=32, epochs=10):

    input_shape = data[0].shape

    enc_input = layers.Input(shape=input_shape, name='encoder_input')
    x = enc_input

    for filters, kernel_size, stride in zip(n_filters, kernel_sizes, strides):
        x = layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=stride, padding='same', activation='relu')(x)
    
    pre_fc_shape = K.int_shape(x)
    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    for fc_size in fc_sizes:
        x = layers.Dense(fc_size, activation='relu')(x)

    z_mean = layers.Dense(latent_size, name='z_mean')(x)
    z_logvar = layers.Dense(latent_size, name='z_logvar')(x)
    z = layers.Lambda(sample_params, output_shape=(latent_size,), name='z')([z_mean, z_logvar])

    encoder = models.Model(enc_input, [z_mean, z_logvar, z], name='encoder')
    encoder.summary()

    latent_input = layers.Input(shape=(latent_size,), name='latent')
    x = latent_input

    for fc_size in fc_sizes[::-1]:
        x = layers.Dense(fc_size, activation='relu')(x)
    
    x = layers.Dense(pre_fc_shape[1] * pre_fc_shape[2] * pre_fc_shape[3], activation='relu')(x)
    x = layers.Reshape((pre_fc_shape[1], pre_fc_shape[2], pre_fc_shape[3]))(x)

#     n_filters = [3, 32, 64]
    for filters, kernel_size, stride in zip(n_filters[::-1], kernel_sizes[::-1], strides[::-1]):
        x = layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=stride, padding='same', activation='relu')(x)

    dec_output = layers.Conv2DTranspose(filters=3, kernel_size=1, strides=1, padding='same', activation='sigmoid')(x)
    dec_output = layers.Cropping2D((3,0), name='decoder_output')(dec_output)

    decoder = models.Model(latent_input, dec_output, name='decoder')
    decoder.summary()

    # Avoid shadowing the `input` builtin.
    vae_input = enc_input
    vae_output = decoder(encoder(vae_input)[2])
    vae = models.Model(vae_input, vae_output, name='vae')

    return vae, encoder, decoder, vae_input, vae_output, z_mean, z_logvar
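
# Note (added): `sample_params` is defined elsewhere in the source file; it is
# presumably the usual VAE reparameterization helper, e.g.:
#   def sample_params(args):
#       z_mean, z_logvar = args
#       eps = K.random_normal(shape=K.shape(z_mean))
#       return z_mean + K.exp(0.5 * z_logvar) * eps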
Example #16
def second_branch(inputs, activation, L):
    x = L.Cropping2D(cropping=(OFFSET, OFFSET))(inputs)
    x = L.Conv2D(16, 2, strides=2, padding=PADDING)(x)
    x = L.BatchNormalization()(x)
    x = activation(x)
    x = L.Conv2D(32, 2, strides=2, padding=PADDING)(x)
    x = L.BatchNormalization()(x)
    x = activation(x)
    x = L.Conv2D(64, 2, strides=2, padding=PADDING)(x)
    x = L.BatchNormalization()(x)
    x = activation(x)
    x = L.Conv2D(64, 3, strides=3, padding=PADDING)(x)
    x = L.BatchNormalization()(x)
    x = activation(x)
    x = L.Conv2D(32, 1, strides=1, padding='SAME')(x)
    return x
Example #17
def split_branch(input_tensor, cropped_size, filters, kernel_size, \
        stride, dropout):
    c = (((input_tensor._keras_shape[1]-cropped_size[0])//2),\
            ((input_tensor._keras_shape[2]-cropped_size[1])//2))
    x_s = layers.Cropping2D(c)(input_tensor)
    x_s = bn_block(x_s, filters[0], kernel_size, \
           stride, dropout)
    x_s = conv_block(x_s, filters, kernel_size, stride,\
            dropout)
    x_s = identity_block(x_s, filters, kernel_size)
    x_s = conv_block(x_s, filters*2, kernel_size, stride,\
            dropout)
    x_s = identity_block(x_s, filters*2, kernel_size)
    x_s = identity_block(x_s, filters*2, kernel_size)
    x_s = layers.AveragePooling2D((3,3))(x_s)
    split_flat = layers.Flatten()(x_s)
    return split_flat
Example #18
    def build_generator(self, momentum=0.8, alpha_leak=0.2):
        if (self.presentation):
            print("Building Generator...")
        noise_shape = self.noise_shape

        model = models.Sequential()

        model.add(layers.Dense(1024, input_shape=noise_shape))
        model.add(LeakyReLU(alpha=alpha_leak))
        model.add(layers.BatchNormalization(momentum=momentum))

        model.add(
            layers.Dense(
                64 * self.channels * (1 + self.img_dim[0] // 2) *
                (self.img_dim[1] //
                 2)))  # 64 filters, each of half the size of the input image
        model.add(LeakyReLU(alpha=alpha_leak))
        model.add(layers.BatchNormalization(momentum=momentum))
        model.add(
            layers.Reshape([(1 + self.img_dim[0] // 2), (self.img_dim[1] // 2),
                            64 * self.channels]))

        model.add(
            layers.Conv2DTranspose(32 * self.channels, [2, 5], padding="same")
        )  # 32 filters, each of the size of the input image
        model.add(LeakyReLU(alpha=alpha_leak))
        model.add(layers.BatchNormalization(momentum=momentum))
        model.add(
            layers.UpSampling2D(size=[2, 2])
        )  # now the filters are of the full size of the image, with 1 additional row at the top
        model.add(layers.Cropping2D(
            cropping=[[1, 0], [0, 0]]))  # remove extra padding at the top

        model.add(
            layers.Conv2DTranspose(self.channels, [2, 5],
                                   padding="same",
                                   activation="tanh"))  # image

        model.summary()

        noise = layers.Input(shape=noise_shape)
        img = model(noise)

        return models.Model(noise, img)
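
# Shape note (added): the extra "1 +" row makes this work for odd image
# heights, e.g. img_dim = (15, 10): Reshape -> (8, 5, 64 * channels),
# UpSampling2D -> (16, 10), and Cropping2D([[1, 0], [0, 0]]) trims the extra
# top row -> (15, 10).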
Example #19
def multi_dense_net(input_shape, output_shape, \
        dropout, cropped_size=(31,31), weight_decay=1E-4, \
        merged_model=False):
    input_layer = layers.Input(shape=input_shape)
    c = (((input_layer._keras_shape[1]-cropped_size[0])//2),\
            ((input_layer._keras_shape[2]-cropped_size[1])//2))
    crop_layer = layers.Cropping2D(c)(input_layer)
    init_conv = layers.Conv2D(filters=16, kernel_size=(1,1), \
            padding='same', use_bias=False,
            kernel_regularizer=l2(weight_decay))(crop_layer)
    # dense/transition block 1
    dense = dense_block(init_conv, 12, dropout, weight_decay)
    trans = transistion_block(dense, 40, dropout, weight_decay)
    # dense/transition block 2
    dense = dense_block(trans, 12, dropout, weight_decay)
    # here we utilize the original image to train on also
    init_conv2 = layers.Conv2D(filters=16, kernel_size=(1,1), \
            padding='same', use_bias=False,
            kernel_regularizer=l2(weight_decay))(input_layer)
    dense2 = dense_block(init_conv2, 12, dropout, weight_decay)
    trans2 = transistion_block(dense2, 64, dropout, weight_decay)
    dense2 = dense_block(trans2, 12, dropout, weight_decay)
    x2 = layers.BatchNormalization(gamma_regularizer=l2(weight_decay), \
            beta_regularizer=l2(weight_decay))(dense2)
    x2 = layers.Activation('relu')(x2)
    x2 = layers.GlobalAveragePooling2D()(x2)
    #trans = transistion_block(dense, 78, dropout, weight_decay)
    #dense block 4
    #dense = dense_block(trans, 12, dropout, weight_decay)
    x = layers.BatchNormalization(gamma_regularizer=l2(weight_decay), \
            beta_regularizer=l2(weight_decay))(dense)
    x = layers.Activation('relu')(x)
    x = layers.GlobalAveragePooling2D()(x)
    #joining differing spatial views
    x = layers.Concatenate()([x,x2])
    if merged_model:
        model = models.Model(inputs=input_layer, outputs=x)
    else: 
        output_layer = layers.Dense(units=output_shape, \
                activation="softmax", kernel_regularizer=l2(weight_decay), \
                bias_regularizer=l2(weight_decay))(x)
        model = models.Model(inputs=input_layer, outputs=output_layer)
    return model
Example #20
def get_model0(Input_img_shape=(300, 300, 3)):
    def conv_bn(x, filt, dl_rate=(1, 1), preblock=False):
        y = layers.Convolution2D(filt, (3, 3),
                                 activation='linear',
                                 padding='same',
                                 dilation_rate=dl_rate,
                                 use_bias=False)(x)
        if preblock:
            return y
        y = layers.BatchNormalization()(y)
        return layers.Activation('elu')(y)

    #in_layer = layers.Input(t_x.shape[1:], name = 'RGB_Input')
    in_layer = layers.Input(Input_img_shape, name='RGB_Input')
    pp_in_layer = layers.GaussianNoise(GAUSSIAN_NOISE)(in_layer)
    pp_in_layer = layers.BatchNormalization()(pp_in_layer)

    c = conv_bn(pp_in_layer, BASE_DEPTH // 2)
    c = conv_bn(c, BASE_DEPTH // 2)
    c = conv_bn(c, BASE_DEPTH)

    skip_layers = [pp_in_layer]
    for j in range(BLOCK_COUNT):
        depth_steps = int(np.log2(Input_img_shape[0]) - 2)
        d = layers.concatenate(skip_layers + [
            conv_bn(c, BASE_DEPTH * 2**j, (2**i, 2**i), preblock=True)
            for i in range(depth_steps)
        ])
        d = layers.SpatialDropout2D(SPATIAL_DROPOUT)(d)
        d = layers.BatchNormalization()(d)
        d = layers.Activation('elu')(d)
        # bottleneck
        d = conv_bn(d, BASE_DEPTH * 2**(j + 1))
        skip_layers += [c]
        c = d

    d = layers.Convolution2D(1, (1, 1), activation='sigmoid',
                             padding='same')(d)
    d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
    d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
    seg_model = models.Model(inputs=[in_layer], outputs=[d])
    #seg_model.summary()
    return seg_model
Example #21
    def block(self, x, f, down=True, bn=True, dropout=False, leaky=True):
        if leaky:
            x = LeakyReLU(0.2)(x)
        else:
            x = layers.Activation('relu')(x)
        if down:
            x = layers.ZeroPadding2D()(x)
            x = layers.Conv2D(f, kernel_size=4, strides=2, use_bias=False)(x)
        else:
            x = layers.Conv2DTranspose(f,
                                       kernel_size=4,
                                       strides=2,
                                       use_bias=False)(x)
            x = layers.Cropping2D((1, 1))(x)
        if bn:
            x = InstanceNormalization()(x)
        if dropout:
            x = layers.Dropout(0.5)(x)
        return x
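
# Shape note (added): with the default 'valid' padding, Conv2DTranspose with
# kernel_size=4 and strides=2 maps an N x N map to (2N + 2) x (2N + 2), so the
# Cropping2D((1, 1)) trims one pixel per border to land exactly on 2N,
# mirroring the ZeroPadding2D + strided Conv2D used on the way down.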
Example #22
def create_generator():
    noise = layers.Input(shape=(NOISE_DIM,))
    x = layers.Dense(4 * 4 * 256, use_bias=False)(noise)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(0.2)(x)

    x = layers.Reshape((4, 4, 256))(x)
    x = upsample_block(x, 128, layers.LeakyReLU(0.2), strides=(1, 1),
                       use_bias=False, use_bn=True, padding="same", use_dropout=False)

    x = upsample_block(x, 64, layers.LeakyReLU(0.2), strides=(1, 1),
                       use_bias=False, use_bn=True, padding="same", use_dropout=False)

    x = upsample_block(x, 1, layers.Activation("tanh"), strides=(1, 1),
                       use_bias=False, use_bn=True)

    output = layers.Cropping2D((2, 2))(x)
    g_model = keras.Model(noise, output, name="generator")
    return g_model
Example #23
def build_model():
    x_in = layers.Input((160, 320, 3))
    x = layers.Cropping2D(cropping=((75, 25), (0, 0)))(x_in)
    # x = layers.Lambda(lambda x_img: tf.image.resize_images(x_img, (66,200)), name='resize')(x)
    x = layers.Lambda(lambda x_img: x_img / 127.5 - 1.0, name='normalize')(x)
    x = layers.Conv2D(24, (5, 5), strides=(2, 2), activation='relu')(x)
    x = layers.Conv2D(36, (5, 5), strides=(2, 2), activation='relu')(x)
    x = layers.Conv2D(48, (5, 5), strides=(2, 2), activation='relu')(x)
    x = layers.Conv2D(64, (3, 3), strides=(1, 1), activation='relu')(x)
    # x = layers.Conv2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.4)(x)
    x = layers.Dense(100, activation='relu')(x)
    x = layers.Dropout(0.4)(x)
    x = layers.Dense(50, activation='relu')(x)
    x = layers.Dropout(0.4)(x)
    x = layers.Dense(10, activation='relu')(x)
    x = layers.Dense(1)(x)

    return Model(inputs=x_in, outputs=x)
Example #24
def driving_model(img_df, batch_size=32):
    """Define and train driving model."""
    train_df, valid_df = skms.train_test_split(
        img_df,
        test_size=FLAGS.validation_proportion,
        random_state=1,
        shuffle=True)
    train_gen = images_generator(train_df,
                                 training=True,
                                 batch_size=batch_size)
    # Augmentation factor
    aug_factor = 2
    steps_per_epoch = math.ceil(len(train_df) * aug_factor / batch_size)
    valid_gen = images_generator(valid_df,
                                 training=False,
                                 batch_size=batch_size)
    valid_steps = math.ceil(len(valid_df) / batch_size)

    model = Sequential()
    model.add(
        layers.Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
    model.add(layers.Cropping2D(cropping=((50, 20), (0, 0))))

    if FLAGS.model == "lenet":
        lenet_model(model)
    elif FLAGS.model == "nvidia":
        nvidia_model(model)
    else:
        raise ValueError("Invalid model.")

    model.compile(loss="mse", optimizer="adam")
    model.summary()

    model.fit_generator(train_gen,
                        steps_per_epoch=steps_per_epoch,
                        epochs=FLAGS.epochs,
                        validation_data=valid_gen,
                        validation_steps=valid_steps,
                        verbose=1)

    model.save(FLAGS.model_file)
Example #25
def get_model(input_shape=(513, 513, 3), output_dims=(513, 513), atrous_rate=(6, 12, 18), class_no=21, crop=False,
              crop_size=None, count_mid_flow=16):
    input_tensor = layers.Input(shape=input_shape)
    with tf.variable_scope("encoder"):
        encoder = get_enhanced_xception(input_tensor=input_tensor, count_mid_flow=count_mid_flow)
        x_output = encoder.output

        # for layer in encoder.layers:  #  not available as pre train model is not ready here.
        #     layer.trainable = False

        x = get_separable_atrous_conv(x_output, atrous_rate=atrous_rate)

        x = layers.Conv2D(256, (1, 1), padding='same', use_bias=False, name='concat_projection',
                          kernel_initializer='he_normal')(x)
        x = layers.BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
        x = layers.Activation('relu')(x)
        x = layers.Dropout(0.1)(x)

    with tf.variable_scope("decoder"):
        # x4 (x2) block
        skip1 = encoder.get_layer('entry_block2_c2_bn').output

        x = BilinearResizeLayer2D(target_size=(K.int_shape(skip1)[1], K.int_shape(skip1)[2]), name='UpSampling1')(x)

        dec_skip1 = layers.Conv2D(48, (1, 1), padding='same', use_bias=False, name='feature_projection0',
                                  kernel_initializer='he_normal')(skip1)
        dec_skip1 = layers.BatchNormalization(name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
        dec_skip1 = layers.Activation('relu')(dec_skip1)
        x = layers.Concatenate()([x, dec_skip1])

        x = layers.Conv2D(class_no, (1, 1), padding='same', kernel_initializer='he_normal')(x)
        x = BilinearResizeLayer2D(target_size=output_dims, name='UpSampling2')(x)
        if crop:
            x = layers.Cropping2D(crop_size)(x)
    if class_no == 1:
        x = layers.Activation('sigmoid')(x)
    else:
        x = layers.Activation('softmax')(x)
    model = models.Model(inputs=input_tensor, outputs=x, name='deeplab_v3plus')

    return model
Example #26
def create_model():
    ch, row, col = 3, 160, 320  # Trimmed image format
    top_crop = row * 8 // 20
    print(top_crop)
    model = Sequential()
    local = False
    # Preprocess incoming data, centered around zero with small standard deviation 
    #model.add(layers.Dropout(0, input_shape=[160, 320, 3]))
    model.add(layers.Cropping2D(cropping=((50,20), (0,0)), input_shape=[row, col, ch]))
    #model.add(layers.Lambda(lambda x: normalize_data(x)))
    if not local:
        add_conv2d(model, 32, True)
        add_conv2d(model, 32)
        add_conv2d(model, 32)
        add_conv2d(model, 32, True)
        add_conv2d(model, 64)
        add_conv2d(model, 64)
        add_conv2d(model, 64, True)
        add_conv2d(model, 128)
        add_conv2d(model, 128)
        add_conv2d(model, 128, True)
        add_conv2d(model, 256)
        model.add(layers.MaxPool2D(2, strides=[1,2]))
        add_conv2d(model, 256)
        add_conv2d(model, 256, True)
        add_conv2d(model, 512, True)

    #model.add(... finish defining the rest of your model architecture here ...)
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.25))
    model.add(layers.Dense(32, activation="relu"))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(1))
    model.compile(loss='mse', optimizer=keras.optimizers.Adam(lr=0.001))
    model.summary()
    return model
Example #27
u8 = layers.concatenate([u8, c2])
c8 = cv(16, u8)
c8 = cv(16, c8)

u9 = ccup(
    c8, 8
)  # u9 = layers.Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = layers.concatenate([u9, c1], axis=3)
c9 = cv(8, u9)
c9 = cv(8, c9)

d = layers.Conv2D(1, (1, 1),
                  activation='sigmoid',
                  kernel_initializer='zeros',
                  bias_initializer='zeros')(c9)
d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)

seg_model = models.Model(inputs=[input_img], outputs=[d])
seg_model.summary()


import keras.backend as K
from keras.optimizers import Adam
from keras.losses import binary_crossentropy


def dice_coef(y_true, y_pred, smooth=1.):
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)
Example #28
def _adjust_block(p, ip, filters, block_id=None):
    '''Adjusts the input `previous path` to match the shape of the `input`.

    Used in situations where the output number of filters needs to be changed.

    # Arguments
        p: Input tensor which needs to be modified
        ip: Input tensor whose shape needs to be matched
        filters: Number of output filters to be matched
        block_id: String block_id

    # Returns
        Adjusted Keras tensor
    '''
    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
    img_dim = 2 if backend.image_data_format() == 'channels_first' else -2

    ip_shape = backend.int_shape(ip)

    if p is not None:
        p_shape = backend.int_shape(p)

    with backend.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p_shape[img_dim] != ip_shape[img_dim]:
            with backend.name_scope('adjust_reduction_block_%s' % block_id):
                p = layers.Activation('relu',
                                      name='adjust_relu_1_%s' % block_id)(p)
                p1 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding='valid',
                    name='adjust_avg_pool_1_%s' % block_id)(p)
                p1 = layers.Conv2D(filters // 2, (1, 1),
                                   padding='same',
                                   kernel_regularizer=l2(weight_decay),
                                   use_bias=False,
                                   name='adjust_conv_1_%s' % block_id,
                                   kernel_initializer='he_normal')(p1)

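                # Pad one pixel on the bottom/right, then crop one off the
                # top/left: this shifts the feature map by one pixel so the
                # second average-pool path samples the complementary pixel
                # grid before the two halves are concatenated.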
                p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding='valid',
                    name='adjust_avg_pool_2_%s' % block_id)(p2)
                p2 = layers.Conv2D(filters // 2, (1, 1),
                                   padding='same',
                                   kernel_regularizer=l2(weight_decay),
                                   use_bias=False,
                                   name='adjust_conv_2_%s' % block_id,
                                   kernel_initializer='he_normal')(p2)

                p = layers.concatenate([p1, p2], axis=channel_dim)
                if use_bn:
                    p = layers.BatchNormalization(axis=channel_dim,
                                                  momentum=bn_momentum,
                                                  epsilon=1e-3,
                                                  name='adjust_bn_%s' %
                                                  block_id)(p)

        elif p_shape[channel_dim] != filters:
            with backend.name_scope('adjust_projection_block_%s' % block_id):
                p = layers.Activation('relu')(p)
                p = layers.Conv2D(filters, (1, 1),
                                  strides=(1, 1),
                                  kernel_regularizer=l2(weight_decay),
                                  padding='same',
                                  name='adjust_conv_projection_%s' % block_id,
                                  use_bias=False,
                                  kernel_initializer='he_normal')(p)
                if use_bn:
                    p = layers.BatchNormalization(axis=channel_dim,
                                                  momentum=bn_momentum,
                                                  epsilon=1e-3,
                                                  name='adjust_bn_%s' %
                                                  block_id)(p)
    return p
Example #29
# Note: Not used
model_pretrained = MobileNet(input_shape=(nn_Imgsize, nn_Imgsize, 3),
                             alpha=1.0,
                             include_top=False,
                             weights='imagenet')

# Print model layers
#model_pretrained.summary()
# Freeze layers
#freezeLayers(model_pretrained, 'conv2d_94')

# Input layer
model_input = layers.Input(shape=imgShape)

# Crop input images
crop_out = layers.Cropping2D(cropping=((cropTop, cropBottom),
                                       (0, 0)))(model_input)

# Resize and normalize the input with a Keras Lambda layer attached to model_input
resized_input = layers.Lambda(ResizeNormalizeLambda,
                              arguments={'imgsize': nn_Imgsize})(crop_out)

# Build network Connect layers
# deep_nn_out = model_pretrained(resized_input)
conv = layers.Conv2D(8, (5, 5), padding='same',
                     activation='relu')(resized_input)
conv = layers.MaxPooling2D((2, 2), padding='same')(conv)
conv = layers.Conv2D(32, (5, 5), padding='same', activation='relu')(conv)
conv = layers.MaxPooling2D((4, 4), padding='same')(conv)
conv = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(conv)
conv = layers.AveragePooling2D((4, 4), padding='same')(conv)
Example #30
def fcn8_graph(feature_map, config, mode=None):
    '''Builds the computation graph of the FCN8 semantic segmentation network.

    feature_map:            Contextual Tensor [batch, num_classes, width, depth]

    Returns:
        fcn_hm, fcn_sm:     FCN8 heatmap and its softmax.
    '''
    print()
    print('---------------')
    print('>>> FCN8 Layer - mode:', mode)
    print('---------------')
    batch_size = config.BATCH_SIZE
    height, width = config.FCN_INPUT_SHAPE[0:2]
    num_classes = config.NUM_CLASSES
    rois_per_class = config.TRAIN_ROIS_PER_IMAGE
    weight_decay = config.WEIGHT_DECAY
    # In the original implementation, batch_momentum was used for the batch normalization layers of the ResNet
    # backbone. We are not using that backbone in FCN, therefore it is unused.
    # batch_momentum    = config.BATCH_MOMENTUM
    verbose = config.VERBOSE
    feature_map_shape = (width, height, num_classes)
    print('     feature map      :', feature_map.shape)
    print('     height :', height, 'width :', width, 'classes :', num_classes)
    print('     image_data_format: ', KB.image_data_format())
    print('     rois_per_class   : ', rois_per_class)

    if mode == 'training':
        KB.set_learning_phase(1)
    else:
        KB.set_learning_phase(0)
    print('     Set learning phase to :', KB.learning_phase())

    # feature_map = KL.Input(shape= feature_map_shape, name="input_fcn_feature_map")

    # TODO: Assert proper shape of input [batch_size, width, height, num_classes]
    # TODO: check if stride of 2 causes alignment issues if the featuremap is not even.

    # if batch_shape:
    # img_input = Input(batch_shape=batch_shape)
    # image_size = batch_shape[1:3]
    # else:
    # img_input = Input(shape=input_shape)
    # image_size = input_shape[0:2]

    ##-------------------------------------------------------------------------------------------------------
    ## Block 1    data_format='channels_last',
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(64, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block1_conv1',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(feature_map)
    print('   Input feature map                   : ', feature_map.shape)
    logt('Input feature map ', feature_map, verbose=1)

    logt('FCN Block 11 ', x, verbose=verbose)

    x = KL.Conv2D(64, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block1_conv2',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 12 ', x, verbose=verbose)

    x = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    logt('FCN Block 13 (Max pooling) ', x, verbose=verbose)

    ##-------------------------------------------------------------------------------------------------------
    ## Block 2
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(128, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block2_conv1',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 21  ', x, verbose=verbose)

    x = KL.Conv2D(128, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block2_conv2',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 22 ', x, verbose=verbose)

    x = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    logt('FCN Block 23 (Max pooling) ', x, verbose=verbose)

    ##-------------------------------------------------------------------------------------------------------
    ## Block 3
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv1',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 31  ', x, verbose=verbose)

    x = KL.Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv2',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 32 ', x, verbose=verbose)

    x = KL.Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv3',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 33 ', x, verbose=verbose)

    Pool3 = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    logt('FCN Block 34 (Max pooling) ', Pool3, verbose=verbose)

    ##-------------------------------------------------------------------------------------------------------
    ## Block 4
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv1',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(Pool3)
    logt('FCN Block 41 ', x, verbose=verbose)

    x = KL.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv2',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 42 ', x, verbose=verbose)

    x = KL.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv3',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 43 ', x, verbose=verbose)

    Pool4 = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    logt('FCN Block 44 (Max pooling) ', Pool4, verbose=verbose)

    ##-------------------------------------------------------------------------------------------------------
    ## Block 5
    ##-------------------------------------------------------------------------------------------------------
    x = KL.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block5_conv1',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(Pool4)
    logt('FCN Block 51 ', x, verbose=verbose)

    x = KL.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block5_conv2',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 52 ', x, verbose=verbose)

    x = KL.Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block5_conv3',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN Block 53 ', x, verbose=verbose)

    x = KL.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    logt('FCN Block 54 (Max pooling) ', x, verbose=verbose)

    ##-------------------------------------------------------------------------------------------------------
    ## FCN32 Specific Structure
    ##-------------------------------------------------------------------------------------------------------
    # Convolutional layers transfered from fully-connected layers
    # changed from 4096 to 2048 - reduction of weights from 42,752,644 to
    # changed ftom 2048 to 1024 - 11-05-2018
    # FC_SIZE = 2048
    FC_SIZE = 4096
    x = KL.Conv2D(FC_SIZE, (7, 7),
                  activation='relu',
                  padding='same',
                  name='fcn32_fc1',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    print()
    print('   --- FCN32 ----------------------------')
    logt(' FCN fully connected 1 (fc1) ', x, verbose=verbose)

    x = KL.Dropout(0.5)(x)
    x = KL.Conv2D(FC_SIZE, (1, 1),
                  activation='relu',
                  padding='same',
                  name='fcn32_fc2',
                  kernel_initializer='glorot_uniform',
                  bias_initializer='zeros')(x)
    logt('FCN fully connected 2 (fc2) ', x, verbose=verbose)

    x = KL.Dropout(0.5)(x)
    # Classifying layer
    x = KL.Conv2D(num_classes, (1, 1),
                  activation='linear',
                  padding='valid',
                  strides=(1, 1),
                  name='fcn32_deconv2D',
                  kernel_initializer='he_normal',
                  bias_initializer='zeros')(x)
    logt('FCN conv2d (fcn32_deconv2D)  ', x, verbose=verbose)

    ##-------------------------------------------------------------------------------------------------------
    ## FCN16 Specific Structure
    ##-------------------------------------------------------------------------------------------------------
    # Score Pool4 - Reduce Pool4 filters from 512 to num_classes (81)
    scorePool4 = KL.Conv2D(num_classes, (1, 1),
                           activation='relu',
                           padding='valid',
                           name='fcn16_score_pool4',
                           kernel_initializer='glorot_uniform',
                           bias_initializer='zeros')(Pool4)
    print()
    print('   --- FCN16 ----------------------------')
    logt('FCN scorePool4 (Conv2D(Pool4)) ', scorePool4, verbose=verbose)

    # 2x Upsampling of fcn_deconv2D  to generate Score2 (padding was originally "valid")
    x = KL.Deconvolution2D(num_classes,
                           kernel_size=(4, 4),
                           activation=None,
                           padding='valid',
                           name='fcn16_score2',
                           strides=(2, 2))(x)
    logt('FCN 2x Upsampling (Deconvolution2D(fcn32_classify))  ',
         x,
         verbose=verbose)

    # Crop to appropriate shape if required
    score2_c = KL.Cropping2D(cropping=((1, 1), (1, 1)),
                             name='fcn16_crop_score2')(x)
    logt('FCN 2x Upsampling/Cropped (Cropped2D(score2)) ',
         score2_c,
         verbose=verbose)

    # Sum Score2, scorePool4
    x = KL.Add(name='fcn16_fuse_pool4')([score2_c, scorePool4])
    logt('FCN Add Score2,scorePool4 Add(score2_c, scorePool4)  ',
         x,
         verbose=verbose)

    # 2x Upsampling  (padding was originally "valid", I changed it to "same" )
    x = KL.Deconvolution2D(num_classes,
                           kernel_size=(4, 4),
                           activation=None,
                           padding='same',
                           name='fcn16_upscore_pool4',
                           kernel_initializer='glorot_uniform',
                           bias_initializer='zeros',
                           strides=(2, 2))(x)

    logt('FCN upscore_pool4 (Deconv(fuse_Pool4)) ', x, verbose=verbose)

    ##-------------------------------------------------------------------------------------------------------
    ## FCN8 Specific Structure
    ##-------------------------------------------------------------------------------------------------------
    # Score Pool3 - Reduce Pool3 filters from 256 to num_classes (81)
    scorePool3 = KL.Conv2D(num_classes, (1, 1),
                           activation='relu',
                           padding='valid',
                           name='fcn8_score_pool3',
                           kernel_initializer='glorot_uniform',
                           bias_initializer='zeros')(Pool3)
    print()
    print('   --- FCN8 ----------------------------')
    logt('FCN scorePool3 (Conv2D(Pool3))  ', scorePool3, verbose=verbose)

    upscore_pool4_c = KL.Cropping2D(cropping=((0, 0), (0, 0)),
                                    name='fcn8_crop_pool4')(x)
    logt('FCN 2x Upsampling/Cropped (Cropped2D(score2)) ',
         upscore_pool4_c,
         verbose=verbose)

    # Sum  upscore_pool4_c, scorePool3
    x = KL.Add(name='fcn8_fuse_pool3')([upscore_pool4_c, scorePool3])
    logt('FCN Add Score2,scorePool4', x, verbose=verbose)

    print()

    ##-------------------------------------------------------------------------------------------------------
    ## fcn_heatmap
    ##-------------------------------------------------------------------------------------------------------
    # 8x Upsampling  (padding was originally "valid", I changed it to "same" )
    fcn_hm = KL.Deconvolution2D(num_classes,
                                kernel_size=(16, 16),
                                activation=None,
                                padding='same',
                                name='fcn8_heatmap',
                                kernel_initializer='glorot_uniform',
                                bias_initializer='zeros',
                                strides=(8, 8))(x)
    # fcn_hm = tf.identity(fcn_hm)
    fcn_hm.set_shape(feature_map.shape)
    logt('FCN fcn8_classify/heatmap  (Deconv(fuse_Pool4)) ',
         fcn_hm,
         verbose=verbose)
    fcn_hm = KL.Lambda(lambda z: tf.identity(z, name='fcn_hm'),
                       name='fcn_heatmap_lambda')(fcn_hm)
    logt('fcn_hm (final)', fcn_hm, verbose=verbose)
    print()

    # fcn_classify_shape = KB.int_shape(fcn_hm)
    # h_factor = height / fcn_classify_shape[1]
    # w_factor = width  / fcn_classify_shape[2]
    # print('   fcn_classify_shape:',fcn_classify_shape,'   h_factor : ', h_factor, '  w_factor : ', w_factor)

    # x = BilinearUpSampling2D(size=(h_factor, w_factor), name='fcn_bilinear')(x)
    # print('   FCN Bilinear upsmapling layer  shape is : ' , KB.int_shape(x), ' Keras tensor ', KB.is_keras_tensor(x) )

    ##-------------------------------------------------------------------------------------------------------
    ## fcn_softmax
    ##-------------------------------------------------------------------------------------------------------
    fcn_sm = KL.Activation("softmax", name="fcn8_softmax")(fcn_hm)
    logt('fcn8_softmax  ', fcn_sm, verbose=verbose)
    fcn_sm = KL.Lambda(lambda z: tf.identity(z, name='fcn_sm'),
                       name='fcn_softmax_lambda')(fcn_sm)
    logt('fcn_sm (final)', fcn_sm, verbose=verbose)
    print()

    #---------------------------------------------------------------------------------------------
    # heatmap L2 normalization
    # Normalization using the  `gauss_sum` (batchsize , num_classes, height, width)
    # 17-05-2018 (New method, replaced the previous method that used the transposed gauss sum)
    # 17-05-2018 Replaced with normalization across the CLASS axis
    #                         normalize along the CLASS axis
    #---------------------------------------------------------------------------------------------
    # print('\n    L2 normalization ------------------------------------------------------')
    # fcn_hm_L2norm = KL.Lambda(lambda z: tf.nn.l2_normalize(z, axis = 3, name = 'fcn_heatmap_L2norm'),\
    # name = 'fcn_heatmap_L2norm')(x)
    # print('\n    normalization ------------------------------------------------------')
    # fcn_hm_norm   = KL.Lambda(normalize, name="fcn_heatmap_norm") (x)

    return fcn_hm, fcn_sm