def crop(output1, output2, i):
    """Crop the larger of two feature maps so their spatial sizes match.

    Builds throwaway models from input `i` to each tensor purely to read the
    static output shapes, then trims the right/bottom edge of whichever
    tensor is wider/taller.  Indexing assumes channels-last shapes
    (batch, height, width, channels) — presumably consistent with
    IMAGE_ORDERING; verify against callers.
    """
    shape2 = keras.Model(i, output2).output_shape
    height2, width2 = shape2[1], shape2[2]

    shape1 = keras.Model(i, output1).output_shape
    height1, width1 = shape1[1], shape1[2]

    width_gap = abs(width1 - width2)
    height_gap = abs(height2 - height1)

    # Trim the right edge of the wider tensor.
    if width1 > width2:
        output1 = layers.Cropping2D(cropping=((0, 0), (0, width_gap)), data_format=IMAGE_ORDERING)(output1)
    else:
        output2 = layers.Cropping2D(cropping=((0, 0), (0, width_gap)), data_format=IMAGE_ORDERING)(output2)

    # Trim the bottom edge of the taller tensor.
    if output_height_is_larger := (height1 > height2):
        output1 = layers.Cropping2D(cropping=((0, height_gap), (0, 0)), data_format=IMAGE_ORDERING)(output1)
    else:
        output2 = layers.Cropping2D(cropping=((0, height_gap), (0, 0)), data_format=IMAGE_ORDERING)(output2)

    return output1, output2
Esempio n. 2
0
 def deconv_stage1(self, x, before_pooling):
     """Invert the network's first stage: unpool, trim, normalize, deconvolve, trim.

     `before_pooling` supplies the pre-pooling activations that UnPooling
     uses to place values back at their original locations.
     """
     # Undo the 3x3/stride-2 pooling of the forward pass.
     unpooled = UnPooling()([before_pooling, x],
                            pool_size=(1, 3, 3, 1),
                            strides=(1, 2, 2, 1))
     trimmed = layers.Cropping2D(1)(unpooled)
     normed = layers.BatchNormalization(axis=3, name='bn_conv1')(trimmed)
     # Mirror of a 7x7/stride-2 stem convolution, back to 3 channels.
     deconved = layers.Conv2DTranspose(3, (7, 7),
                                       strides=(2, 2),
                                       padding='valid',
                                       activation='relu',
                                       name='conv1',
                                       use_bias=False)(normed)
     return layers.Cropping2D(3)(deconved)
Esempio n. 3
0
    def make_generator():
        """Build the generator: project noise to 4x4x256, upsample three times,
        then crop the tanh output by 2 pixels per border."""
        latent = layers.Input(shape=(noise_dim, ))
        h = layers.Dense(4 * 4 * 256, use_bias=False)(latent)
        h = layers.BatchNormalization()(h)
        h = layers.LeakyReLU(0.2)(h)
        h = layers.Reshape((4, 4, 256))(h)

        # Two intermediate upsampling stages with BN + LeakyReLU.
        for width in (128, 64):
            h = upsample_block(h,
                               width,
                               layers.LeakyReLU(0.2),
                               strides=(1, 1),
                               use_bias=False,
                               use_bn=True,
                               padding="same",
                               use_dropout=False)

        # Final stage maps to a single tanh channel.
        h = upsample_block(h,
                           1,
                           layers.Activation("tanh"),
                           strides=(1, 1),
                           use_bias=False,
                           use_bn=True)
        h = layers.Cropping2D((2, 2))(h)

        return keras.models.Model(latent, h, name="generator")
Esempio n. 4
0
    def __init__(self,
                 kernel_size,
                 filters,
                 instanceNorm=True,
                 regularizer=None):
        """Two-convolution residual identity block.

        Args:
            kernel_size: kernel size shared by both convolutions.
            filters: pair (filters1, filters2) for the two convolutions.
            instanceNorm: when exactly False, use BatchNormalization;
                otherwise use InstanceNormalization.
            regularizer: applied to conv kernels/biases and BN beta/gamma.
        """
        super(ResnetIdentityBlock, self).__init__()
        n_filters_a, n_filters_b = filters
        self.instanceNorm = instanceNorm

        def _conv(n_filters):
            # Shared construction of the valid-padded convolutions.
            return layers.Conv2D(n_filters,
                                 kernel_size,
                                 padding='valid',
                                 kernel_regularizer=regularizer,
                                 bias_regularizer=regularizer)

        def _norm():
            # Identity check on purpose: only an explicit False selects BN.
            if instanceNorm is False:
                return layers.BatchNormalization(
                    beta_regularizer=regularizer, gamma_regularizer=regularizer)
            return InstanceNormalization()

        self.conv2a = _conv(n_filters_a)
        self.norm2a = _norm()
        self.relu2a = layers.ReLU()
        self.conv2b = _conv(n_filters_b)
        self.norm2b = _norm()
        self.crop2d = layers.Cropping2D(cropping=2)
Esempio n. 5
0
def _fit(src: Optional[Tensor], tgt: Tensor, num_filters: int,
         name: str) -> Tensor:
    """Adapt `src` so it can be combined with `tgt`.

    Returns `tgt` unchanged when there is no source; squeezes channels when
    the spatial sizes already agree; otherwise halves the resolution with a
    two-path shifted average pooling (factorized reduction).
    """
    if src is None:
        # Nothing to adapt.
        return tgt
    if src.shape[2] == tgt.shape[2]:
        # Spatial sizes match; only align the channel count.
        return _squeeze(src, num_filters, name + '_squeeze')

    # Spatial mismatch: down-sample the source along two shifted paths.
    activated = layers.ReLU(name=name + '_relu')(src)

    branch_a = layers.AveragePooling2D(pool_size=1, strides=2,
                                       name=name + '_pool1')(activated)
    branch_a = layers.Conv2D(num_filters // 2,
                             1,
                             use_bias=False,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l2_reg,
                             name=name + '_conv1')(branch_a)

    # Second path samples the grid shifted by one pixel (pad then crop).
    branch_b = layers.ZeroPadding2D(padding=((0, 1), (0, 1)),
                                    name=name + '_pad')(activated)
    branch_b = layers.Cropping2D(cropping=((1, 0), (1, 0)),
                                 name=name + '_crop')(branch_b)
    branch_b = layers.AveragePooling2D(pool_size=1, strides=2,
                                       name=name + '_pool2')(branch_b)
    branch_b = layers.Conv2D(num_filters // 2,
                             1,
                             use_bias=False,
                             kernel_initializer='he_normal',
                             kernel_regularizer=l2_reg,
                             name=name + '_conv2')(branch_b)

    merged = layers.concatenate([branch_a, branch_b], name=name + '_concat')
    return layers.BatchNormalization(epsilon=bn_eps,
                                     gamma_regularizer=l2_reg,
                                     name=name + '_bn')(merged)
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=1,
                 padding=0,
                 out_padding=0,
                 dilation=1,
                 groups=1,
                 use_bias=True,
                 data_format="channels_last",
                 **kwargs):
        """Transposed convolution with padding emulated by output cropping.

        A valid-padded Conv2DTranspose is built; when a positive padding is
        requested, its output is cropped back by that amount.
        """
        super(Deconv2d, self).__init__(**kwargs)
        # Only the un-dilated, un-grouped case is supported.
        assert (dilation == 1)
        assert (groups == 1)
        assert (in_channels is not None)

        pad = (padding, padding) if isinstance(padding, int) else padding

        self.use_crop = pad[0] > 0 or pad[1] > 0
        if self.use_crop:
            # Crop the valid-padded deconv output down to the requested size.
            self.crop = nn.Cropping2D(cropping=pad,
                                      data_format=data_format,
                                      name="crop")

        self.conv = nn.Conv2DTranspose(filters=out_channels,
                                       kernel_size=kernel_size,
                                       strides=strides,
                                       padding="valid",
                                       output_padding=out_padding,
                                       data_format=data_format,
                                       dilation_rate=dilation,
                                       use_bias=use_bias,
                                       name="conv")
Esempio n. 7
0
def get_model2(l2=0.001):
    """Steering-angle regression CNN for 160x320x3 input frames.

    Crops sky/hood rows, normalizes pixels to [-0.5, 0.5], then applies an
    NVIDIA-style convolution stack followed by dense layers with dropout.

    Note: the `l2` parameter is accepted but never used in this body.
    """
    model = Sequential()
    model.add(
        layers.Cropping2D(cropping=((50, 20), (0, 0)),
                          input_shape=(160, 320, 3)))
    model.add(layers.Lambda(lambda x: x / 255.0 - 0.5))
    # 90 x 320 x 3 after cropping.  NOTE(review): the original shape comments
    # assumed no crop; the shapes below are recomputed for the cropped input.
    model.add(
        layers.Conv2D(24, 5, strides=2, padding='valid', activation='relu'))
    # 43 x 158 x 24
    model.add(
        layers.Conv2D(36, 5, strides=2, padding='valid', activation='relu'))
    # 20 x 77 x 36
    model.add(
        layers.Conv2D(48, 5, strides=2, padding='valid', activation='relu'))
    # 8 x 37 x 48
    model.add(
        layers.Conv2D(64, 3, strides=1, padding='valid', activation='relu'))
    # 6 x 35 x 64
    model.add(
        layers.Conv2D(64, 3, strides=1, padding='valid', activation='relu'))
    #model.add(layers.Dropout(0.3))
    # 4 x 33 x 64
    model.add(layers.Flatten())
    #model.add(layers.Dense(512, activation='relu'))
    #model.add(layers.Dropout(0.5))
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dropout(0.3))
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(0.3))
    model.add(layers.Dense(12, activation='relu'))
    model.add(layers.Dropout(0.3))
    model.add(layers.Dense(1))
    return model
Esempio n. 8
0
def build_conv_ae(dim, channels, latent_dim, learning_rate=1e-3, loss_func=mae):
    """Build and compile a convolutional autoencoder.

    Args:
        dim: side length of the square input images; must be >= 16.
        channels: number of input channels; must be positive.
        latent_dim: size of the dense latent bottleneck; must be positive.
        learning_rate: Adam learning rate.
        loss_func: loss passed to `compile` (defaults to module-level `mae`).

    Returns:
        A compiled keras Model mapping (dim, dim, channels) to the same shape.

    Raises:
        ValueError: when dim, channels, or latent_dim are out of range.
    """
    if dim < 16:
        raise ValueError("Image dimensions must be at least 16x16.")
    if channels < 1:
        raise ValueError("Channels must be a positive integer.")
    if latent_dim < 1:
        raise ValueError("Latent dimension must be a positive integer.")

    # input layer
    input_layer = layers.Input((dim, dim, channels))
    X = input_layer

    # Encoder: double the channel count and halve the resolution until the
    # feature map drops below 8 pixels.
    half_dim = dim
    counter = 0
    while half_dim >= 8:
        # Conv2D(num_channels, window size, stride)
        X = layers.Conv2D(16 * 2 ** counter, 3, 1, padding='same')(X)
        X = layers.BatchNormalization()(X)
        X = layers.Activation('relu')(X)
        X = layers.Conv2D(16 * 2 ** counter, 3, 1, padding='same')(X)
        X = layers.BatchNormalization()(X)
        X = layers.Activation('relu')(X)
        X = layers.MaxPooling2D(2, 2, padding="same")(X)
        counter += 1
        # BUG FIX: np.ceil returns a float, which later breaks Dense/Reshape
        # (they require integer dimensions); use integer ceiling division.
        half_dim = (half_dim + 1) // 2

    # Bottleneck: flatten, compress to the latent vector, expand back.
    X = layers.Flatten()(X)
    latent_space = layers.Dense(latent_dim, activation="tanh")(X)
    X = layers.Dense(half_dim * half_dim * 16 * 2 ** counter)(latent_space)
    X = layers.Reshape((half_dim, half_dim, 16 * 2 ** counter))(X)

    # Decoder: mirror the encoder with transposed convolutions.
    for i in range(counter):
        X = layers.Conv2DTranspose(16 * 2 ** (counter - i), 4, 2, padding='same')(X)
        X = layers.BatchNormalization()(X)
        X = layers.Activation('relu')(X)
        X = layers.Conv2DTranspose(16 * 2 ** (counter - i), 3, 1, padding='same')(X)
        X = layers.BatchNormalization()(X)
        X = layers.Activation('relu')(X)

    X = layers.Conv2D(channels, 5, 1, padding='same')(X)
    X = layers.Activation('sigmoid')(X)

    # The decoder can overshoot the input size; crop symmetrically back to dim.
    reconstructed_dim = half_dim * 2 ** counter
    left_diff = (reconstructed_dim - dim) // 2
    right_diff = (reconstructed_dim - dim) - left_diff
    output_layer = layers.Cropping2D(((left_diff, right_diff), (left_diff, right_diff)))(X)

    model = models.Model(input_layer, output_layer)
    model.compile(Adam(learning_rate), loss=loss_func)

    return model
Esempio n. 9
0
def cnn_unet(inputs, filters=64, num_layers=4):
    """Valid-padded U-Net graph: contracting path, lateral block, expanding path.

    Args:
        inputs: input tensor.
        filters: filters of the first contracting block (doubled each level).
        num_layers: number of contract/expand levels.

    Returns:
        Output tensor of the final expanding block.
    """
    # Outputs of the contracting blocks, reused as skip connections.
    skip_connections = []

    # Contracting path: conv block -> remember output -> pool -> double filters.
    net = inputs
    for idx in range(num_layers):
        block_name = f'contract_{idx + 1}'
        net = unet_conv_block(inputs=net,
                              filters=filters,
                              padding='valid',
                              name=block_name)
        skip_connections.append(net)
        net = layers.MaxPooling2D((2, 2), strides=2,
                                  name=f'{block_name}_pool')(net)
        filters *= 2

    # Bottom of the U.
    net = unet_conv_block(inputs=net,
                          filters=filters,
                          padding='valid',
                          name='lateral')

    # Expanding path: upsample, center-crop the skip, concatenate, convolve.
    for level, skip in enumerate(reversed(skip_connections)):
        filters //= 2
        block_name = f'expand_{num_layers - level}'
        net = layers.Conv2DTranspose(filters, (2, 2),
                                     strides=(2, 2),
                                     padding='valid',
                                     name=f'{block_name}_upconv')(net)
        # Valid padding shrinks feature maps, so the skip must be cropped
        # to the upsampled size before concatenation.
        crop_spec = get_center_crop_location(source=skip, destination=net)
        cropped_skip = layers.Cropping2D(cropping=crop_spec,
                                         name=f'{block_name}_crop')(skip)
        net = layers.concatenate([net, cropped_skip],
                                 name=f'{block_name}_concatenate')
        net = unet_conv_block(inputs=net,
                              filters=filters,
                              padding='valid',
                              name=block_name)

    return net
Esempio n. 10
0
def simple_net(input_shape):
    """Minimal steering model: normalize, crop, one conv+pool, two dense layers."""
    net = models.Sequential()
    # Scale pixels to [-1, 1].
    net.add(layers.Lambda(lambda x: x / 127.5 - 1., input_shape=input_shape))
    # Drop 50 top rows (sky) and 20 bottom rows (hood).
    net.add(layers.Cropping2D(cropping=((50, 20), (0, 0))))
    net.add(layers.Convolution2D(24, 5, 5, activation='relu'))
    net.add(layers.MaxPooling2D())
    net.add(layers.Flatten())
    net.add(layers.Dense(120))
    net.add(layers.Dense(1))
    return net
Esempio n. 11
0
def up_conv(x, skip, filter_size, similar=False):
    """
    up convolution
    Args:
        x (tensor): input tensor
        skip (tensor): residual connection
        filter_size (int): filter size
        similar (bool): similar size restoration?

    Returns: keras tensor

    """

    x = layers.Conv2DTranspose(
        filter_size,
        2,
        2,
        padding='same'
    )(x)

    image_size = tf.keras.backend.int_shape(x)[1]
    skip_size = tf.keras.backend.int_shape(skip)[1]

    crop_size = (skip_size - image_size) // 2
    cropped_tuple = (crop_size, crop_size)

    # For an odd size difference, distribute the extra pixel asymmetrically.
    # (A dead `crop_size = crop_size` self-assignment was removed here.)
    if (skip_size - image_size) % 2:
        cropped_tuple = ((crop_size, crop_size + 1), (crop_size + 1, crop_size))

    # Make proper cropping of skip connections or zero padding of x based
    # on whether similar or different input-output sizes are expected.
    if not similar:  # just like original unet paper
        skip = layers.Cropping2D(
            cropped_tuple
        )(skip)
        padding = 'valid'
    else:  # zero padding to x
        x = layers.ZeroPadding2D(
            cropped_tuple
        )(x)
        padding = 'same'

    x = layers.Concatenate()([skip, x])

    # Two 3x3 convolutions; padding mode chosen above.
    for _ in range(2):
        x = layers.Conv2D(
            filter_size,
            3,
            1,
            padding=padding,
            activation='relu'
        )(x)

    return x
Esempio n. 12
0
def fcn8_decoder(convs, n_classes):
    """FCN-8 decoder head: fuse the three deepest encoder feature maps and
    upsample to per-pixel class probabilities."""
    f1, f2, f3, f4, f5 = convs

    # 2x upsample the deepest features; crop the 1-pixel deconv border.
    score = layers.Conv2DTranspose(n_classes,
                                   kernel_size=(4, 4),
                                   strides=(2, 2),
                                   use_bias=False)(f5)
    score = layers.Cropping2D(cropping=(1, 1))(score)

    # Project the pool4 features to class scores and fuse.
    pool4_score = layers.Conv2D(n_classes,
                                (1, 1),
                                activation='relu',
                                padding='same')(f4)
    score = layers.Add()([score, pool4_score])

    # Second 2x upsample + crop, then fuse pool3 scores.
    score = layers.Conv2DTranspose(n_classes,
                                   kernel_size=(4, 4),
                                   strides=(2, 2),
                                   use_bias=False)(score)
    score = layers.Cropping2D(cropping=(1, 1))(score)
    pool3_score = layers.Conv2D(n_classes,
                                (1, 1),
                                activation='relu',
                                padding='same')(f3)
    score = layers.Add()([score, pool3_score])

    # Final 8x upsample back toward input resolution.
    score = layers.Conv2DTranspose(n_classes,
                                   kernel_size=(8, 8),
                                   strides=(8, 8),
                                   use_bias=False)(score)
    return layers.Activation('softmax')(score)
Esempio n. 13
0
 def pad_to_scale(self, x, scale, size=300):
     """Pad or crop `x` so its spatial size equals ceil(size / 2**scale)."""
     target = int(np.ceil(size / (2.**scale)))
     delta = target - int(x.shape[1])
     if delta > 0:
         # Too small: reflect-pad, putting any extra pixel on the right.
         pad_left = delta // 2
         x = self.reflectpad(x, (pad_left, delta - pad_left))
     elif delta < 0:
         # Too large: crop the same amount from both axes.
         cut_left = -delta // 2
         cut_right = -delta - cut_left
         x = layers.Cropping2D(((cut_left, cut_right), (cut_left, cut_right)))(x)
     return x
def decoder_block(x,
                  x_skip,
                  filters,
                  kernel_size,
                  padding='same',
                  dilation_rate=1):
    """
    Decoder block used in expansive path of UNet.

    Upsamples `x` by 2, center-crops `x_skip` to the upsampled size,
    concatenates the two, then applies three conv2d_block stages.
    """
    x = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(x)

    # Calculate cropping for x_skip to concatenate with x

    if x_skip is not None:
        # Static spatial sizes (assumes fully-defined shapes — TODO confirm).
        _, h2, w2, _ = x_skip.shape
        _, h1, w1, _ = x.shape
        h_diff, w_diff = h2 - h1, w2 - w1

        # Integer ceil/floor halves; replaces the original round-trip of
        # plain Python ints through tf.math.ceil/tf.math.floor.
        cropping = (((h_diff + 1) // 2, h_diff // 2),
                    ((w_diff + 1) // 2, w_diff // 2))
        x_skip = layers.Cropping2D(cropping=cropping)(x_skip)
        x = layers.concatenate([x, x_skip], axis=3)

    # Three identical convolutional refinement stages.
    for _ in range(3):
        x = conv2d_block(x,
                         filters,
                         kernel_size,
                         padding,
                         dilation_rate,
                         batch_norm=True,
                         activation='relu')

    return x
Esempio n. 15
0
    def call(self, x, down_tensor=None):
        """Upsample `x` 2x, optionally center-crop and concatenate
        `down_tensor`, then run the decode stage."""
        x = layers.UpSampling2D(size=(2, 2), interpolation='nearest')(x)

        if down_tensor is not None:
            # Crop the skip tensor so its spatial dims match the upsampled x.
            _, skip_h, skip_w, _ = down_tensor.shape
            _, up_h, up_w, _ = x.shape
            dh, dw = skip_h - up_h, skip_w - up_w
            crop = ((int(np.ceil(dh / 2)), int(np.floor(dh / 2))),
                    (int(np.ceil(dw / 2)), int(np.floor(dw / 2))))
            down_tensor = layers.Cropping2D(cropping=crop)(down_tensor)
            x = layers.concatenate([x, down_tensor], axis=3)

        return self.decode(x)
Esempio n. 16
0
def nvidia_model(input_shape, dropout):
    """NVIDIA-style end-to-end steering network with cropping and dropout."""
    net = models.Sequential()
    # Normalize pixels to [-0.5, 0.5].
    net.add(layers.Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape))
    # Remove 70 top rows (sky) and 25 bottom rows (hood).
    net.add(layers.Cropping2D(cropping=((70, 25), (0, 0))))
    # Three strided 5x5 convolutions with widening filters.
    for width in (24, 36, 48):
        net.add(layers.Convolution2D(width, 5, 2, activation='relu'))
    net.add(layers.Dropout(dropout))
    # Two unstrided 3x3 convolutions.
    for width in (64, 64):
        net.add(layers.Convolution2D(width, 3, activation='relu'))
    net.add(layers.Flatten())
    net.add(layers.Dense(100))
    net.add(layers.Dropout(dropout))
    net.add(layers.Dense(50))
    net.add(layers.Dense(10))
    net.add(layers.Dense(1))
    return net
Esempio n. 17
0
def get_generator_model(noise_dim):
    """Generator: latent vector -> 4x4x256 seed -> three upsample blocks ->
    (32, 32, 1) tanh image, cropped to (28, 28, 1)."""
    noise = layers.Input(shape=(noise_dim, ))
    h = layers.Dense(4 * 4 * 256, use_bias=False)(noise)
    h = layers.BatchNormalization()(h)
    h = layers.LeakyReLU(0.2)(h)
    h = layers.Reshape((4, 4, 256))(h)

    # Two intermediate upsampling stages with BN + LeakyReLU.
    for width in (128, 64):
        h = upsample_block(
            h,
            width,
            layers.LeakyReLU(0.2),
            strides=(1, 1),
            use_bias=False,
            use_bn=True,
            padding="same",
            use_dropout=False,
        )

    # Final stage maps to a single tanh channel.
    h = upsample_block(
        h,
        1,
        layers.Activation("tanh"),
        strides=(1, 1),
        use_bias=False,
        use_bn=True,
    )

    # At this point, we have an output which has the same shape as the
    # input, (32, 32, 1). We will use a Cropping2D layer to make it
    # (28, 28, 1).
    h = layers.Cropping2D((2, 2))(h)

    return keras.models.Model(noise, h, name="generator")
Esempio n. 18
0
def make_generator_model():
    """DCGAN generator: 100-dim noise -> 32x32x1 tanh image, cropped to 28x28x1."""
    model = tf.keras.Sequential()

    # Project and reshape the latent vector to a 4x4x256 tensor.
    model.add(Dense(4 * 4 * 256, use_bias=False, input_shape=(100, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(0.2))
    model.add(layers.Reshape((4, 4, 256)))
    assert model.output_shape == (None, 4, 4, 256)

    # Stage 1: upsample 4 -> 8 and refine.
    model.add(layers.UpSampling2D((2, 2)))
    model.add(layers.Conv2DTranspose(128, (3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     use_bias=False))
    assert model.output_shape == (None, 8, 8, 128)  #maybe wrong
    model.add(layers.LeakyReLU(0.2))

    # Stage 2: upsample 8 -> 16 and refine.
    model.add(layers.UpSampling2D((2, 2)))
    model.add(layers.Conv2DTranspose(64, (3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     use_bias=False))
    assert model.output_shape == (None, 16, 16, 64)
    model.add(layers.LeakyReLU(0.2))

    # Stage 3: upsample 16 -> 32 and map to one tanh channel.
    model.add(layers.UpSampling2D((2, 2)))
    model.add(layers.Conv2DTranspose(1, (3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     use_bias=False,
                                     activation='tanh'))
    assert model.output_shape == (None, 32, 32, 1)
    # Trim the 32x32 output down to 28x28.
    model.add(layers.Cropping2D((2, 2)))
    return model
def build_model():
    """Convolutional autoencoder for 227x227x1 inputs; output cropped back to 227."""
    model = tf.keras.Sequential()

    # --- Encoder: three conv + stride-2 pool stages ---
    model.add(layers.Conv2D(32, kernel_size=(3, 3), input_shape=(227, 227, 1),
                            activation='relu', name='conv_autoencoder1',
                            padding='same'))
    model.add(layers.MaxPooling2D(pool_size=2, strides=2, padding='same',
                                  name='pool_autoencoder1'))
    model.add(layers.Conv2D(16, kernel_size=(3, 3), activation='relu',
                            name='conv_autoencoder2', padding='same'))
    model.add(layers.MaxPooling2D(pool_size=2, strides=2, padding='same',
                                  name='pool_autoencoder2'))
    model.add(layers.Conv2D(30, kernel_size=(3, 3), activation='relu',
                            name='conv_autoencoder3', padding='same'))
    model.add(layers.MaxPooling2D(pool_size=2, strides=2, padding='same',
                                  name='pool_autoencoder3'))

    # --- Decoder: three conv + 2x upsampling stages ---
    model.add(layers.Conv2D(30, kernel_size=(3, 3), activation='relu',
                            name='conv_autoencoder4', padding='same'))
    model.add(layers.UpSampling2D((2, 2), name='up_autoencoder1'))
    model.add(layers.Conv2D(16, kernel_size=(3, 3), activation='relu',
                            name='conv_autoencoder5', padding='same'))
    model.add(layers.UpSampling2D((2, 2), name='up_autoencoder2'))
    model.add(layers.Conv2D(32, kernel_size=(3, 3), activation='relu',
                            name='conv_autoencoder6', padding='same'))
    model.add(layers.UpSampling2D((2, 2), name='up_autoencoder3'))
    model.add(layers.Conv2D(1, (3, 3), activation='linear', padding='same',
                            name='conv_autoencoder7'))
    # Three 2x upsamplings overshoot 227; crop 5 pixels from bottom/right.
    model.add(layers.Cropping2D(((0, 5), (0, 5)), name='crop'))
    return model
Esempio n. 20
0
def conv2d_stats_block(input_layer, cropping, stats_block):
    """Crop a region of the input and run it through a small ResNet tower.

    Returns the 3x3 average-pooled features of the final stage; `stats_block`
    is used as a suffix for all layer names.
    """
    x = layers.Cropping2D(cropping, name='crop_' + stats_block)(input_layer)
    x = layers.Conv2D(8, (5, 5), (2, 2), name='conv2d_' + stats_block)(x)
    x = layers.BatchNormalization(name='batchnorm_' + stats_block)(x)
    x = layers.MaxPool2D((2, 2))(x)

    # Three ResNet stages: (filters, stage number, stride, identity blocks).
    # Stage 2 keeps stride 1 and has two identity blocks; stages 3-4 stride 2
    # and have three.
    stage_table = (([8, 8, 16], 2, 1, 2),
                   ([16, 16, 32], 3, 2, 3),
                   ([32, 32, 64], 4, 2, 3))
    for filter_spec, stage, stride, n_identity in stage_table:
        x = convolutional_block(x, 3, filter_spec, stage,
                                stats_block + '_a', s=stride)
        for suffix in 'bcd'[:n_identity]:
            x = identity_block(x, 3, filter_spec, stage,
                               stats_block + '_' + suffix)

    return layers.AveragePooling2D((3, 3))(x)
Esempio n. 21
0
 def __init__(self, name_prefix, filters, data_format, **kwargs):
     """Factorized reduction: two stride-2 average-pool paths (one shifted by
     a pixel), each 1x1-projected, concatenated, then batch-normalized.

     Args:
         name_prefix: prefix applied to every sub-layer name.
         filters: output filters of each 1x1 projection.
         data_format: 'channels_first' or 'channels_last'.
     """
     # BUG FIX: the base Layer constructor must run before any attributes
     # are assigned — tf.keras tracks sub-layers via __setattr__ and raises
     # a RuntimeError ("forgot to call super().__init__") otherwise.
     # Originally this call was at the end of the method.
     super(FactorizedReduction_K, self).__init__(**kwargs)
     self.name_prefix = name_prefix
     self.filters = filters
     self.data_format = data_format
     self.relu = kl.ReLU(name=f'{name_prefix}relu_')
     # Path A: stride-2 average pool, then 1x1 projection.
     self.x0_avgpool = kl.AveragePooling2D(
         pool_size=(1, 1),
         strides=(2, 2),
         padding="valid",
         data_format=self.data_format,
         name=f'{name_prefix}avepool2d_a_')
     self.x0_conv = kl.Conv2D(filters=filters,
                              kernel_size=(1, 1),
                              strides=(1, 1),
                              padding="valid",
                              data_format=self.data_format,
                              name=f"{name_prefix}conv2d_a_")
     # Path B: shift the grid by one pixel (pad bottom/right, crop top/left)
     # so the two paths sample complementary positions.
     self.x1_zeropad = kl.ZeroPadding2D(padding=((0, 1), (0, 1)),
                                        data_format=self.data_format,
                                        name=f"{name_prefix}zeropad2d_b_")
     self.x1_cropping = kl.Cropping2D(cropping=((1, 0), (1, 0)),
                                      data_format=self.data_format,
                                      name=f"{name_prefix}crop2d_b_")
     self.x1_avgpool = kl.AveragePooling2D(
         pool_size=(1, 1),
         strides=(2, 2),
         padding="valid",
         data_format=self.data_format,
         name=f"{name_prefix}avepool2d_b_")
     self.x1_conv = kl.Conv2D(filters=filters,
                              kernel_size=(1, 1),
                              strides=(1, 1),
                              padding="valid",
                              data_format=self.data_format,
                              name=f"{name_prefix}conv2d_b_")
     self.concat = kl.Concatenate(name=f"{name_prefix}concat_")
     self.bn = kl.BatchNormalization(name=f"{name_prefix}bn_")
Esempio n. 22
0
def get_model():
    """Steering regression network on 160x320x3 frames: crop, normalize,
    three conv/conv/pool stages, then dense layers with dropout."""
    net = Sequential()
    # Drop 50 top rows (sky) and 20 bottom rows (hood).
    net.add(layers.Cropping2D(cropping=((50, 20), (0, 0)),
                              input_shape=(160, 320, 3)))
    # Normalize pixels to [-0.5, 0.5].
    net.add(layers.Lambda(lambda x: x / 255.0 - 0.5))
    # Conv/conv/pool stages with widening filters.
    for width_a, width_b in ((8, 16), (32, 32), (64, 64)):
        net.add(layers.Conv2D(width_a, 3, activation='relu'))
        net.add(layers.Conv2D(width_b, 3, activation='relu'))
        net.add(layers.MaxPooling2D(2))
    net.add(layers.Flatten())
    net.add(layers.Dense(128, activation='relu'))
    net.add(layers.Dropout(0.3))
    net.add(layers.Dense(32, activation='relu'))
    net.add(layers.Dropout(0.3))
    net.add(layers.Dense(1))
    return net
Esempio n. 23
0
def make_model(input_shape):
    """Small binary classifier operating on the top-right quadrant of the input.

    Args:
        input_shape: (height, width, channels) of the input image.

    Returns:
        An uncompiled keras Model with a single sigmoid output.
    """
    inputs = keras.Input(shape=input_shape)

    # BUG FIX: Cropping2D expects ((top, bottom), (left, right)), so the
    # bottom crop must come from the height (input_shape[0]) and the left
    # crop from the width (input_shape[1]). The original swapped the two
    # indices, which only happened to work for square inputs.
    bottom_crop = int(input_shape[0] * 0.5)
    left_crop = int(input_shape[1] * 0.5)

    x = layers.experimental.preprocessing.Rescaling(1. / 255)(inputs)
    x = layers.Cropping2D(cropping=((0, bottom_crop), (left_crop, 0)))(x)

    x = layers.Conv2D(32, 3, strides=2, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPool2D()(x)

    x = layers.Conv2D(64, 3, padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPool2D()(x)

    x = layers.GlobalAveragePooling2D()(x)

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(1, activation='sigmoid')(x)
    return keras.Model(inputs, outputs)
Esempio n. 24
0
    def _build_model(self):
        """Assemble the encoder/decoder segmentation model.

        Wires the reversed encoder activations as skip connections into the
        decoder's upsampling stages, then adds a final 2x transposed-conv
        softmax head.  Returns a Model from self.inputs to that head.
        """
        decoder = self._decoder()
        encoder = self._encoder()

        # Encoder yields one activation per resolution; the last (smallest)
        # is the bottleneck, the rest become skip connections.
        skips = encoder(self.inputs)
        mobilenet_b16 = skips[-1]
        skips = reversed(skips[:-1])  # all but smallest layer b16

        conv = mobilenet_b16  # (8,10,320)

        for up, skip in zip(decoder, skips):
            conv = self.stride_up(conv, up[0], up[1])  # (16,20,512)
            # NOTE(review): appears to trim a one-row overshoot at the
            # 16-row resolution so the skip can be concatenated — confirm
            # against the encoder's actual output shapes.
            if conv.shape[1] == 16:
                conv = layers.Cropping2D(cropping=((1, 0), (0, 0)))(conv)
            conv = layers.concatenate([conv, skip])

        # Final 2x transposed conv produces per-pixel class probabilities.
        self.output = layers.Conv2DTranspose(self.output_channels,
                                             3,
                                             strides=2,
                                             padding='same',
                                             activation='softmax')(
                                                 conv)  # 64x64 -> 128x128

        return Model(inputs=self.inputs, outputs=self.output)
Esempio n. 25
0
def VanillaUnet(num_class = 1, img_shape = (256,256,3)):
    """Plain U-Net: four down blocks, a 512-filter bottleneck, four up blocks
    with center-cropped skip connections, then zero-padding back to the
    input size and a 1x1 classification convolution."""
    concat_axis = 3
    inputs = layers.Input(shape = img_shape)

    # --- Contracting path ---
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same',
                      name='conv1_1')(inputs)
    x = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(x)
    skips = [x]
    x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    for depth in (64, 128, 256):
        x = layers.Conv2D(depth, (3, 3), activation='relu', padding='same')(x)
        x = layers.Conv2D(depth, (3, 3), activation='relu', padding='same')(x)
        skips.append(x)
        x = layers.MaxPooling2D(pool_size=(2, 2))(x)

    # --- Bottleneck ---
    x = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(x)
    x = layers.Conv2D(512, (3, 3), activation='relu', padding='same')(x)

    # --- Expanding path: upsample, crop skip, concatenate, convolve ---
    for depth in (256, 128, 64, 32):
        skip = skips.pop()
        x = layers.UpSampling2D(size=(2, 2))(x)
        ch, cw = get_crop_shape(skip, x)
        cropped = layers.Cropping2D(cropping=(ch, cw))(skip)
        x = layers.concatenate([x, cropped], axis=concat_axis)
        x = layers.Conv2D(depth, (3, 3), activation='relu', padding='same')(x)
        x = layers.Conv2D(depth, (3, 3), activation='relu', padding='same')(x)

    # Pad back to the exact input size before the 1x1 classification conv.
    ch, cw = get_crop_shape(inputs, x)
    x = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(x)
    logits = layers.Conv2D(num_class, (1, 1))(x)

    return Model(inputs=inputs, outputs=logits)
Esempio n. 26
0
# NOTE(review): top-level script fragment — c8, c1, dropout, upsample,
# EDGE_CROP, NET_SCALING and input_img are defined earlier in the file.
# Final decoder stage: upsample and fuse with the first encoder block.
u9 = upsample(8, (2, 2), strides=(2, 2), padding='same')(c8)
u9 = layers.concatenate([u9, c1], axis=3)
u9 = layers.Dropout(dropout)(u9)
c9 = layers.Conv2D(8, (3, 3),
                   kernel_initializer='he_uniform',
                   bias_initializer='zeros',
                   activation='relu',
                   padding='same')(u9)
c9 = layers.Conv2D(8, (3, 3),
                   kernel_initializer='he_uniform',
                   bias_initializer='zeros',
                   activation='relu',
                   padding='same')(c9)

# Per-pixel sigmoid mask; cropping then zero-padding by the same amount
# forces an EDGE_CROP-wide zero border on the prediction.
d = layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
d = layers.Cropping2D((EDGE_CROP, EDGE_CROP))(d)
d = layers.ZeroPadding2D((EDGE_CROP, EDGE_CROP))(d)
# Optionally scale the mask back up to the original image resolution.
if NET_SCALING is not None:
    d = layers.UpSampling2D(NET_SCALING)(d)

seg_model = models.Model(inputs=[input_img], outputs=[d])
print()
#print()
#print(seg_model.summary())
print()


## evaluation criteria
# dice coefficient
def dice_coef(y_true, y_pred, smooth=1):
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    def __init__(self, img_shape, num_class, d=32, weights=weights_url):
        concat_axis = 3
        inputs = layers.Input(shape=img_shape)

        conv1 = layers.Conv2D(d, (3, 3),
                              activation='relu',
                              padding='same',
                              name='conv1_1')(inputs)
        conv1 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(pool1)
        conv2 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(pool2)
        conv3 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(pool3)
        conv4 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = layers.Conv2D(d * 16, (3, 3),
                              activation='relu',
                              padding='same')(pool4)
        conv5 = layers.Conv2D(d * 16, (3, 3),
                              activation='relu',
                              padding='same')(conv5)

        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(up6)
        conv6 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(up7)
        conv7 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(up8)
        conv8 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(up9)
        conv9 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(conv9)

        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0],
                                                               cw[1])))(conv9)
        conv10 = layers.Conv2D(num_class, (1, 1), activation="sigmoid")(conv9)

        super().__init__(inputs=inputs, outputs=conv10)

        if weights is not None:
            self.load_weight_file(weights)
# Esempio n. 28
# 0
def UNet():

    concat_axis = 3
    inputss = Input((512, 512, 1))
    print(inputss.shape)

    conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1_1')(inputss)
    conv1 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)
    conv4 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
    conv4 = Dropout(0.2)(conv4)
    #     pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    #     conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)
    #     conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)

    #     up_conv5 = UpSampling2D(size=(2, 2))(conv5)
    #     ch, cw = get_crop_shape(conv4, up_conv5)
    #     crop_conv4 = layers.Cropping2D(cropping=(ch,cw))(conv4)
    #     up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
    #     conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(up6)
    #     conv6 = layers.Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)

    up_conv6 = layers.UpSampling2D(size=(2, 2))(conv4)
    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
    up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
    conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(up7)
    conv7 = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)

    up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
    up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
    conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(up8)
    conv8 = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)

    up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
    up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
    conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(up9)
    conv9 = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)

    ch, cw = get_crop_shape(inputss, conv9)
    conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
    conv10 = layers.Conv2D(1, (1, 1))(conv9)

    print(conv10.shape)


    # conv1 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputss)
    # print(conv1.shape)
    # conv1 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    # print(conv1.shape)
    # pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # print(pool1.shape)
    # print('\n')
    #
    # conv2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    # print(conv2.shape)
    # conv2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    # print(conv2.shape)
    # pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    # print(pool2.shape)
    # print('\n')
    #
    # conv3 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    # print(conv3.shape)
    # conv3 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    # print(conv3.shape)
    # pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    # print(pool3.shape)
    # print('\n')
    #
    # conv4 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    # print(conv4.shape)
    # conv4 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
    # print(conv4.shape)
    # drop4 = Dropout(0.5)(conv4)
    # pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
    # print(pool4.shape)
    # print('\n')
    # #
    # # conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
    # # print(conv5.shape)
    # # conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    # # print(conv5.shape)
    # # drop5 = Dropout(0.5)(conv5)
    # # print('\n')
    # #
    # # up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
    # #     UpSampling2D(size=(2, 2))(drop5))
    # # print(up6.shape)
    # # print(drop4.shape)
    # # merge6 = merge([drop4, up6], mode='concat', concat_axis=3)
    # # print('merge: ')
    # # print(merge6.shape)
    # # conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
    # # conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
    # #
    # up7 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
    #     UpSampling2D(size=(2, 2))(conv4))
    # merge7 = merge([conv3, up7], mode='concat', concat_axis=3)
    # conv7 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
    # conv7 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    #
    # up8 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
    #     UpSampling2D(size=(2, 2))(conv7))
    # merge8 = concatenate([conv2, up8], axis=3)
    # conv8 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
    # conv8 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)
    #
    # up9 = Conv2D(32, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
    #     UpSampling2D(size=(2, 2))(conv8))
    # merge9 = concatenate([conv1, up9], axis=3)
    # conv9 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
    # conv9 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # conv9 = Conv2D(2, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
    # # conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
    # conv10 = Softmax()(conv9)

    print("llllllllllllllllllllllllast")
    Outmodel = Model(inputs=inputss, outputs=conv10)
    Outmodel.summary()
    print(conv10.shape)

    return Outmodel
# Esempio n. 29
# 0
                   kernel_size=(3, 3),
                   padding="valid",
                   activation="relu")(l4_5)
l5 = layers.Conv2D(filters=1024,
                   kernel_size=(3, 3),
                   padding="valid",
                   activation="relu")(l5)

#level4
l6 = layers.Conv2DTranspose(filters=512,
                            kernel_size=(2, 2),
                            strides=(2, 2),
                            padding="valid")(l5)
#l6 = layers.UpSampling2D(size=(2,2),interpolation='nearest')(l5)

l4_connection = layers.Cropping2D(cropping=((4, 4), (4, 4)))(l4)
print(l6.shape)
print(l4_connection.shape)
print(l4.shape)
l6 = tf.keras.layers.concatenate([l6, l4_connection])
print(l6.shape)
#l6 = layers.Conv2D(filters=1024, kernel_size=(3,3), padding="valid", activation="relu")(l6)
l6 = layers.Conv2D(filters=512,
                   kernel_size=(3, 3),
                   padding="same",
                   activation="relu")(l6)
l6 = layers.Conv2D(filters=256,
                   kernel_size=(2, 2),
                   padding="same",
                   activation="relu")(l6)
# Esempio n. 30
# 0
sConv03 = cl.convBlock(256, 3, 2)(sConv02)

res01 = cl.resBlock(256, 3)(sConv03)
res02 = cl.resBlock(256, 3)(res01)

tConv01 = cl.convTransBlock(128, 3, 2)(res02)
add = layers.Add()([tConv01, sConv02])
tConv02 = cl.convTransBlock(64, 3, 2)(add)
tConv03 = cl.convTransBlock(24, 3, 4)(tConv02)

synt = layers.Conv2D(24, 3, padding="same")(tConv03)

fft = cl.fftBlock()(synt)
con = cl.conBlock()([mul, dMZ, fft])

trimmedCon = layers.Cropping2D(cropping=((3, 3), (3, 3)))(con)

underChan = cl.fullChanBlock()(trimmedCon)
fullChan = cl.fullChanBlock()(dL)

model = models.Model(inputs=[dL, dM], outputs=[fullChan, underChan])
optimizer = optimizers.Adam()

train = glob(r"C:/Datasets/*.h5")
val = glob(r"C:/Datasets/*.h5")
R = 4  # *2
sample_n = 32  # //2 # 'calibration center' size
random = False  # only applicable for uniform = False, really...
uniform = False
centered = False
dim = (218, 170)  # input dimensions