Ejemplo n.º 1
0
 def build(self, input_shape):
     """Create the conv (and optional batch-norm) sublayers.

     Called lazily by Keras with the input shape; this implementation
     ignores `input_shape` and configures Conv2D purely from the
     constructor-supplied fields.
     """
     # NOTE(review): `self.num_layers` is passed as the Conv2D filter count —
     # presumably it holds the number of filters; confirm the field naming.
     self.conv = layers.Conv2D(self.num_layers,
                               self.kernel_size,
                               self.stride,
                               self.padding,
                               activation=self.activation,
                               kernel_regularizer=self.kernel_regularizer)
     if self.batch_normalize:
         self.bn = layers.BatchNormalization()  # scale=False
         '''
Ejemplo n.º 2
0
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
  """A residual block (bottleneck: 1x1 -> kxk -> 1x1 convolutions).

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer.
    kernel_size: default 3, kernel size of the bottleneck layer.
    stride: default 1, stride of the first layer.
    conv_shortcut: default True, use convolution shortcut if True,
        otherwise identity shortcut.
    name: string, block label.

  Returns:
    Output tensor for the residual block.
  """
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

  def _bn(tensor, suffix):
    # Shared BatchNormalization configuration for every sub-layer.
    return layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + suffix)(tensor)

  if conv_shortcut:
    # Projection shortcut matching the 4x channel expansion.
    shortcut = layers.Conv2D(
        4 * filters, 1, strides=stride, name=name + '_0_conv')(x)
    shortcut = _bn(shortcut, '_0_bn')
  else:
    shortcut = x

  out = layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
  out = _bn(out, '_1_bn')
  out = layers.Activation('relu', name=name + '_1_relu')(out)

  out = layers.Conv2D(
      filters, kernel_size, padding='SAME', name=name + '_2_conv')(out)
  out = _bn(out, '_2_bn')
  out = layers.Activation('relu', name=name + '_2_relu')(out)

  out = layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(out)
  out = _bn(out, '_3_bn')

  out = layers.Add(name=name + '_add')([shortcut, out])
  return layers.Activation('relu', name=name + '_out')(out)
Ejemplo n.º 3
0
def transition_block(x, name, dropout_rate, reduction=0.5):
    """DenseNet-style transition: BN -> ReLU -> 1x1 conv (channel
    reduction) -> dropout -> 2x2 average pooling.

    Args:
        x: input tensor.
        name: string prefix used for layer names.
        dropout_rate: rate for the Dropout layer.
        reduction: fraction of input channels kept by the 1x1 conv.

    Returns:
        Output tensor with reduced channels and halved spatial size.
    """
    x = layers.BatchNormalization(name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    # Channel count is read from the last axis — assumes channels-last
    # layout; TODO confirm against the model's data format.
    x = layers.Conv2D(int(x.get_shape().as_list()[-1] * reduction),
                      1,
                      padding='same',
                      name=name + '_conv')(x)
    x = layers.Dropout(rate=dropout_rate)(x)
    x = layers.AveragePooling2D(strides=2, name=name + '_pool')(x)
    return x
def build_model():
    """Build and compile a 1-D CNN binary classifier over token sequences.

    Input: integer sequences of length MAX_LENGTH drawn from a vocabulary
    of size MAX_FEATURES; output: a single sigmoid probability.

    Returns:
        A compiled tf.keras Model (rmsprop optimizer, binary cross-entropy
        loss, binary accuracy metric).
    """
    sequences = layers.Input(shape=(MAX_LENGTH, ))
    embedded = layers.Embedding(MAX_FEATURES, 64)(sequences)
    x = layers.Conv1D(64, 3, activation='relu')(embedded)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool1D(3)(x)
    x = layers.Conv1D(64, 5, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool1D(5)(x)
    x = layers.Conv1D(64, 5, activation='relu')(x)
    # GlobalMaxPool1D already yields a 2-D (batch, features) tensor, so the
    # Flatten layer the original inserted after it was a no-op; removed.
    x = layers.GlobalMaxPool1D()(x)
    x = layers.Dense(100, activation='relu')(x)
    predictions = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(inputs=sequences, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])
    return model
Ejemplo n.º 5
0
def _identity_block(input_tensor, kernel_size, filters, stage, block):
    """The identity block is the block that has no conv layer at shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer
        filters: list of three integers, the filters of the 3 conv layers
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # 1x1 reduction.
    out = layers.Conv2D(filters1, (1, 1),
                        kernel_initializer='he_normal',
                        name=conv_name_base + '2a')(input_tensor)
    out = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(out)
    out = layers.Activation('relu')(out)

    # kxk bottleneck conv.
    out = layers.Conv2D(filters2, kernel_size,
                        padding='same',
                        kernel_initializer='he_normal',
                        name=conv_name_base + '2b')(out)
    out = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(out)
    out = layers.Activation('relu')(out)

    # 1x1 expansion.
    out = layers.Conv2D(filters3, (1, 1),
                        kernel_initializer='he_normal',
                        name=conv_name_base + '2c')(out)
    out = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(out)

    # Identity shortcut: add the untouched input back in.
    out = layers.add([out, input_tensor])
    return layers.Activation('relu')(out)
Ejemplo n.º 6
0
def make_generator_model():
    """Build a DCGAN-style generator.

    Projects a 100-dim noise vector to a 7x7x256 feature map, then
    upsamples through three Conv2DTranspose stages to a 28x28x1 tanh image.

    Returns:
        An uncompiled tf.keras Sequential model.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256
                                  )  # Note: None is the batch size

    # 7x7 -> 7x7 (stride 1), 128 feature maps.
    model.add(
        layers.Conv2DTranspose(128, (5, 5),
                               strides=(1, 1),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # 7x7 -> 14x14 (stride 2).
    model.add(
        layers.Conv2DTranspose(64, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # 14x14 -> 28x28 (stride 2); tanh keeps pixel values in [-1, 1].
    model.add(
        layers.Conv2DTranspose(1, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False,
                               activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model
Ejemplo n.º 7
0
def res_conv_block(input_layers, num_filters):
    """Residual conv block: two 3x3 conv-BN-ReLU layers on the main path
    plus a 1x1 conv-BN-ReLU shortcut, combined by elementwise addition."""

    def _conv_bn_relu(tensor, kernel):
        # One conv -> batch-norm -> ReLU stage with the shared initializer.
        out = layers.Conv2D(num_filters, kernel,
                            kernel_initializer=initializer,
                            padding="same")(tensor)
        out = layers.BatchNormalization()(out)
        return layers.Activation("relu")(out)

    # Shortcut branch built first to preserve layer creation order.
    shortcut = _conv_bn_relu(input_layers, (1, 1))
    main = _conv_bn_relu(input_layers, (3, 3))
    main = _conv_bn_relu(main, (3, 3))
    return main + shortcut
Ejemplo n.º 8
0
def create_classifier():
    """Build a CNN classifier for 32x32x3 images.

    Returns:
        A tuple (model, fidmodel): `model` maps images to 10-way softmax
        probabilities; `fidmodel` shares the same trunk and exposes the
        flattened conv features (used for FID-style evaluation).
    """

    with tf.name_scope("Disc"):
        X = kl.Input((32, 32, 3), name="X")

        # Two conv-BN stages at full resolution, then pool.
        layer = kl.Conv2D(filters=16,
                          kernel_size=3,
                          padding="same",
                          activation="relu")(X)
        layer = kl.BatchNormalization()(layer)
        layer = kl.Conv2D(filters=32,
                          kernel_size=3,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Conv2D(filters=64,
                          kernel_size=4,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Conv2D(filters=128,
                          kernel_size=4,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Dropout(0.2)(layer)

        layer = kl.Flatten()(layer)
        # Feature tap for the FID model, taken before the dense head.
        fidout = layer
        layer = kl.Dense(512, activation="relu")(layer)
        layer = kl.Dropout(0.2)(layer)

        D_out = kl.Dense(10, activation="softmax")(layer)

        model = k.Model(inputs=X, outputs=D_out)
        fidmodel = k.Model(inputs=X, outputs=fidout)
    return model, fidmodel
Ejemplo n.º 9
0
def cgan_generator(c_dim, z_dim):
    """Conditional GAN generator: concatenates a condition vector `c` and a
    noise vector `z`, passes them through a stack of Dense+BN layers, and
    emits a 99-dim tanh output.

    Args:
        c_dim: dimensionality of the condition input.
        z_dim: dimensionality of the noise input.

    Returns:
        A keras Model taking [c, z] and returning the 99-dim output.
    """

    c = layers.Input(shape=[c_dim])
    z = layers.Input(shape=[z_dim])
    x = layers.concatenate([c, z])

    # NOTE(review): this first Dense has no activation (the following ones
    # use relu) — looks inconsistent; confirm whether that is intentional.
    x = layers.Dense(512)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dense(99, activation='tanh')(x)

    return Model(inputs=[c, z], outputs=x)
Ejemplo n.º 10
0
def make_generator_model():
    """Generating network structure.

    Builds a DCGAN-style generator: a dense projection of a 128-dim latent
    vector to an 8x8x256 map, followed by Conv2DTranspose upsampling stages
    ending in a 32x32x3 tanh image.

    Returns:
        Sequential model.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(8 * 8 * 256, use_bias=False, input_shape=(128, )))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((8, 8, 256)))
    # Note: None is the batch size.
    assert model.output_shape == (None, 8, 8, 256)

    # (filters, strides, expected output shape) for each hidden
    # upsampling stage.
    stages = [
        (128, (1, 1), (None, 8, 8, 128)),
        (64, (2, 2), (None, 16, 16, 64)),
    ]
    for n_filters, stage_strides, expected_shape in stages:
        model.add(
            layers.Conv2DTranspose(n_filters, (5, 5),
                                   strides=stage_strides,
                                   padding='same',
                                   use_bias=False))
        assert model.output_shape == expected_shape
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU())

    # Final stage: 16x16 -> 32x32, 3 channels, tanh output in [-1, 1].
    model.add(
        layers.Conv2DTranspose(3, (5, 5),
                               strides=(2, 2),
                               padding='same',
                               use_bias=False,
                               activation='tanh'))
    assert model.output_shape == (None, 32, 32, 3)

    return model
Ejemplo n.º 11
0
def convnet(input_shape,
            output_size,
            conv_filters=(64, 64, 64),
            conv_kernel_sizes=(3, 3, 3),
            conv_strides=(2, 2, 2),
            use_global_average_pool=False,
            normalization_type=None,
            normalization_kwargs=None,
            downsampling_type='conv',
            name='convnet',
            *args,
            **kwargs):
    """Build a small configurable conv net.

    Args:
        input_shape: shape of the image input (excluding batch).
        output_size: NOTE(review): currently unused by this function;
            kept for interface compatibility.
        conv_filters / conv_kernel_sizes / conv_strides: per-layer
            configuration; iterated in lockstep.
        use_global_average_pool: if True, end with global average pooling
            instead of Flatten.
        normalization_type: one of None, 'batch', 'layer', 'group',
            'instance' ('weight' raises NotImplementedError).
        normalization_kwargs: kwargs forwarded to the normalization layer.
        downsampling_type: 'conv' (strided convs) or 'pool' (stride-1 convs
            followed by average pooling).
        *args, **kwargs: forwarded to every Conv2D layer.

    Returns:
        A keras Model (summary printed as a side effect).
    """
    assert downsampling_type in ('pool', 'conv'), downsampling_type
    # Fix: the original used a mutable default argument ({}) for
    # normalization_kwargs; use None and materialize per call instead.
    if normalization_kwargs is None:
        normalization_kwargs = {}

    img_input = layers.Input(shape=input_shape, dtype=tf.float32)
    x = img_input

    for (conv_filter, conv_kernel_size,
         conv_stride) in zip(conv_filters, conv_kernel_sizes, conv_strides):
        x = layers.Conv2D(
            filters=conv_filter,
            kernel_size=conv_kernel_size,
            strides=(conv_stride if downsampling_type == 'conv' else 1),
            padding="SAME",
            activation='linear',
            *args,
            **kwargs)(x)

        if normalization_type == 'batch':
            x = layers.BatchNormalization(**normalization_kwargs)(x)
        elif normalization_type == 'layer':
            x = LayerNormalization(**normalization_kwargs)(x)
        elif normalization_type == 'group':
            x = GroupNormalization(**normalization_kwargs)(x)
        elif normalization_type == 'instance':
            x = InstanceNormalization(**normalization_kwargs)(x)
        elif normalization_type == 'weight':
            raise NotImplementedError(normalization_type)
        else:
            assert normalization_type is None, normalization_type

        x = layers.LeakyReLU()(x)

        if downsampling_type == 'pool' and conv_stride > 1:
            # Direct attribute access; the original's getattr on a string
            # constant added nothing.
            x = tf.keras.layers.AvgPool2D(pool_size=conv_stride,
                                          strides=conv_stride)(x)

    if use_global_average_pool:
        x = layers.GlobalAveragePooling2D(name='average_pool')(x)
    else:
        x = tf.keras.layers.Flatten()(x)

    model = models.Model(img_input, x, name=name)
    model.summary()
    return model
Ejemplo n.º 12
0
    def __init__(self,
                 image_shape: tuple,
                 embedding_size: int = 16,
                 conv_layers: Optional[List[ConvLayerConfig]] = None,
                 l2_param_penalty: float = 0.00):
        """Build a convolutional decoder from an embedding to an image.

        Args:
            image_shape: target (H, W, C) of the decoded image.
            embedding_size: dimensionality of the input embedding.
            conv_layers: transpose-conv stage configs; defaults to two
                stride-2 stages ending in `image_shape[-1]` channels.
            l2_param_penalty: L2 kernel regularization weight (0 disables).
        """

        pn = tf.keras.regularizers.l2(
            l2_param_penalty) if l2_param_penalty > 0 else None

        if conv_layers is None:
            conv_layers = [
                ConvLayerConfig(stride=2,
                                filter_size=3,
                                nr_filters=8,
                                activation='elu',
                                batch_norm=True),
                ConvLayerConfig(stride=2,
                                filter_size=3,
                                nr_filters=int(image_shape[-1]),
                                activation='elu',
                                batch_norm=True),
            ]

        # Work backwards from the target size: each transpose-conv stage
        # multiplies the spatial dims by its stride, so the initial map is
        # the image size divided by the product of all strides.
        img_s = [int(x) for x in image_shape[:2]]
        for cl in conv_layers:
            img_s = [s / cl.stride for s in img_s]
        initial_shape = (int(img_s[0]), int(img_s[1]), 1)
        assert np.allclose(
            initial_shape[:2],
            img_s[:2]), 'eventual size divided by strides should be an integer'

        encoding = layers.Input(shape=(embedding_size, ),
                                name='embedding_input',
                                dtype=tf.float32)

        # Dense projection to the initial single-channel spatial map.
        e = layers.Dense(units=np.prod(initial_shape),
                         activation='elu')(encoding)
        e = layers.Reshape(target_shape=initial_shape)(e)

        for cl in conv_layers:
            e = layers.Conv2DTranspose(filters=cl.nr_filters,
                                       kernel_size=(cl.filter_size,
                                                    cl.filter_size),
                                       strides=(cl.stride, cl.stride),
                                       data_format='channels_last',
                                       padding='same',
                                       activation=cl.activation,
                                       kernel_regularizer=pn)(e)
            if cl.batch_norm:
                e = layers.BatchNormalization()(e)
        rgb_norm = e

        # Sanity check: the decoder must reproduce the requested shape.
        assert rgb_norm.shape[1:] == image_shape

        self.model = tf.keras.Model(inputs=[encoding], outputs=[rgb_norm])
Ejemplo n.º 13
0
def build_discriminator_net(img_shape):
    """
    Base network that the supervised/unsupervised children will use
    Vanilla CNN

    Args:
        img_shape: shape of the input image, e.g. (28, 28, 1).

    Returns:
        An uncompiled Sequential model ending in NUM_CLASSES logits.
    """
    model = Sequential()

    # Convolutional layer, from 28x28x1 into 14x14x32 tensor
    model.add(
        Conv2D(32,
               kernel_size=3,
               strides=2,
               input_shape=img_shape,
               padding='same'))
    model.add(LeakyReLU(alpha=0.01))

    # Convolutional layer, from 14x14x32 into 7x7x64 tensor.
    # Fix: `input_shape` is only meaningful on the first layer of a
    # Sequential model; Keras ignores it elsewhere, so the redundant
    # copies on the later convs were removed.
    model.add(
        Conv2D(64,
               kernel_size=3,
               strides=2,
               padding='same'))
    model.add(layers.BatchNormalization())
    model.add(LeakyReLU(alpha=0.01))

    # Third conv stage (NOTE(review): with stride 2 and 'same' padding,
    # 7x7 maps to 4x4, not the 3x3 the original comment claimed).
    model.add(
        Conv2D(128,
               kernel_size=3,
               strides=2,
               padding='same'))
    model.add(layers.BatchNormalization())
    model.add(LeakyReLU(alpha=0.01))

    # Starts differing from base CNN GAN here
    model.add(layers.Dropout(0.5))
    model.add(layers.Flatten())
    model.add(layers.Dense(NUM_CLASSES))

    return model
Ejemplo n.º 14
0
def create_gen(Zt,
               Ct,
               img_shape=(28, 28, 1),
               noise_shape=(7, 7, 1),
               filter_size=3,
               strides=[2, 2],
               filters=[128, 64]):
    """Build a conditional generator network.

    Args:
        Zt: tensor bound to the noise input Z.
        Ct: tensor bound to the condition input C (NOTE(review): C is an
            input of the returned model but is never used in the graph —
            confirm whether that is intentional).
        img_shape: shape of the generated image.
        noise_shape: spatial shape of the noise input.
        filter_size: kernel size for all transpose convs.
        strides: per-stage strides for the upsampling transpose convs.
        filters: per-stage filter counts.

    Returns:
        A keras Model with inputs [Z, C] and a tanh image output.
    """

    with tf.name_scope("Gen"):

        # Generator
        Z = kl.Input(noise_shape, tensor=Zt, name="Z")
        C = kl.Input(img_shape, tensor=Ct, name="C")

        # Dense expansion of the flattened noise, reshaped back to a
        # spatial map with 7x the noise channels.
        Zf = kl.Flatten()(Z)
        layer = kl.Dense(np.prod(noise_shape) * 7)(Zf)
        layer = kl.Reshape(
            (noise_shape[0], noise_shape[1], noise_shape[-1] * 7))(layer)

        # Strided transpose convs: upsample spatially per `strides`.
        for l in range(len(filters)):
            layer = kl.Conv2DTranspose(filters=filters[l],
                                       kernel_size=filter_size,
                                       padding="same",
                                       strides=strides[l],
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)

        # Dilated transpose convs: grow the receptive field at fixed size.
        for l in range(len(filters)):
            layer = kl.Conv2DTranspose(filters=filters[l],
                                       kernel_size=filter_size,
                                       padding="same",
                                       dilation_rate=l + 2,
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)

        # Project to the image channel count; tanh output in [-1, 1].
        G_out = kl.Conv2DTranspose(filters=img_shape[-1],
                                   kernel_size=filter_size,
                                   activation="tanh",
                                   padding="same")(layer)

        model = k.Model(inputs=[Z, C], outputs=G_out)
    return model
Ejemplo n.º 15
0
def conv_block_basic(input_tensor,
                     filters,
                     stage,
                     block,
                     strides=(2, 2),
                     weight_decay=5e-4,
                     use_bias=False):
    """Basic two-conv residual block with a 1x1 projection shortcut.

    The main path is conv(3x3, strided) -> BN -> ReLU -> conv(3x3) -> BN
    -> ReLU; the shortcut is a strided 1x1 conv + BN so shapes match for
    the final addition.
    """
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    main = layers.Conv2D(filters, (3, 3),
                         strides=strides,
                         padding='same',
                         kernel_initializer='he_normal',
                         name=conv_name_base + '2a',
                         kernel_regularizer=l2(weight_decay),
                         use_bias=use_bias)(input_tensor)
    main = layers.BatchNormalization(name=bn_name_base + '2a')(main)
    main = layers.Activation('relu')(main)

    main = layers.Conv2D(filters, (3, 3),
                         padding='same',
                         kernel_initializer='he_normal',
                         name=conv_name_base + '2b',
                         kernel_regularizer=l2(weight_decay),
                         use_bias=use_bias)(main)
    main = layers.BatchNormalization(name=bn_name_base + '2b')(main)
    main = layers.Activation('relu')(main)

    # Projection shortcut matching the main path's stride.
    shortcut = layers.Conv2D(filters, (1, 1),
                             strides=strides,
                             padding='valid',
                             kernel_initializer='he_normal',
                             name=conv_name_base + '1',
                             kernel_regularizer=l2(weight_decay),
                             use_bias=use_bias)(input_tensor)
    shortcut = layers.BatchNormalization(name=bn_name_base + '1')(shortcut)

    merged = layers.add([main, shortcut])
    return layers.Activation('relu')(merged)
Ejemplo n.º 16
0
def make_last_layers_mobilenet(x, id, num_filters, out_filters):
    """Build the YOLO-style head on a MobileNet backbone.

    Alternates 1x1 conv-BN-LeakyReLU blocks (names derived from `id`,
    `id+1`, `id+2`) with 3x3 separable convs, then branches: `x` is the
    refined trunk feature map, `y` is the prediction tensor with
    `out_filters` channels.

    Args:
        x: input feature tensor.
        id: integer used to generate the 'block_<n>_*' layer names.
        num_filters: channel count for the 1x1 trunk convs.
        out_filters: channel count of the prediction branch output.

    Returns:
        Tuple (x, y): trunk output and prediction output.
    """
    x = compose(
        kl.Conv2D(num_filters,
                  kernel_size=1,
                  padding='same',
                  use_bias=False,
                  name='block_' + str(id) + '_conv'),
        kl.BatchNormalization(momentum=0.9, name='block_' + str(id) + '_BN'),
        kl.LeakyReLU(name='block_' + str(id) + '_relu6'),
        MobilenetSeparableConv2D(2 * num_filters,
                                 kernel_size=(3, 3),
                                 use_bias=False,
                                 padding='same'),
        kl.Conv2D(num_filters,
                  kernel_size=1,
                  padding='same',
                  use_bias=False,
                  name='block_' + str(id + 1) + '_conv'),
        kl.BatchNormalization(momentum=0.9,
                              name='block_' + str(id + 1) + '_BN'),
        kl.LeakyReLU(name='block_' + str(id + 1) + '_relu6'),
        MobilenetSeparableConv2D(2 * num_filters,
                                 kernel_size=(3, 3),
                                 use_bias=False,
                                 padding='same'),
        kl.Conv2D(num_filters,
                  kernel_size=1,
                  padding='same',
                  use_bias=False,
                  name='block_' + str(id + 2) + '_conv'),
        kl.BatchNormalization(momentum=0.9,
                              name='block_' + str(id + 2) + '_BN'),
        kl.LeakyReLU(name='block_' + str(id + 2) + '_relu6'))(x)

    # Prediction branch: separable conv then a linear 1x1 projection.
    y = compose(
        MobilenetSeparableConv2D(2 * num_filters,
                                 kernel_size=(3, 3),
                                 use_bias=False,
                                 padding='same'),
        kl.Conv2D(out_filters, kernel_size=1, padding='same',
                  use_bias=False))(x)
    return x, y
Ejemplo n.º 17
0
def resnet_layer(inputs,
                 num_filters,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    ''' 2D Convolution-Batch Normalization-Activation stack builder
    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)

    # Returns
        x (tensor): tensor as input to the next layer
    '''
    conv = layers.Conv2D(num_filters,
                         kernel_size=kernel_size,
                         strides=strides,
                         padding='same',
                         kernel_initializer='he_normal',
                         kernel_regularizer=l2(1e-4))

    def _bn_act(tensor):
        # Optional batch-norm followed by optional activation.
        if batch_normalization:
            tensor = layers.BatchNormalization()(tensor)
        if activation is not None:
            tensor = layers.Activation(activation)(tensor)
        return tensor

    if conv_first:
        return _bn_act(conv(inputs))
    return conv(_bn_act(inputs))
Ejemplo n.º 18
0
def Convolutional_LSTM(n_frames, width, height, channels):
    """Build and compile a ConvLSTM sequence-to-sequence video model.

    Four stacked ConvLSTM2D+BN stages (all return_sequences=True) followed
    by a 3-D conv and sigmoid, so the output movie has the same
    frame count and spatial size as the input, with 1 channel.

    Args:
        n_frames: number of frames per input movie.
        width, height, channels: per-frame dimensions.

    Returns:
        A compiled keras Model (adadelta, binary cross-entropy).
    """
    # Renamed from `input` to avoid shadowing the builtin.
    frames_in = layers.Input(shape=(n_frames, width, height, channels))

    x = layers.ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same', return_sequences=True)(frames_in)
    x = layers.BatchNormalization()(x)
    x = layers.ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ConvLSTM2D(filters=40, kernel_size=(3, 3), padding='same', return_sequences=True)(x)
    x = layers.BatchNormalization()(x)
    x = layers.Conv3D(filters=1, kernel_size=(3, 3, 3), padding='same', data_format='channels_last')(x)
    x = layers.Activation('sigmoid')(x)

    # Bug fix: tf.keras.Model takes `inputs`/`outputs` (plural); the
    # original `input=`/`output=` keywords raise a TypeError.
    conv_lstm = tf.keras.models.Model(inputs=frames_in, outputs=x)
    conv_lstm.compile(optimizer='adadelta', loss='binary_crossentropy')

    # return a movie of identical shape
    return conv_lstm
Ejemplo n.º 19
0
 def decoder_block(self, input_tensor, concat_tensor, num_filters,
                   dropout_rate):
     """U-Net decoder stage: upsample, concatenate the skip connection,
     then refine with dropout, BN and two conv-BN-ReLU layers.

     Args:
         input_tensor: tensor from the previous (coarser) decoder stage.
         concat_tensor: matching encoder feature map (skip connection).
         num_filters: channel count for all convs in this stage.
         dropout_rate: rate applied right after the concatenation.

     Returns:
         The refined, upsampled feature tensor.
     """
     # Transpose conv upsamples by self.stride in each spatial dim.
     decoder = layers.Conv2DTranspose(num_filters, (2, 2),
                                      strides=(self.stride, self.stride),
                                      padding='same')(input_tensor)
     decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
     dropout = layers.Dropout(dropout_rate)(decoder)
     decoder = layers.BatchNormalization()(dropout)
     decoder = layers.Activation('relu')(decoder)
     decoder = layers.Conv2D(num_filters,
                             (self.kernel_size, self.kernel_size),
                             padding='same')(decoder)
     decoder = layers.BatchNormalization()(decoder)
     decoder = layers.Activation('relu')(decoder)
     decoder = layers.Conv2D(num_filters,
                             (self.kernel_size, self.kernel_size),
                             padding='same')(decoder)
     decoder = layers.BatchNormalization()(decoder)
     decoder = layers.Activation('relu')(decoder)
     return decoder
Ejemplo n.º 20
0
def bn_relu(inputs,
            axis=-1,
            momentum=BATCH_NORM_DECAY,
            epsilon=BATCH_NORM_EPSILON,
            bn_name=None):
    """Apply batch normalization followed by ReLU to `inputs`."""
    normalized = layers.BatchNormalization(axis=axis,
                                           momentum=momentum,
                                           epsilon=epsilon,
                                           name=bn_name)(inputs)
    return layers.ReLU()(normalized)
Ejemplo n.º 21
0
def resnet50(num_classes, batch_size=None):
    """Instantiates the ResNet50 architecture.

    Args:
      num_classes: `int` number of classes for image classification.
      batch_size: Size of the batches for each step.

    Returns:
        A Keras model instance.
    """

    input_shape = (224, 224, 3)
    img_input = layers.Input(shape=input_shape, batch_size=batch_size)
    x = img_input

    if backend.image_data_format() == 'channels_first':
        # Input is defined channels-last; permute to NCHW for the backend.
        x = layers.Permute((3, 1, 2))(x)
        bn_axis = 1
    else:  # channels_last
        bn_axis = -1

    # Stem: 7x7/2 conv (explicit padding, then 'valid') + BN + ReLU + pool.
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
    x = layers.Conv2D(64, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Four residual stages; stage 2 keeps stride 1 since the stem already
    # downsampled twice.
    x = resnet_block(x,
                     size=3,
                     kernel_size=3,
                     filters=256,
                     stage=2,
                     conv_strides=(1, 1))
    x = resnet_block(x, size=4, kernel_size=3, filters=512, stage=3)
    x = resnet_block(x, size=6, kernel_size=3, filters=1024, stage=4)
    x = resnet_block(x, size=3, kernel_size=3, filters=2048, stage=5)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(num_classes,
                     kernel_initializer='he_normal',
                     kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                     name='fc1000')(x)

    # A softmax that is followed by the model loss must be done cannot be done
    # in float16 due to numeric issues. So we pass dtype=float32.
    x = layers.Activation('softmax', dtype='float32')(x)

    # Create model.
    return models.Model(img_input, x, name='resnet50')
Ejemplo n.º 22
0
    def _build_resnet(self,
                      num_classes,
                      blocks,
                      num_filters_per_block,
                      dtype='float32',
                      batch_size=None,
                      use_l2_regularizer=True,
                      reduce_mean=False,
                      softmax=True):
        """Construct and register the ResNet layers on this instance.

        Populates `self._resnet_layers` with the stem conv/BN and builds
        the conv/identity blocks for each stage via the `_build_*`
        helpers; no tensor graph is wired here.

        NOTE(review): `num_classes`, `dtype`, `batch_size`, `reduce_mean`
        and `softmax` are not used in this snippet — presumably consumed
        by a forward/`call` method elsewhere; confirm.
        """

        if backend.image_data_format() == 'channels_first':
            bn_axis = 1
        else:  # channels_last
            bn_axis = 3

        # Stem: 3x3 stride-1 conv (CIFAR-style, no 7x7/2 + padding).
        # x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
        self._resnet_layers['conv1'] = layers.Conv2D(
            64, (3, 3),
            strides=(1, 1),
            padding='same',
            use_bias=False,
            kernel_initializer='he_normal',
            kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
            name='conv1')

        self._resnet_layers['bn_conv1'] = layers.BatchNormalization(
            axis=bn_axis,
            momentum=BATCH_NORM_DECAY,
            epsilon=BATCH_NORM_EPSILON,
            name='bn_conv1')

        # One conv block ('a_') then 2*num_block identity blocks
        # ('b_<j>'/'c_<j>') per stage.
        for i, (num_block,
                num_filter) in enumerate(zip(blocks, num_filters_per_block)):
            stage_id = i + 1

            self._build_conv_block(3, [num_filter, num_filter, num_filter],
                                   stage=stage_id,
                                   block='a_',
                                   strides=(1, 1),
                                   use_l2_regularizer=use_l2_regularizer)

            for j in range(num_block):
                self._build_identity_block(
                    3, [num_filter, num_filter, num_filter],
                    stage=stage_id,
                    block='b_' + str(j),
                    use_l2_regularizer=use_l2_regularizer)
                self._build_identity_block(
                    3, [num_filter, num_filter, num_filter],
                    stage=stage_id,
                    block='c_' + str(j),
                    use_l2_regularizer=use_l2_regularizer)

        return
Ejemplo n.º 23
0
def create_sequential_model():
    """Build a tiny Sequential model: strided 1x1 conv -> BN -> Dense."""
    net_layers = [
        layers.Input(shape=(None, None, 3)),
        layers.Conv2D(filters=64,
                      kernel_size=(1, 1),
                      strides=2,
                      activation='relu'),
        layers.BatchNormalization(),
        # Dense acts on the last (channel) axis of the 4-D feature map.
        layers.Dense(2, activation="relu"),
    ]
    return tf.keras.Sequential(net_layers)
Ejemplo n.º 24
0
def discriminator_and_classifier_model(c_dim):
    """Build the shared-trunk discriminator (D) and classifier (Q) heads.

    Args:
        c_dim: dimensionality of the classifier (Q) output.

    Returns:
        Tuple of two keras Models sharing the same 99-dim input and Dense
        trunk: (discriminator with a single logit, classifier with c_dim
        outputs).
    """

    inputs = keras.Input(shape=(99,))
    # Fix: the original passed input_shape=(64,) to this Dense; that
    # argument is ignored when a layer is called on an existing tensor and
    # contradicted the actual 99-dim input, so it was removed.
    x = layers.Dense(1024)(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Dense(512)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Dense(256)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Discriminator head: single unbounded logit.
    d_out = layers.Dense(1)(x)

    # Classifier head: extra Dense+LeakyReLU, then c_dim outputs.
    x = layers.Dense(256)(x)
    x = layers.LeakyReLU()(x)
    q_out = layers.Dense(c_dim)(x)

    return keras.Model(inputs=inputs, outputs=d_out), keras.Model(inputs=inputs, outputs=q_out)
Ejemplo n.º 25
0
def generator():
    """
    Purpose of the Generator model is to produce images that look real.
    During training, the Generator progressively becomes better at creating
    images that look real.

    The Generator up-samples random noise into an image: it projects a noise
    vector of shape (GEN_NOISE_INPUT_SHAPE,) to a 7x7x256 feature map, then
    applies three stride-2 transposed convolutions (7 -> 14 -> 28 -> 56),
    ending with a 56x56x3 tanh output in [-1, 1].
    NOTE(review): the original docstring claimed 224x224x3, but the three
    stride-2 up-sampling steps from 7x7 only reach 56x56 — confirm intent.

    Returns:
         The Generator model.
    """
    start = time.time()

    model = keras.Sequential([
        # Project noise to 7*7*256 units, then reshape into a feature map.
        layers.Dense(units=7 * 7 * 256, use_bias=False, input_shape=(GEN_NOISE_INPUT_SHAPE,)),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Reshape((7, 7, 256)),

        # stride 1: spatial size stays 7x7, channels 256 -> 128.
        layers.Conv2DTranspose(filters=128, kernel_size=(5, 5), strides=(1, 1), padding="same", use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),

        # stride 2: 7x7 -> 14x14.
        layers.Conv2DTranspose(filters=64, kernel_size=(5, 5), strides=(2, 2), padding="same", use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),

        # stride 2: 14x14 -> 28x28.
        layers.Conv2DTranspose(filters=32, kernel_size=(5, 5), strides=(2, 2), padding="same", use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),

        # stride 2: 28x28 -> 56x56, 3 channels, tanh keeps values in [-1, 1].
        layers.Conv2DTranspose(filters=3, kernel_size=(5, 5), strides=(2, 2), padding="same", use_bias=False,
                               activation="tanh"),
    ])

    end = time.time()
    if DEBUG_LOG:
        print("Execution time: {:.9f}s (generator)".format(end - start))

    return model
Ejemplo n.º 26
0
    def define_model(self):
        """Build the conditional generator: (latent vector, class id) -> image."""
        latent = Input(shape=[self.model_parameters.latent_size])
        label = Input(shape=[1])

        # Embed the class id and project it into a 7x7 single-channel
        # conditioning map (10 classes, 50-dim embedding).
        cond = layers.Embedding(input_dim=10, output_dim=50)(label)
        cond = layers.Dense(units=7 * 7)(cond)
        cond = layers.Reshape(target_shape=(7, 7, 1))(cond)

        # Project the latent vector into a 7x7x256 feature volume.
        feat = layers.Dense(units=7 * 7 * 256, use_bias=False)(latent)
        feat = layers.BatchNormalization()(feat)
        feat = layers.LeakyReLU()(feat)
        feat = layers.Reshape((7, 7, 256))(feat)

        # Concatenate features with the conditioning map along channels.
        merged = layers.Concatenate(axis=3)([feat, cond])

        # Two conv + upsample stages: 7x7 -> 14x14 -> 28x28.
        h = layers.Conv2D(128, (5, 5),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False)(merged)
        h = layers.BatchNormalization()(h)
        h = layers.LeakyReLU()(h)
        h = layers.UpSampling2D()(h)

        h = layers.Conv2D(64, (5, 5),
                          strides=(1, 1),
                          padding='same',
                          use_bias=False)(h)
        h = layers.BatchNormalization()(h)
        h = layers.LeakyReLU()(h)
        h = layers.UpSampling2D()(h)

        # Final single-channel image; tanh keeps values in [-1, 1].
        image = layers.Conv2D(1, (5, 5),
                              strides=(1, 1),
                              padding='same',
                              use_bias=False,
                              activation='tanh')(h)

        return Model(name=self.model_name, inputs=[latent, label], outputs=image)
Ejemplo n.º 27
0
def DenseNet121(input_shape=None):
    """Construct a DenseNet-121 classifier with a NUM_CLASSES softmax head."""
    # Resolve/validate the input shape against the 224-pixel default.
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=True)

    img_input = layers.Input(shape=input_shape)
    channel_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    def _bn(tensor, name):
        # Batch-norm configuration shared by every normalization in the net.
        return layers.BatchNormalization(
            axis=channel_axis, epsilon=1.001e-5, name=name)(tensor)

    # Stem: 7x7/2 conv followed by 3x3/2 max-pool.
    y = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    y = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(y)
    y = _bn(y, 'conv1/bn')
    y = layers.Activation('relu', name='conv1/relu')(y)
    y = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(y)
    y = layers.MaxPooling2D(3, strides=2, name='pool1')(y)

    # Dense blocks of 6/12/24 layers, each followed by a 0.5-compression
    # transition, then the final 16-layer block.
    for block_len, idx in ((6, 2), (12, 3), (24, 4)):
        y = dense_block(y, block_len, name='conv%d' % idx)
        y = transition_block(y, 0.5, name='pool%d' % idx)
    y = dense_block(y, 16, name='conv5')

    y = _bn(y, 'bn')
    y = layers.Activation('relu', name='relu')(y)
    y = layers.GlobalAveragePooling2D(name='avg_pool')(y)

    imagenet_utils.validate_activation('softmax', None)
    y = layers.Dense(NUM_CLASSES, activation='softmax', name='predictions')(y)

    # Create model.
    return training.Model(img_input, y, name='densenet121')
Ejemplo n.º 28
0
def _separable_conv_block(ip,
                          filters,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          block_id=None):
    """Apply two relu -> separable-conv -> batch-norm stages to `ip`."""
    channel_dim = -1

    with backend.name_scope('separable_conv_block_%s' % block_id):
        out = layers.Activation('relu')(ip)
        # A strided block gets explicit zero padding and 'valid' convolution;
        # a unit-stride block simply uses 'same' padding.
        if strides == (2, 2):
            out = layers.ZeroPadding2D(
                padding=imagenet_utils.correct_pad(out, kernel_size),
                name='separable_conv_1_pad_%s' % block_id)(out)
            first_pad = 'valid'
        else:
            first_pad = 'same'

        # First separable conv carries the (possibly 2x2) stride.
        out = layers.SeparableConv2D(
            filters,
            kernel_size,
            strides=strides,
            name='separable_conv_1_%s' % block_id,
            padding=first_pad,
            use_bias=False,
            kernel_initializer='he_normal')(out)
        out = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name='separable_conv_1_bn_%s' % (block_id))(out)

        # Second stage is always unit stride with 'same' padding.
        out = layers.Activation('relu')(out)
        out = layers.SeparableConv2D(
            filters,
            kernel_size,
            name='separable_conv_2_%s' % block_id,
            padding='same',
            use_bias=False,
            kernel_initializer='he_normal')(out)
        out = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name='separable_conv_2_bn_%s' % (block_id))(out)
    return out
Ejemplo n.º 29
0
    def __init__(self, num_hidden, num_classes, use_bn=False, use_dp=False):
        """Small two-layer MLP with optional dropout and batch normalization."""
        super(SmallSubclassMLP, self).__init__(name='test_model')
        self.use_bn = use_bn
        self.use_dp = use_dp

        # Hidden layer, then a head whose activation depends on the task:
        # sigmoid for a single-unit (binary) head, softmax otherwise.
        self.layer_a = layers.Dense(num_hidden, activation='relu')
        head_activation = 'sigmoid' if num_classes == 1 else 'softmax'
        self.layer_b = layers.Dense(num_classes, activation=head_activation)

        # Optional regularization / normalization layers.
        if use_dp:
            self.dp = layers.Dropout(0.5)
        if use_bn:
            self.bn = layers.BatchNormalization(axis=-1)
Ejemplo n.º 30
0
def discriminator_model():
    """MLP discriminator over 121-dim inputs: 1024 -> 512 -> 1 logit."""
    disc = tf.keras.Sequential()

    disc.add(layers.Dense(1024, input_shape=(121,)))
    disc.add(layers.BatchNormalization())
    disc.add(layers.LeakyReLU())

    disc.add(layers.Dense(512))
    disc.add(layers.LeakyReLU())

    # Single raw output; no activation — presumably consumed as a logit.
    disc.add(layers.Dense(1))
    return disc