import tensorflow as tf
from tensorflow.keras import layers


def my_model():
    # prep layers
    inp = layers.Input(shape=(32, 32, 3))
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layer4
    x = layers.GlobalMaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(10)(x)
    x = layers.Activation('softmax', dtype='float32')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)

    return model
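
# Note: `residual` is not defined in this example. A minimal sketch of the
# helper is shown below, assuming it mirrors the conv-BN-LeakyReLU pattern of
# the surrounding network (two 3x3 convolutions at the same width, added back
# to the main path by the caller's Add layer); the original project may differ.
def residual(x, filters):
    y = layers.Conv2D(filters, 3, padding='same')(x)
    y = layers.BatchNormalization(momentum=0.8)(y)
    y = layers.LeakyReLU(alpha=0.1)(y)
    y = layers.Conv2D(filters, 3, padding='same')(y)
    y = layers.BatchNormalization(momentum=0.8)(y)
    y = layers.LeakyReLU(alpha=0.1)(y)
    return y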
Example #2
from typing import Tuple

import tensorflow as tf
from tensorflow.keras import layers


def ResNet9(input_size: Tuple[int, int, int] = (32, 32, 3),
            classes: int = 10) -> tf.keras.Model:
    """A small 9-layer ResNet Tensorflow model for cifar10 image classification.
    The model architecture is from https://github.com/davidcpage/cifar10-fast

    Args:
        input_size: The size of the input tensor (height, width, channels).
        classes: The number of outputs the model should generate.

    Raises:
        ValueError: Length of `input_size` is not 3.
        ValueError: `input_size[0]` or `input_size[1]` is not a multiple of 16.

    Returns:
        A TensorFlow ResNet9 model.
    """
    _check_input_size(input_size)

    # prep layers
    inp = layers.Input(shape=input_size)
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layer4
    x = layers.GlobalMaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(classes)(x)
    x = layers.Activation('softmax', dtype='float32')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)

    return model
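
# Note: `_check_input_size` is not shown. A minimal sketch that raises the two
# ValueErrors described in the docstring above (assumed implementation):
def _check_input_size(input_size: Tuple[int, int, int]) -> None:
    if len(input_size) != 3:
        raise ValueError(
            f'Length of `input_size` must be 3, got {len(input_size)}')
    if input_size[0] % 16 != 0 or input_size[1] % 16 != 0:
        raise ValueError(
            f'`input_size` height and width must be multiples of 16, got {input_size[:2]}')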
Example #3
import tensorflow as tf
from tensorflow.keras import layers


def get_quantize_outputs_removal_test_model(input_shape):
    #               (input)
    #       /      /      \     \
    #   (conv1) (conv2) (conv3) (conv4)
    #     |        |         \        \
    #     |        |          \        \
    # (flatten1) (flatten1) (flatten2) (GlobalMaxPool2D)
    #        \      |       /           /
    #          (keras_concat)         /
    #                      \        /
    #                     (tf_concat)
    #                         /
    #                 (keras_reshape)
    #                    /
    #             (tf_reshape)

    inputs = tf.keras.Input(shape=input_shape[1:], name='input')
    conv1 = layers.Conv2D(3, 8, name='conv1')
    conv2 = layers.Conv2D(3, 8, name='conv2')
    conv3 = layers.Conv2D(3, 8, name='conv3')
    conv4 = layers.Conv2D(3, 8, name='conv4')
    flatten1 = layers.Flatten()
    flatten2 = layers.Flatten()
    keras_concat = layers.Concatenate(name='keras_concat')
    x1 = conv1(inputs)
    x1 = flatten1(x1)
    x2 = conv2(inputs)
    x2 = flatten1(x2)
    x3 = conv3(inputs)
    x3 = flatten2(x3)
    x4 = conv4(inputs)
    x4 = layers.GlobalMaxPool2D()(x4)
    x123 = keras_concat([x1, x2, x3])
    x = tf.concat([x123, x4], -1, name='tf_concat')
    x = layers.Reshape((-1, 4), name='keras_reshape')(x)
    outputs = tf.reshape(x, (-1, 2, 2), name='tf_reshape')
    return tf.keras.Model(inputs=inputs, outputs=outputs)
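
# Usage sketch (illustrative input shape, not from the original test): with a
# (1, 10, 10, 3) input, each 8x8 convolution leaves 3x3x3 feature maps, so the
# concatenated vector holds 3 * 27 + 3 = 84 values per sample, reshaped first
# to (21, 4) and then, with the batch dimension folded in, to (batch * 21, 2, 2).
test_model = get_quantize_outputs_removal_test_model(input_shape=(1, 10, 10, 3))
test_model.summary()
print(test_model(tf.zeros((1, 10, 10, 3))).shape)  # (21, 2, 2) for a batch of one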
Example #4
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

# Encoder: stacked convolutions plus global max pooling compress a 28x28x1
# image into a 16-dimensional code.
encode_input = tf.keras.Input(shape=(28, 28, 1), name='src_img')
h1 = layers.Conv2D(16, 3, activation=tf.nn.relu)(encode_input)
h1 = layers.Conv2D(32, 3, activation=tf.nn.relu)(h1)
h1 = layers.MaxPool2D(3)(h1)
h1 = layers.Conv2D(32, 3, activation='relu')(h1)
h1 = layers.Conv2D(16, 3, activation='relu')(h1)
encode_output = layers.GlobalMaxPool2D()(h1)
encode_model = tf.keras.Model(inputs=encode_input,
                              outputs=encode_output,
                              name='encoder')
encode_model.summary()

# Decoder: transposed convolutions and upsampling expand the 16-dimensional
# code back to a 28x28x1 image.
decode_input = tf.keras.Input(shape=(16,), name='encoded_img')
h2 = layers.Reshape((4, 4, 1))(decode_input)
h2 = layers.Conv2DTranspose(16, 3, activation='relu')(h2)
h2 = layers.Conv2DTranspose(32, 3, activation='relu')(h2)
h2 = layers.UpSampling2D(3)(h2)
h2 = layers.Conv2DTranspose(16, 3, activation='relu')(h2)
decode_output = layers.Conv2DTranspose(1, 3, activation='relu')(h2)
decode_model = tf.keras.Model(inputs=decode_input,
                              outputs=decode_output,
                              name='decoder')
decode_model.summary()

# End-to-end autoencoder: the encoder's output feeds the decoder.
autoencoder_input = tf.keras.Input(shape=(28, 28, 1), name='img')
h3 = encode_model(autoencoder_input)
autoencoder_output = decode_model(h3)