Example #1
def TrongNet(input_shape, num_classes, pretrained_weights=None):
    input_image = layers.Input(shape=input_shape, name='input_1')
    x = layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      activation=activations.relu)(input_image)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=64,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=64,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=128,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.MaxPool2D(pool_size=(2, 2))(x)
    x = layers.BatchNormalization()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(units=256, activation=activations.relu)(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(units=64, activation=activations.relu)(x)
    x = layers.Dense(units=num_classes,
                     activation=activations.softmax,
                     name='predictions')(x)
    model = models.Model(input_image, x, name='trongnet')
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
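A minimal usage sketch (assuming the snippet's imports are `from tensorflow.keras import layers, activations, models`; the input shape below is illustrative):

# Hypothetical call; any input large enough to survive four 2x poolings works.
model = TrongNet(input_shape=(96, 96, 3), num_classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()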
Example #2
    def __init__(self):
        super(Network, self).__init__()

        self.mylayers = [
            # unit1
            layers.Conv2D(filters=32,
                          kernel_size=[5, 5],
                          padding='same',
                          activation=nn.relu),
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
            # unit2
            layers.Conv2D(filters=64,
                          kernel_size=[5, 5],
                          padding='same',
                          activation=nn.relu),
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),
            # flatten the tensor
            layers.Flatten(),
            # two fully connected layers
            layers.Dense(512, activation=nn.relu),
            layers.Dense(10, activation=nn.softmax)
            # layers.Dense(10, activation=None)
        ]
        # Whether the network's last layer needs activation=nn.softmax depends on the TensorFlow version:
        # If the TF version is below 1.13:
        #     add it, and set loss=keras.losses.categorical_crossentropy in model.compile
        # If the TF version is 1.13 or above:
        #     omit it, and set loss=keras.losses.CategoricalCrossentropy(from_logits=True) in model.compile

        self.net = Sequential(self.mylayers)
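A compile sketch illustrating the two configurations described in the comment above (`model` and `keras` are assumed names; the training code is not part of this excerpt):

# TF < 1.13: softmax in the last layer, plain cross-entropy loss.
model.compile(optimizer='adam',
              loss=keras.losses.categorical_crossentropy,
              metrics=['accuracy'])

# TF >= 1.13: logits output (no softmax); the loss applies softmax internally.
model.compile(optimizer='adam',
              loss=keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])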
Example #3
def network_1_m():
    """
    Network for the memristor CNN Simulink model.
    Total params: 8,230
    Trainable params: 8,214
    Non-trainable params: 16
    acc: 0.983
    :return: model
    """
    inputs = kl.Input(shape=(8, 16, 1))
    bone = kl.BatchNormalization(axis=1)(inputs)
    bone = kl.Conv2D(filters=8,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=4,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=60, activation='relu')(bone)
    outputs = kl.Dense(units=6, activation='softmax')(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #4
def my_model():
    # prep layers
    inp = layers.Input(shape=(32, 32, 3))
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layer4
    x = layers.GlobalMaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(10)(x)
    x = layers.Activation('softmax', dtype='float32')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)

    return model
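The `residual` helper is not part of this excerpt; a plausible sketch consistent with its use above (a constant-width branch of two conv-BN-LeakyReLU blocks, the usual pattern in the cifar10-fast architecture this is based on):

def residual(x, filters):
    # Hypothetical helper: two 3x3 conv blocks that preserve shape, so the
    # result can be added back onto x by layers.Add().
    y = layers.Conv2D(filters, 3, padding='same')(x)
    y = layers.BatchNormalization(momentum=0.8)(y)
    y = layers.LeakyReLU(alpha=0.1)(y)
    y = layers.Conv2D(filters, 3, padding='same')(y)
    y = layers.BatchNormalization(momentum=0.8)(y)
    y = layers.LeakyReLU(alpha=0.1)(y)
    return y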
Example #5
def alex_net():
    model = tf.keras.Sequential()
    model.add(
        layers.Conv2D(filters=96,
                      kernel_size=(11, 11),
                      strides=(4, 4),
                      padding='valid',
                      activation='relu',
                      input_shape=(227, 227, 3),
                      kernel_initializer='uniform'))
    model.add(
        layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv2D(filters=256,
                      kernel_size=(5, 5),
                      strides=(1, 1),
                      padding='same',
                      activation='relu',
                      kernel_initializer='uniform'))
    model.add(
        layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
    model.add(layers.BatchNormalization())
    model.add(
        layers.Conv2D(filters=384,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      kernel_initializer='uniform'))
    model.add(
        layers.Conv2D(filters=384,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      kernel_initializer='uniform'))
    model.add(
        layers.Conv2D(filters=256,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      kernel_initializer='uniform'))
    model.add(layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(4096, activation=tf.keras.activations.relu))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(4096, activation=tf.keras.activations.relu))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10, activation=tf.keras.activations.softmax))
    return model
Example #6
    def __init__(self):
        super(KerasModel, self).__init__()
        weight_decay = 1e-4
        self.conv1 = layers.Conv2D(
            32, (3, 3),
            padding='same',
            input_shape=(32, 32, 3),
            kernel_regularizer=regularizers.l2(weight_decay))
        self.elu1 = layers.ELU()
        self.bn1 = layers.BatchNormalization()
        self.conv2 = layers.Conv2D(
            32, (3, 3), kernel_regularizer=regularizers.l2(weight_decay))
        self.elu2 = layers.ELU()
        self.bn2 = layers.BatchNormalization()
        self.pool1 = layers.MaxPool2D(pool_size=(2, 2))
        self.dropout1 = layers.Dropout(rate=0.2)

        self.conv3 = layers.Conv2D(
            64, (3, 3),
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay))
        self.elu3 = layers.ELU()
        self.bn3 = layers.BatchNormalization()
        self.conv4 = layers.Conv2D(
            64, (3, 3), kernel_regularizer=regularizers.l2(weight_decay))
        self.elu4 = layers.ELU()
        self.bn4 = layers.BatchNormalization()
        self.pool2 = layers.MaxPool2D(pool_size=(2, 2))
        self.dropout2 = layers.Dropout(rate=0.3)

        self.conv5 = layers.Conv2D(
            128, (3, 3),
            padding='same',
            kernel_regularizer=regularizers.l2(weight_decay))
        self.elu5 = layers.ELU()
        self.bn5 = layers.BatchNormalization()
        self.conv6 = layers.Conv2D(
            128, (3, 3), kernel_regularizer=regularizers.l2(weight_decay))
        self.elu6 = layers.ELU()
        self.bn6 = layers.BatchNormalization()
        self.pool3 = layers.MaxPool2D(pool_size=(2, 2))
        self.dropout3 = layers.Dropout(rate=0.4)

        self.flatten1 = layers.Flatten()
        self.dense1 = layers.Dense(512)
        self.elu7 = layers.ELU()
        self.dropout4 = layers.Dropout(rate=0.5)
        self.dense2 = layers.Dense(10)
        self.softmax = layers.Softmax()
Example #7
def network_1a():
    """
    Reference: An optimized Deep Convolutional Neural Network for dendrobium classification based on electronic nose
    Modified from the original paper by adding a BN layer.
    acc=0.992~0.995, size=6.17Mb, time=0.95s
    Total params: 534,214 (525,312 of them in the fully connected layers)
    :return: model
    """
    inputs = kl.Input(shape=(8, 16, 1))
    bone = kl.BatchNormalization(axis=1)(
        inputs)  # modified: the BN layer greatly speeds up convergence and improves accuracy.  TODO: LayerNormalization might work even better
    bone = kl.Conv2D(filters=32,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=1024, activation='relu')(bone)
    bone = kl.Dropout(0.7)(bone)
    outputs = kl.Dense(units=6, activation='softmax')(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #8
def ResNet9(input_size: Tuple[int, int, int] = (32, 32, 3),
            classes: int = 10) -> tf.keras.Model:
    """A small 9-layer ResNet Tensorflow model for cifar10 image classification.
    The model architecture is from https://github.com/davidcpage/cifar10-fast

    Args:
        input_size: The size of the input tensor (height, width, channels).
        classes: The number of outputs the model should generate.

    Raises:
        ValueError: Length of `input_size` is not 3.
        ValueError: `input_size`[0] or `input_size`[1] is not a multiple of 16.

    Returns:
        A TensorFlow ResNet9 model.
    """
    _check_input_size(input_size)

    # prep layers
    inp = layers.Input(shape=input_size)
    x = layers.Conv2D(64, 3, padding='same')(inp)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer1
    x = layers.Conv2D(128, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 128)])
    # layer2
    x = layers.Conv2D(256, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    # layer3
    x = layers.Conv2D(512, 3, padding='same')(x)
    x = layers.MaxPool2D()(x)
    x = layers.BatchNormalization(momentum=0.8)(x)
    x = layers.LeakyReLU(alpha=0.1)(x)
    x = layers.Add()([x, residual(x, 512)])
    # layer4
    x = layers.GlobalMaxPool2D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(classes)(x)
    x = layers.Activation('softmax', dtype='float32')(x)
    model = tf.keras.Model(inputs=inp, outputs=x)

    return model
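`_check_input_size` is defined elsewhere; a minimal sketch consistent with the ValueErrors documented in the docstring above (an assumption, not the library's code):

def _check_input_size(input_size):
    # Validate (height, width, channels) as described in the ResNet9 docstring.
    if len(input_size) != 3:
        raise ValueError("input_size must be (height, width, channels)")
    if input_size[0] % 16 != 0 or input_size[1] % 16 != 0:
        raise ValueError("height and width of input_size must be multiples of 16")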
Example #9
    def __init__(self):
        super(CNN, self).__init__()
        self.feature = models.Sequential([
            layers.Conv2D(64, (3, 3),
                          activation=tf.nn.relu,
                          input_shape=(28, 28, 1)),
            layers.MaxPool2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation=tf.nn.relu),
            layers.MaxPool2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation=tf.nn.relu)
        ])
        self.classifier = models.Sequential([
            layers.Flatten(),
            layers.Dense(64, activation=tf.nn.relu),
            layers.Dense(10, activation=tf.nn.softmax)
        ])
Example #10
def create_classifier():

    with tf.name_scope("Disc"):
        X = kl.Input((28, 28, 1), name="X")
        layer = X

        for l in range(3):
            layer = kl.Conv2D(filters=64 * (2**l),
                              kernel_size=3,
                              padding="same",
                              use_bias=False,
                              activation="relu",
                              kernel_regularizer=kr.l2())(layer)
            layer = kl.Conv2D(filters=64 * (2**l),
                              kernel_size=3,
                              padding="same",
                              use_bias=False,
                              activation="relu",
                              kernel_regularizer=kr.l2())(layer)
            layer = kl.MaxPool2D()(layer)
            layer = kl.BatchNormalization()(layer)

        layer = kl.Flatten()(layer)
        layer = kl.Dense(256, kernel_regularizer=kr.l2())(layer)
        layer = kl.LeakyReLU()(layer)
        D_out = kl.Dense(10, activation="softmax",
                         kernel_regularizer=kr.l2())(layer)

        model = k.Model(inputs=X, outputs=D_out)
        fidmodel = k.Model(inputs=X, outputs=layer)
    return model, fidmodel
Example #11
def network_1e():
    """
    Two conv layers: BN + Conv(8) + Conv(8) + MaxPooling + FC(512) + FC(6)
    acc=0.96~0.99, size=3Mb, time=0.05
    :return: model
    """
    inputs = kl.Input(shape=(8, 16, 1))
    bone = kl.BatchNormalization(axis=1)(inputs)
    bone = kl.Conv2D(filters=8,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)  # too few convolution filters noticeably hurts accuracy
    bone = kl.Conv2D(filters=8,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=512, activation='relu')(bone)
    outputs = kl.Dense(units=6, activation='softmax')(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #12
def network_1a_arcface():
    """
    Reference: An optimized Deep Convolutional Neural Network for dendrobium classification based on electronic nose
    Adds an ArcFace output layer (ArcFace is a custom layer defined elsewhere).
    :return: model
    """
    inputs = kl.Input(shape=(8, 16, 1))
    bone = kl.BatchNormalization(axis=1)(
        inputs)  # modified: the BN layer greatly speeds up convergence and improves accuracy.  TODO: LayerNormalization might work even better
    bone = kl.Conv2D(filters=32,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=1024,
                    activation='relu',
                    kernel_initializer='he_normal')(bone)
    outputs = ArcFace(n_classes=6, s=1, m=0.35)(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #13
def network_1a_5boards():
    """
    Reference: An optimized Deep Convolutional Neural Network for dendrobium classification based on electronic nose
    Switched to the 5-boards dataset and an FC(512) layer.
    :return: model
    """
    inputs = kl.Input(shape=(300, 8, 1))
    bone = kl.BatchNormalization(axis=1)(
        inputs)  # TODO: since the 5-boards samples differ little, an L2-normalization layer might work better
    bone = kl.Conv2D(filters=32,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=512, activation='relu')(bone)
    outputs = kl.Dense(units=4, activation='softmax')(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #14
def network_1a_SDA(summary=True):
    """
    Reference: An optimized Deep Convolutional Neural Network for dendrobium classification based on electronic nose
    Connected with SDA1.
    :return: model
    """
    inputs = kl.Input(shape=(16, 1, 1))
    # bone = kl.BatchNormalization(1)(inputs)  # modified: the BN layer greatly speeds up convergence and improves accuracy
    bone = kl.Conv2D(filters=32,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(inputs)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=1024, activation='relu')(bone)
    outputs = kl.Dense(units=6, activation='softmax')(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    if summary:
        model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #15
def network_1b():
    """
    Changed the final FC layer of the original network to 512 units.
    acc=0, size=, time=0
    :return: model
    """
    inputs = kl.Input(shape=(8, 16, 1))
    bone = kl.BatchNormalization(axis=1)(inputs)  # modified: the BN layer greatly speeds up convergence and improves accuracy
    bone = kl.Conv2D(filters=32,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=512, activation='relu')(bone)
    outputs = kl.Dense(units=6, activation='softmax')(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #16
    def __init__(self, height, width, channels):
        discriminator_input = layers.Input(shape=(height, width, channels))

        x = layers.Conv2D(64, 5, padding='same')(discriminator_input)
        x = layers.Activation('tanh')(x)
        x = layers.MaxPool2D()(x)
        x = layers.Conv2D(128, 5)(x)
        x = layers.Activation('tanh')(x)
        x = layers.MaxPool2D()(x)
        x = layers.Flatten()(x)
        x = layers.Dense(1024)(x)
        x = layers.Activation('tanh')(x)
        x = layers.Dense(1, activation='sigmoid')(x)

        self.discriminator = tf.keras.models.Model(discriminator_input, x)

        # compile discriminator
        discriminator_optimizer = tf.keras.optimizers.SGD(learning_rate=0.0005, momentum=0.9, nesterov=True)
        self.discriminator.compile(optimizer=discriminator_optimizer, loss='binary_crossentropy')
Example #17
def create_classifier():

    with tf.name_scope("Disc"):
        X = kl.Input((32, 32, 3), name="X")

        layer = kl.Conv2D(filters=16,
                          kernel_size=3,
                          padding="same",
                          activation="relu")(X)
        layer = kl.BatchNormalization()(layer)
        layer = kl.Conv2D(filters=32,
                          kernel_size=3,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Conv2D(filters=64,
                          kernel_size=4,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Conv2D(filters=128,
                          kernel_size=4,
                          padding="same",
                          activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)
        layer = kl.MaxPool2D()(layer)

        layer = kl.Dropout(0.2)(layer)

        layer = kl.Flatten()(layer)
        fidout = layer
        layer = kl.Dense(512, activation="relu")(layer)
        layer = kl.Dropout(0.2)(layer)

        D_out = kl.Dense(10, activation="softmax")(layer)

        model = k.Model(inputs=X, outputs=D_out)
        fidmodel = k.Model(inputs=X, outputs=fidout)
    return model, fidmodel
Example #18
def build_model():

    # Create LeNet model
    model = models.Sequential()
    model.add(
        layers.Conv2D(20, [3, 3],
                      input_shape=[28, 28, 1],
                      activation='relu',
                      name='conv_1'))
    model.add(layers.MaxPool2D())
    model.add(layers.Conv2D(50, [3, 3], activation='relu', name='conv_2'))
    model.add(layers.MaxPool2D())
    model.add(layers.Permute((2, 1, 3)))
    model.add(layers.Flatten())
    model.add(layers.Dense(500, activation='relu', name='dense_1'))
    model.add(layers.Dense(10, activation='softmax', name='dense_2'))

    compile_model(model)

    return model
Example #19
def max_pool1d(x, kernel_size=2, stride=2, padding='same'):
    """
    1-D max pooling implemented with MaxPool2D over a dummy width axis.
    Args:
        x: input tensor (N, L, C)
        kernel_size: int
        stride: int
        padding: 'same' or 'valid'
    Returns: tensor (N, L//stride, C)
    """
    with tf.name_scope(max_pool1d.__name__):
        _x = tf.expand_dims(x, axis=2)
        _x = kl.MaxPool2D((kernel_size, 1), (stride, 1), padding)(_x)
        _x = tf.squeeze(_x, axis=2)
    return _x
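A quick usage sketch, assuming the snippet's aliases (`tf`, `kl`) are in scope; shapes are illustrative:

x = tf.random.normal([4, 100, 8])             # (N, L, C)
y = max_pool1d(x, kernel_size=2, stride=2)    # -> shape (4, 50, 8)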
Example #20
    def __init__(self, out_channels, activation='swish1', **kwargs):
        # Call the Layer constructor first: tf.keras raises an error if
        # layer attributes are assigned before super().__init__() runs.
        super(ResidualBlock, self).__init__(**kwargs)

        self.conv1 = ConvBlock(out_channels, activation, add_maxpool=False)
        self.conv2 = ConvBlock(out_channels, activation, add_maxpool=False)
        self.conv3 = ConvBlock(out_channels,
                               activation=None,
                               add_maxpool=False)

        self.nl = create_activation(activation)
        self.maxpool = layers.MaxPool2D()

        self.conv_res = layers.Conv2D(out_channels, 3, padding='same')
        self.bn_res = layers.BatchNormalization()

        self.out_channels = out_channels
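The block's forward pass is not shown in this excerpt; a plausible `call`, assuming `ConvBlock` is a conv-BN(-activation) layer and the `conv_res`/`bn_res` pair forms the shortcut (a sketch, not the author's implementation):

    def call(self, inputs, training=False):
        # Hypothetical forward pass: main path of three ConvBlocks plus a
        # projected shortcut, then activation and spatial downsampling.
        shortcut = self.bn_res(self.conv_res(inputs), training=training)
        x = self.conv1(inputs)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.nl(x + shortcut)
        return self.maxpool(x)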
Example #21
    def __init__(self):
        super(MyAlexNet, self).__init__()

        self.Layers = [
            layers.Conv2D(filters=48, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)), # 64
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),  # 192
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Conv2D(filters=192, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)), # 384
            layers.Conv2D(filters=192, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)), # 256
            layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)), # 256
            layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

            layers.Flatten(),

            layers.Dense(2048, activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),     # 2048
            layers.Dense(2048, activation=nn.relu, kernel_regularizer=regularizers.l2(hps.lamda)),     # 2048
            layers.Dense(10, activation=nn.softmax, kernel_regularizer=regularizers.l2(hps.lamda)),
            # layers.Dense(10, activation=None),
        ]
        self.net = Sequential(self.Layers)
        self.net.build(input_shape=[None, 32, 32, 3])
Example #22
File: model.py, Project: Kexine/math-cgv
        def encoder_block(a, n_filters):
            a = layers.Conv2D(filters=n_filters,
                              kernel_size=(4, 4),
                              padding='same',
                              kernel_regularizer=regularizers.l1_l2(
                                  l1=Config.l1_kernel_regularization,
                                  l2=Config.l2_kernel_regularization))(a)
            a = layers.BatchNormalization()(a)
            a = layers.LeakyReLU()(a)
            a = layers.MaxPool2D(pool_size=(2, 2))(a)

            if Config.use_spatial_dropout:
                a = layers.SpatialDropout2D(
                    rate=Config.spatial_dropout_rate)(a)
            return a
Example #23
    def __init__(self,
                 block,
                 layer_sizes,
                 width_per_group=64,
                 replace_stride_with_dilation=None):
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(
                                 replace_stride_with_dilation))
        self.base_width = width_per_group

        conv1 = layers.Conv2D(self.inplanes, (7, 7),
                              strides=(2, 2),
                              padding='same',
                              use_bias=False)
        bn1 = layers.BatchNormalization()
        relu = layers.ReLU()
        maxpool = layers.MaxPool2D(pool_size=(3, 3),
                                   strides=(2, 2),
                                   padding='same')

        self.layer0 = Sequential(layers=[conv1, bn1, relu, maxpool],
                                 name='layer0')
        self.layer1 = self._make_layer('layer1', block, 64, layer_sizes[0])
        self.layer2 = self._make_layer('layer2',
                                       block,
                                       128,
                                       layer_sizes[1],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer('layer3',
                                       block,
                                       256,
                                       layer_sizes[2],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer('layer4',
                                       block,
                                       512,
                                       layer_sizes[3],
                                       stride=2,
                                       dilate=replace_stride_with_dilation[2])
Example #24
def add_vgg_conv_pool_stack(num_conv, filters, model):
    """
    Appends a VGG-style stack of convolution layers followed by one max-pooling layer.

    :param num_conv: number of convolution layers in the stack
    :param filters: number of filters per convolution layer
    :param model: Keras / TensorFlow 2.0 Sequential model
    :return: None (the model is modified in place)
    """
    for i in range(num_conv):
        model.add(
            layers.Conv2D(filters=filters,
                          kernel_size=conv_filter,
                          activation='relu',
                          strides=conv_strides,
                          padding=conv_padding))
    model.add(layers.MaxPool2D(pool_size=pool_size, strides=pool_stride))
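The module-level constants referenced above (`conv_filter`, `conv_strides`, `conv_padding`, `pool_size`, `pool_stride`) are not part of this excerpt; a usage sketch with assumed VGG-style values:

# Assumed values; the source module may define different ones.
conv_filter = (3, 3)
conv_strides = (1, 1)
conv_padding = 'same'
pool_size = (2, 2)
pool_stride = (2, 2)

model = tf.keras.Sequential()
model.add(layers.InputLayer(input_shape=(224, 224, 3)))
add_vgg_conv_pool_stack(num_conv=2, filters=64, model=model)
add_vgg_conv_pool_stack(num_conv=2, filters=128, model=model)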
Example #25
    def __init__(self, layer_dims, num_classes):
        super(ResNet, self).__init__()
        self.stem = Sequential([
            layers.Conv2D(64, (3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
        ])

        self.layer1 = self.build_resblock(filter_num=64, blocks=layer_dims[0])
        self.layer2 = self.build_resblock(filter_num=128, blocks=layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(filter_num=256, blocks=layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(filter_num=512, blocks=layer_dims[3], stride=2)

        self.avg_pool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(units=num_classes)
Example #26
    def __init__(self, layer_dims, num_classes=10):
        super(ResNet, self).__init__()
        # Stem: a 64-filter 3x3 conv layer, then BN, ReLU activation, and max pooling.
        self.stem = Sequential([
            layers.Conv2D(64, (3, 3), strides=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
        ])
        # Build the four residual stages
        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)

        self.avgpool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(num_classes)
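`build_resblock` is defined elsewhere in both ResNet examples; a plausible sketch, assuming a standard `BasicBlock` layer (a hypothetical name) that downsamples only in the first block of each stage:

    def build_resblock(self, filter_num, blocks, stride=1):
        # Hypothetical helper: the first block may downsample (stride > 1);
        # the remaining blocks keep the resolution.
        res_blocks = Sequential()
        res_blocks.add(BasicBlock(filter_num, stride))
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))
        return res_blocks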
Example #27
    def __init__(self, out_channels_1_1, reduce_channels_3_3, out_channels_3_3,
                 reduce_channels_5_5, out_channels_5_5, pool_proj, check):
        super(Inception_Block, self).__init__()
        assert out_channels_1_1 + out_channels_3_3 + out_channels_5_5 + pool_proj == check

        self.inception_conv_1 = Sequential([
            layers.Conv2D(filters=out_channels_1_1,
                          kernel_size=(1, 1),
                          padding='same'),
            layers.BatchNormalization(),
            layers.Activation('relu')
        ])

        self.inception_conv_3 = Sequential([
            layers.Conv2D(filters=reduce_channels_3_3,
                          kernel_size=(1, 1),
                          padding='same'),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.Conv2D(filters=out_channels_3_3,
                          kernel_size=(3, 3),
                          padding='same'),
            layers.BatchNormalization(),
            layers.Activation('relu')
        ])

        self.inception_conv_5 = Sequential([
            layers.Conv2D(filters=reduce_channels_5_5,
                          kernel_size=(1, 1),
                          padding='same'),
            layers.BatchNormalization(),
            layers.Activation('relu'),
            layers.Conv2D(filters=out_channels_5_5,
                          kernel_size=(3, 3),
                          padding='same'),
            layers.BatchNormalization(),
            layers.Activation('relu')
        ])

        self.inception_conv_pool = Sequential([
            layers.MaxPool2D(pool_size=(3, 3), strides=1, padding='same'),
            layers.Conv2D(filters=pool_proj, kernel_size=(1, 1)),
            layers.BatchNormalization(),
            layers.Activation('relu')
        ])
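The forward pass is not shown; a plausible `call` that evaluates the four branches and concatenates them along the channel axis (a sketch; `tf` is assumed to be imported):

    def call(self, inputs, training=False):
        # The four branch widths sum to `check` (see the assert in __init__).
        b1 = self.inception_conv_1(inputs, training=training)
        b3 = self.inception_conv_3(inputs, training=training)
        b5 = self.inception_conv_5(inputs, training=training)
        bp = self.inception_conv_pool(inputs, training=training)
        return tf.concat([b1, b3, b5, bp], axis=-1)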
Example #28
    def _build(self, params) -> None:
        """
        Builds the convolutional network.
        """

        # Get the main parameters of the network
        img_size = params.image_size
        in_channels = params.in_channels
        out_channels = params.out_channels
        num_labels = params.num_labels
        bn_momentum = params.bn_momentum
        channels = [out_channels, out_channels * 2, out_channels * 4]

        for i, c in enumerate(channels):
            self._model.add(
                layers.Conv2D(filters=c,
                              kernel_size=(7, 7),
                              input_shape=(img_size, img_size, in_channels),
                              data_format='channels_last',
                              padding='same',
                              name='conv1_{i}'.format(i=i)))
            self._model.add(
                layers.BatchNormalization(momentum=bn_momentum,
                                          name='batch_norm_{i}'.format(i=i)))
            self._model.add(
                layers.Activation('selu', name='selu1_{i}'.format(i=i)))
            self._model.add(
                layers.MaxPool2D(pool_size=(3, 3),
                                 strides=(3, 3),
                                 name='max_pool_{i}'.format(i=i),
                                 padding='valid'))

        self._model.add(layers.Flatten(name='flatt_1'))
        self._model.add(
            layers.Dense(units=128, name='last_linear', activation='linear'))

        self._model.add(
            layers.BatchNormalization(momentum=bn_momentum,
                                      name='batch_norm_last'))
        self._model.add(layers.Activation('selu', name='last_selu'))
        self._model.add(
            layers.Dense(units=num_labels,
                         name='classifier',
                         activation='sigmoid'))
Example #29
def get_test_model_shared_convs(input_shape):
    inputs = tf.keras.Input(shape=input_shape[1:], name='input')
    conv1 = layers.Conv2D(512, 1, name='conv1', kernel_initializer='Ones',
                          bias_initializer='Ones')
    conv2 = layers.Conv2D(1024, 3, name='conv2', kernel_initializer='Ones',
                          bias_initializer='Ones')
    conv3 = layers.Conv2D(1024, 1, name='conv3', kernel_initializer='Ones',
                          bias_initializer='Ones')
    maxpool = layers.MaxPool2D()

    in1 = conv1(inputs)
    in2 = maxpool(in1)
    out1 = conv2(in1)
    out2 = conv2(in2)
    x = conv3(out1)
    y = conv3(out2)

    init_conv_weights(conv1.kernel)
    init_conv_weights(conv2.kernel)
    init_conv_weights(conv3.kernel)
    return tf.keras.Model(inputs=inputs, outputs=[x, y])
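`init_conv_weights` is defined elsewhere; a plausible sketch that re-initializes a kernel variable in place (hypothetical; the real helper may use a different scheme):

def init_conv_weights(kernel):
    # Overwrite the ones-initialized kernel with Glorot-uniform values.
    initializer = tf.keras.initializers.GlorotUniform()
    kernel.assign(initializer(shape=kernel.shape, dtype=kernel.dtype))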
Example #30
    def learn_inv_cov(self, var_scope='inv'):
        '''
        Unary precision matrix (inverse of the covariance matrix)
        '''
        self.learned_inv_covs = []
        self.learned_log_det_inv_covs = []

        for i in range(self.num_modules):
            h = tf.concat([self.logits[i], self.last_feature[i]], axis=-1)
            h = self.conv1_inv(h)
            h_32 = layers.AveragePooling2D(pool_size=(2, 2),
                                           strides=2,
                                           name=var_scope + '_AvgPool2D')(h)
            h_32 = self.conv2_inv(h_32)
            h_8 = layers.MaxPool2D(pool_size=(4, 4),
                                   strides=4,
                                   name=var_scope + '_MaxPool2D')(h_32)

            h_8 = tf.transpose(h_8, [0, 3, 1, 2])
            h_8 = tf.reshape(h_8,
                             shape=[tf.shape(h_8)[0], self.FLAGS.num_lmks, 64])
            h_8 = self.dense1(h_8)
            h_8 = self.dense2(h_8)
            h_8 = self.dense3(h_8)

            # N x C x 2 x 2 Cholesky factor
            # Note: tf.contrib is TF1-only; in TF2 use tfp.math.fill_triangular instead.
            h_low = tf.contrib.distributions.fill_triangular(h_8)  # lower triangular
            h_diag = tf.abs(tf.linalg.diag_part(h_low)) + 0.01
            log_h_diag = tf.math.log(h_diag)  # diagonal elements > 0
            self.h = tf.linalg.set_diag(h_low, h_diag)

            # N C 2 2
            learned_inv_cov = tf.matmul(self.h, self.h,
                                        transpose_b=True)  # lower*upper
            learned_log_det_inv_cov = 2 * tf.reduce_sum(log_h_diag, -1)

            self.learned_inv_covs.append(learned_inv_cov)
            self.learned_log_det_inv_covs.append(learned_log_det_inv_cov)

        return self.learned_inv_covs, self.learned_log_det_inv_covs