def my_cnn():
    # Define model
    model = keras.Sequential()
    model.add(
        layers.Convolution2D(16, (3, 3),
                             padding='same',
                             input_shape=(128, 128, 3),
                             activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        layers.Convolution2D(32, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        layers.Convolution2D(64, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(100, activation='softmax'))
    # Compile (optional): tf.train.AdamOptimizer was the TF1 API; in TF2 use
    # adam = tf.keras.optimizers.Adam()
    # model.compile(loss='categorical_crossentropy',
    #               optimizer=adam,
    #               metrics=['top_k_categorical_accuracy'])
    model.summary()  # summary() prints the table itself and returns None
    return model
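A minimal usage sketch for my_cnn(), assuming one-hot labels; x_train and
y_train are hypothetical arrays shaped (N, 128, 128, 3) and (N, 100):

import tensorflow as tf

model = my_cnn()
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['top_k_categorical_accuracy'])
# model.fit(x_train, y_train, epochs=10, batch_size=32)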
Example #2
def initialize_model():
    model = models.Sequential()
    model.add(
        layers.Convolution2D(16, (3, 3),
                             padding='same',
                             input_shape=(28, 28, 1),
                             activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        layers.Convolution2D(32, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        layers.Convolution2D(64, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(NUM_CLASSES, activation='softmax'))
    model.summary()
    # Compile with accuracy, Cohen's kappa, F1 and top-1 accuracy metrics
    f1 = tfa.metrics.F1Score(num_classes=NUM_CLASSES, average='macro')
    kappa = tfa.metrics.CohenKappa(num_classes=NUM_CLASSES)
    top = metrics.TopKCategoricalAccuracy(k=1)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', kappa, f1, top])
    return model
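initialize_model() relies on names defined elsewhere in its module; a minimal
setup sketch under that assumption (NUM_CLASSES is a stand-in value):

import tensorflow as tf
import tensorflow_addons as tfa  # provides F1Score and CohenKappa
from tensorflow.keras import layers, models, metrics

NUM_CLASSES = 10  # hypothetical; set to the real number of labels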
Example #3
    def convolution_block(self,
                          x,
                          nb_channels,
                          dropout_rate=None,
                          bottleneck=False,
                          weight_decay=1e-4):
        """
        Creates a convolution block consisting of BN-ReLU-Conv.
        Optional: bottleneck, dropout
        """

        # Bottleneck
        if bottleneck:
            bottleneckWidth = 4
            x = layers.BatchNormalization()(x)
            x = layers.Activation('relu')(x)
            x = layers.Convolution2D(
                nb_channels * bottleneckWidth, (1, 1),
                kernel_regularizer=tf.keras.regularizers.l2(weight_decay))(x)
            # Dropout
            if dropout_rate:
                x = layers.Dropout(dropout_rate)(x)

        # Standard (BN-ReLU-Conv)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Convolution2D(nb_channels, (3, 3), padding='same')(x)

        # Dropout
        if dropout_rate:
            x = layers.Dropout(dropout_rate)(x)

        return x
    def build_model_architecture(self):
        # base = self.pre_train_wh_fine_tuning()
        # self.model = models.Sequential()
        # self.model.add(base)
        # self.model.add(layers.Flatten())
        # self.model.add(layers.Dense(256, activation='relu'))
        # self.model.add(layers.Dense(64, activation='relu'))
        # self.model.add(layers.Dense(3, activation='softmax'))
        self.model = models.Sequential()
        self.model.add(layers.Convolution2D(16, 3, activation='relu', input_shape=self.input_shape))

        self.model.add(layers.Convolution2D(32, 3, activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        self.model.add(layers.Convolution2D(64, 3, activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        self.model.add(layers.Convolution2D(128, 3, activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        self.model.add(layers.BatchNormalization())
        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(256, activation='relu'))
        self.model.add(layers.Dense(64, activation='relu'))
        self.model.add(layers.Dense(3, activation='softmax'))
Example #5
def define_model(input_shape):
    model = keras.Sequential()
    # input_shape=X_train.shape[1:]
    model.add(
        layers.Convolution2D(16, (3, 3),
                             padding='same',
                             input_shape=input_shape,
                             activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        layers.Convolution2D(32, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        layers.Convolution2D(64, (3, 3), padding='same', activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation='relu'))
    model.add(layers.Dense(100, activation='softmax'))

    # Train model (tf.train.AdamOptimizer is the TF1 API; use the Keras one)
    adam = tf.keras.optimizers.Adam()
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['top_k_categorical_accuracy'])

    return model
def conv_block_td(input_tensor,
                  kernel_size,
                  filters,
                  stage,
                  block,
                  input_shape,
                  strides=(2, 2),
                  trainable=True):
    # conv block time distributed
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.TimeDistributed(layers.Convolution2D(
        nb_filter1, (1, 1),
        strides=strides,
        trainable=trainable,
        kernel_initializer='normal'),
                               input_shape=input_shape,
                               name=conv_name_base + '2a')(input_tensor)

    x = layers.TimeDistributed(layers.BatchNormalization(axis=3),
                               name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.TimeDistributed(layers.Convolution2D(
        nb_filter2, (kernel_size, kernel_size),
        padding='same',
        trainable=trainable,
        kernel_initializer='normal'),
                               name=conv_name_base + '2b')(x)

    x = layers.TimeDistributed(layers.BatchNormalization(axis=3),
                               name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.TimeDistributed(layers.Convolution2D(
        nb_filter3, (1, 1), kernel_initializer='normal'),
                               name=conv_name_base + '2c',
                               trainable=trainable)(x)

    x = layers.TimeDistributed(layers.BatchNormalization(axis=3),
                               name=bn_name_base + '2c')(x)

    shortcut = layers.TimeDistributed(layers.Convolution2D(
        nb_filter3, (1, 1),
        strides=strides,
        trainable=trainable,
        kernel_initializer='normal'),
                                      name=conv_name_base + '1')(input_tensor)

    shortcut = layers.TimeDistributed(layers.BatchNormalization(axis=3),
                                      name=bn_name_base + '1')(shortcut)
    x = layers.Add()([x, shortcut])
    x = layers.Activation('relu')(x)
    return x
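conv_block_td wraps every layer in TimeDistributed, so it expects a 5-D input
of shape (batch, time, height, width, channels). A hedged sketch of calling it
(all shapes and filter counts here are illustrative, not from the source):

import tensorflow as tf
from tensorflow.keras import layers

inp = layers.Input(shape=(4, 14, 14, 256))  # e.g. 4 ROI crops per image
out = conv_block_td(inp, kernel_size=3, filters=(64, 64, 256),
                    stage=5, block='a', input_shape=(4, 14, 14, 256),
                    strides=(2, 2))
model = tf.keras.Model(inp, out)  # output: (batch, 4, 7, 7, 256)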
def build(input_shape, classes):
    model = models.Sequential()
    model.add(
        layers.Convolution2D(20, (5, 5),
                             activation='relu',
                             input_shape=input_shape))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Convolution2D(50, (5, 5), activation='relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(500, activation='relu'))
    model.add(layers.Dense(classes, activation="softmax"))
    return model
def le_net_5(input_shape, dropout):
    m = models.Sequential()
    m.add(layers.Lambda(lambda x: x / 127.5 - 1., input_shape=input_shape))
    m.add(layers.Convolution2D(64, (5, 5), activation='relu'))
    m.add(layers.MaxPooling2D((2, 2)))
    m.add(layers.Dropout(dropout))
    m.add(layers.Convolution2D(36, (5, 5), activation='relu'))
    m.add(layers.MaxPooling2D((2, 2)))
    m.add(layers.Flatten())
    m.add(layers.Dense(120))
    m.add(layers.Dropout(dropout))
    m.add(layers.Dense(84))
    m.add(layers.Dense(1))
    return m
Example #9
def nin_block(num_channels, kernel_size, strides, padding):
    # A NiN block: one spatial convolution followed by two 1x1 convolutions,
    # returned as a Sequential sub-model
    net = models.Sequential()
    net.add(
        layers.Convolution2D(filters=num_channels,
                             padding=padding,
                             kernel_size=kernel_size,
                             strides=strides,
                             activation='relu'))
    net.add(
        layers.Convolution2D(filters=num_channels,
                             kernel_size=1,
                             activation='relu'))
    net.add(
        layers.Convolution2D(filters=num_channels,
                             kernel_size=1,
                             activation='relu'))
    return net
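A hedged sketch of stacking these blocks into a small NiN classifier (the
layer sizes follow the classic NiN configuration and are placeholders):

from tensorflow.keras import layers, models

def make_nin(num_classes=10):
    net = models.Sequential()
    net.add(nin_block(96, kernel_size=11, strides=4, padding='valid'))
    net.add(layers.MaxPooling2D(pool_size=3, strides=2))
    net.add(nin_block(256, kernel_size=5, strides=1, padding='same'))
    net.add(layers.MaxPooling2D(pool_size=3, strides=2))
    net.add(nin_block(384, kernel_size=3, strides=1, padding='same'))
    net.add(layers.MaxPooling2D(pool_size=3, strides=2))
    net.add(layers.Dropout(0.5))
    # one output channel per class, then global average pooling
    net.add(nin_block(num_classes, kernel_size=3, strides=1, padding='same'))
    net.add(layers.GlobalAveragePooling2D())
    return net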
Example #10
def convBlock(prev, sz, filters):
    conv_1 = layers.Convolution2D(filters, (sz, sz),
                                  padding="same",
                                  activation="relu")(prev)
    conv_1 = layers.Dropout(0.1)(conv_1)
    conv_1 = layers.BatchNormalization()(conv_1)
    return conv_1
Example #11
def create_bert_cnn_model(num_tokens: int, num_filters: int, filter_size: int,
                          embedding_dim: int, nn_hidden_dim: int,
                          dropout_prob: float):
    # define the encoder for bert model
    bert_encoder = TFBertModel.from_pretrained('bert-base-uncased')

    input_word_ids = tf.keras.Input(shape=(num_tokens, ),
                                    dtype=tf.int32,
                                    name="input_word_ids")
    bert_embedding = bert_encoder([input_word_ids])
    cnn_input = tf.expand_dims(bert_embedding[0], -1)
    cnn_output = layers.Convolution2D(filters=num_filters,
                                      kernel_size=[filter_size, embedding_dim],
                                      activation='relu')(cnn_input)
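    # NOTE: the fixed pooling window and reshape below assume
    # num_tokens - filter_size + 1 == 13 and num_filters == 200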
    max_pooled_output = tf.nn.max_pool(cnn_output,
                                       ksize=[1, 13, 1, 1],
                                       strides=[1, 1, 1, 1],
                                       padding='VALID')
    max_pooled_output = tf.reshape(max_pooled_output, [-1, 200])
    hidden_output = layers.Dense(nn_hidden_dim,
                                 activation='relu')(max_pooled_output)
    hidden_output = layers.Dropout(dropout_prob)(hidden_output)
    output = layers.Dense(1, activation='sigmoid')(hidden_output)
    model = tf.keras.Model(inputs=[input_word_ids], outputs=output)
    return model
Example #12
def downsamplingBlockWithLink(prev, sz, filters):
    link = convBlock(prev, sz, filters)

    res = link
    for _ in range(3):
        res = convBlock(res, sz, filters)
    res = layers.Convolution2D(filters, (2, 2), strides=2, padding="same")(res)
    return link, res
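A sketch of chaining these blocks into a U-Net-style encoder, keeping each
link for later skip connections (input size and filter counts are
illustrative):

from tensorflow.keras import layers

inp = layers.Input(shape=(128, 128, 3))
link1, x = downsamplingBlockWithLink(inp, 3, 16)  # 128x128 -> 64x64
link2, x = downsamplingBlockWithLink(x, 3, 32)    # 64x64 -> 32x32
link3, x = downsamplingBlockWithLink(x, 3, 64)    # 32x32 -> 16x16
# link1..link3 would feed the matching decoder stages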
def nvidia_model(input_shape, dropout):
    m = models.Sequential()
    m.add(layers.Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape))
    m.add(layers.Cropping2D(cropping=((70, 25), (0, 0))))
    m.add(layers.Convolution2D(24, 5, 2, activation='relu'))
    m.add(layers.Convolution2D(36, 5, 2, activation='relu'))
    m.add(layers.Convolution2D(48, 5, 2, activation='relu'))
    m.add(layers.Dropout(dropout))
    m.add(layers.Convolution2D(64, 3, activation='relu'))
    m.add(layers.Convolution2D(64, 3, activation='relu'))
    m.add(layers.Flatten())
    m.add(layers.Dense(100))
    m.add(layers.Dropout(dropout))
    m.add(layers.Dense(50))
    m.add(layers.Dense(10))
    m.add(layers.Dense(1))
    return m
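nvidia_model() regresses a single steering value, so mean squared error is
the usual loss; a hedged compile sketch (the 160x320 input is the common
simulator frame size, not taken from the source):

m = nvidia_model(input_shape=(160, 320, 3), dropout=0.5)
m.compile(loss='mse', optimizer='adam')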
Example #14
def make_sr_generator_model():
    lr_img = layers.Input(shape=(None, None, NUM_CHANNELS))
    #lr_img = layers.Input(shape=(1080, 1080, NUM_CHANNELS))
    ################################################################################
    ## Now that we have a low res image, we can start the actual generator ResNet ##
    ################################################################################

    x = layers.Convolution2D(64, (9, 9), (1, 1), padding='same')(lr_img)
    x = layers.ReLU()(x)

    b_prev = x

    #####################
    ## Residual Blocks ##
    #####################

    for i in range(B):
        b_curr = layers.Convolution2D(64, (3, 3), (1, 1),
                                      padding='same')(b_prev)
        b_curr = layers.BatchNormalization()(b_curr)
        b_curr = layers.ReLU()(b_curr)
        b_curr = layers.Convolution2D(64, (3, 3), (1, 1),
                                      padding='same')(b_curr)
        b_curr = layers.BatchNormalization()(b_curr)
        b_curr = layers.Add()([b_prev, b_curr])  #skip connection

        b_prev = b_curr

    res_out = b_curr  # Output of residual blocks

    x2 = layers.Convolution2D(64, (3, 3), (1, 1), padding='same')(res_out)
    x2 = layers.BatchNormalization()(x2)
    x = layers.Add()([x, x2])  #skip connection

    #######################################################
    ## Resolution-enhancing sub-pixel convolution layers ##
    #######################################################

    # Layer 1 (Half of the upsampling); takes x, the skip-connection sum above
    x = layers.Convolution2D(256, (3, 3), (1, 1), padding='same')(x)
    x = SubpixelConv2D(input_shape=(None, None, None, NUM_CHANNELS),
                       scale=DOWNSAMPLING_FACTOR / 2,
                       idx=0)(x)
    #x = Subpixel(256, kernel_size=(3,3), r=DOWNSAMPLING_FACTOR/2, padding='same', strides=(1,1))
    x = layers.ReLU()(x)

    # Layer 2 (Second half of the upsampling)
    x = layers.Convolution2D(256, (3, 3), (1, 1), padding='same')(x)
    x = SubpixelConv2D(input_shape=(None, None, None, NUM_CHANNELS /
                                    ((DOWNSAMPLING_FACTOR / 2)**2)),
                       scale=(DOWNSAMPLING_FACTOR / 2),
                       idx=1)(x)
    #x = Subpixel(256, kernel_size=(3,3), r=DOWNSAMPLING_FACTOR/2, padding='same', strides=(1,1))
    x = layers.ReLU()(x)

    generated_sr_image = layers.Convolution2D(3, (9, 9), (1, 1),
                                              padding='same')(x)
    output_shape = generated_sr_image.get_shape().as_list()
    #assert output_shape == [None, HR_IMG_HEIGHT, HR_IMG_WIDTH, NUM_CHANNELS]
    return Model(inputs=lr_img, outputs=generated_sr_image, name='generator')
Example #15
def make_sr_discriminator_model():
    inputs = layers.Input(shape=(HR_IMG_HEIGHT, HR_IMG_WIDTH, NUM_CHANNELS))
    # k3n64s1
    x = layers.Convolution2D(64, (3, 3), (1, 1), padding='same')(inputs)
    x = layers.LeakyReLU(alpha=0.2)(x)

    #################
    ## Conv Blocks ##
    #################

    # k3n64s2
    x = layers.Convolution2D(64, (3, 3), (2, 2), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    # k3n128s1
    x = layers.Convolution2D(128, (3, 3), (1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    # k3n128s2
    x = layers.Convolution2D(128, (3, 3), (2, 2), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    # k3n256s1
    x = layers.Convolution2D(256, (3, 3), (1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    # k3n256s2
    x = layers.Convolution2D(256, (3, 3), (2, 2), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    # k3n512s1
    x = layers.Convolution2D(512, (3, 3), (1, 1), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    # k3n512s2
    x = layers.Convolution2D(512, (3, 3), (2, 2), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    ################
    ## Dense Tail ##
    ################

    x = layers.Flatten()(x)  # flatten before the dense layers, as in SRGAN
    x = layers.Dense(1024)(x)
    x = layers.LeakyReLU(alpha=0.2)(x)

    outputs = layers.Dense(1, activation='sigmoid')(x)

    return Model(inputs=inputs, outputs=outputs, name='discriminator')
def simple_net(input_shape):
    m = models.Sequential()
    m.add(layers.Lambda(lambda x: x / 127.5 - 1., input_shape=input_shape))
    m.add(layers.Cropping2D(cropping=((50, 20), (0, 0))))
    m.add(layers.Convolution2D(24, (5, 5), activation='relu'))
    m.add(layers.MaxPooling2D())
    m.add(layers.Flatten())
    m.add(layers.Dense(120))
    m.add(layers.Dense(1))
    return m
Example #17
    def __init__(self, embedding_dim: int, filters: List[int],
                 out_channels: int):
        super(baseCNNmodel, self).__init__()

        # define the three convolutional layers with respective filter sizes
        self.filters = filters
        self.out_channels = out_channels
        self.conv1 = layers.Convolution2D(
            filters=out_channels,
            kernel_size=[filters[0], embedding_dim],
            activation='relu')
        self.conv2 = layers.Convolution2D(
            filters=out_channels,
            kernel_size=[filters[1], embedding_dim],
            activation='relu')
        self.conv3 = layers.Convolution2D(
            filters=out_channels,
            kernel_size=[filters[2], embedding_dim],
            activation='relu')
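Only the constructor is shown. A minimal call() sketch in the usual text-CNN
style, assuming the class subclasses tf.keras.Model, tf is tensorflow
imported at module level, and inputs are shaped
(batch, num_tokens, embedding_dim, 1); the method body is an assumption, not
taken from the source:

    def call(self, x):
        pooled = []
        for conv in (self.conv1, self.conv2, self.conv3):
            h = conv(x)  # -> (batch, num_tokens - filter_h + 1, 1, out_channels)
            # max-pool over all positions: one value per feature map
            pooled.append(tf.reduce_max(h, axis=[1, 2]))
        return tf.concat(pooled, axis=-1)  # (batch, 3 * out_channels)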
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2),
               trainable=True):
    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 3

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.Convolution2D(nb_filter1, (1, 1),
                             strides=strides,
                             name=conv_name_base + '2a',
                             trainable=trainable)(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Convolution2D(nb_filter2, (kernel_size, kernel_size),
                             padding='same',
                             name=conv_name_base + '2b',
                             trainable=trainable)(x)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Convolution2D(nb_filter3, (1, 1),
                             name=conv_name_base + '2c',
                             trainable=trainable)(x)
    x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = layers.Convolution2D(nb_filter3, (1, 1),
                                    strides=strides,
                                    name=conv_name_base + '1',
                                    trainable=trainable)(input_tensor)
    shortcut = layers.BatchNormalization(axis=bn_axis,
                                         name=bn_name_base + '1')(shortcut)

    x = layers.Add()([x, shortcut])
    x = layers.Activation('relu')(x)
    return x
def create_model():
    model = Sequential(
        [
            layers.Convolution2D(24, (5, 5), (2, 2), input_shape=(66, 200, 3), activation='elu'),
            layers.Convolution2D(36, (5, 5), (2, 2), activation='elu'),
            layers.Convolution2D(48, (5, 5), (2, 2), activation='elu'),
            layers.Convolution2D(64, (3, 3), activation='elu'),
            layers.Convolution2D(64, (3, 3), activation='elu'),
            
            layers.Flatten(),
            layers.Dense(100, activation='elu'),
            layers.Dense(50, activation='elu'),
            layers.Dense(10, activation='elu'),
            layers.Dense(1)
        ]
    ) 

    opt = Adam(learning_rate=0.0003)
    model.compile(loss='mse', optimizer=opt)
    return model
def identity_block_td(input_tensor,
                      kernel_size,
                      filters,
                      stage,
                      block,
                      trainable=True):
    # identity block time distributed
    nb_filter1, nb_filter2, nb_filter3 = filters

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = layers.TimeDistributed(layers.Convolution2D(
        nb_filter1, (1, 1), trainable=trainable, kernel_initializer='normal'),
                               name=conv_name_base + '2a')(input_tensor)

    x = layers.TimeDistributed(layers.BatchNormalization(axis=3),
                               name=bn_name_base + '2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.TimeDistributed(layers.Convolution2D(
        nb_filter2, (kernel_size, kernel_size),
        trainable=trainable,
        kernel_initializer='normal',
        padding='same'),
                               name=conv_name_base + '2b')(x)

    x = layers.TimeDistributed(layers.BatchNormalization(axis=3),
                               name=bn_name_base + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.TimeDistributed(layers.Convolution2D(
        nb_filter3, (1, 1), trainable=trainable, kernel_initializer='normal'),
                               name=conv_name_base + '2c')(x)
    x = layers.TimeDistributed(layers.BatchNormalization(axis=3),
                               name=bn_name_base + '2c')(x)

    x = layers.Add()([x, input_tensor])
    x = layers.Activation('relu')(x)

    return x
Example #21
    def build(input_shape, classes, just_fc=False):
        model = models.Sequential()

        if not just_fc:
            # CONV => RELU => POOL
            model.add(
                layers.Convolution2D(20, (5, 5),
                                     activation='relu',
                                     input_shape=input_shape))
            model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
            # CONV => RELU => POOL
            model.add(layers.Convolution2D(50, (5, 5), activation='relu'))
            model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
            # Flatten => RELU layers
            model.add(layers.Flatten())
        else:
            model.add(layers.Flatten(input_shape=input_shape))
        model.add(layers.Dense(500, activation='relu'))
        # a softmax classifier
        model.add(layers.Dense(classes, activation="softmax"))
        return model
Example #22
def build(input_shape, classes):
    model = models.Sequential()
    model.add(layers.Convolution2D(32, (3, 3), activation='relu',
                                   input_shape=input_shape))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(classes, activation='softmax'))
    return model
def build_model(input_shape, classes):
    model = models.Sequential()
    # 1st block
    model.add(layers.Convolution2D(32, (3, 3), padding='same', input_shape=input_shape, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Convolution2D(32, (3, 3), padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.2))
    # 2nd block
    model.add(layers.Convolution2D(64, (3, 3), padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Convolution2D(64, (3, 3), padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.3))
    # 3rd block
    model.add(layers.Convolution2D(128, (3, 3), padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Convolution2D(128, (3, 3), padding='same', activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.4))
    # dense
    model.add(layers.Flatten())
    model.add(layers.Dense(classes, activation='softmax'))
    model.summary()
    return model
    def build_model(self):
        """
        Build the model

        Returns:
            Model : Keras model instance
        """

        print('Creating DenseNet %s' % __version__)
        print('#############################################')
        print('Dense blocks: %s' % self.dense_blocks)
        print('Layers per dense block: %s' % self.dense_layers)
        print('#############################################')

        img_input = layers.Input(shape=self.input_shape, name='img_input')
        nb_channels = self.growth_rate

        # Initial convolution layer
        x = layers.Convolution2D(2 * self.growth_rate, (3, 3),
                                 padding='same',
                                 strides=(1, 1),
                                 kernel_regularizer=keras.regularizers.l2(
                                     self.weight_decay))(img_input)

        # Building dense blocks
        for block in range(self.dense_blocks - 1):
            # Add dense block
            x, nb_channels = self.dense_block(x, self.dense_layers[block],
                                              nb_channels, self.growth_rate,
                                              self.dropout_rate,
                                              self.bottleneck,
                                              self.weight_decay)

            # Add transition_block
            x = self.transition_layer(x, nb_channels, self.dropout_rate,
                                      self.compression, self.weight_decay)
            nb_channels = int(nb_channels * self.compression)

        # Add the last dense block; no transition follows, global average
        # pooling is applied instead
        x, nb_channels = self.dense_block(x, self.dense_layers[-1],
                                          nb_channels, self.growth_rate,
                                          self.dropout_rate, self.bottleneck,
                                          self.weight_decay)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.GlobalAveragePooling2D()(x)
        prediction = layers.Dense(self.nb_classes, activation='softmax')(x)

        return keras.Model(inputs=img_input,
                           outputs=prediction,
                           name='densenet')
    def __init__(self):
        # initialize CNN
        self.model = tf.keras.Sequential()
        # 1. 1st convolution layer
        # relu activation function -> avoids negative activations
        self.model.add(
            layers.Convolution2D(16, (3, 3),
                                 input_shape=(48, 48, 1),
                                 activation='relu')
        )  # number of filters, filter shape, input shape
        # 2. max pooling
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        # 3. 2nd convolution layer
        self.model.add(layers.Convolution2D(32, (3, 3), activation='relu'))
        # 4. max pooling
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        # 5. 3rd convolution layer
        self.model.add(layers.Convolution2D(64, (3, 3), activation='relu'))
        # 6. max pooling
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        # 7. flatten the max-pooling output - Flatten()
        self.model.add(layers.Flatten())
        # fully connected layers - Dense()
        # 8. input layer
        self.model.add(layers.Dense(units=512, activation='relu'))
        # 9. hidden layer
        self.model.add(layers.Dense(units=1024, activation='relu'))
        # 10. dropout
        self.model.add(layers.Dropout(0.3))
        # 11. output layer - emotion categories 0~6 => 7 output nodes
        #     (softmax rather than sigmoid, since the classes are mutually
        #     exclusive and the loss below expects a distribution over them)
        self.model.add(layers.Dense(units=7, activation='softmax'))
        # 12. compile CNN
        # the loss (cost) function measures error; training searches for the
        # weights with the lowest loss, and the optimizer updates the weights
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])
Example #26
    def transition_layer(self, x, nb_channels, dropout_rate=None, compression=1.0, weight_decay=1e-4):
        """
        Creates a transition layer between dense blocks: a 1x1 convolution
        followed by average pooling, which downsamples the feature maps.
        """

        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.Convolution2D(int(nb_channels * compression), (1, 1), padding='same')(x)

        # Adding dropout
        if dropout_rate:
            x = layers.Dropout(dropout_rate)(x)

        x = layers.AveragePooling2D((2, 2), strides=(2, 2))(x)
        return x
def test_lulc_cnn(num_classes=16, p=5, b=220, name='IndianPines'):
    
    model = tf.keras.Sequential([
            layers.BatchNormalization(axis=-1, input_shape=(p, p, b)),
            layers.Convolution2D(32, (3, 3), padding='same', activation='relu', data_format='channels_last'),
            layers.BatchNormalization(axis=-1),
            layers.MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', data_format='channels_last'),
            layers.Flatten(),
            layers.Dense(512),
            layers.Dropout(rate=0.2),
            layers.Dense(num_classes, activation='softmax')
        ], name=name)
    model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
                  loss=tf.keras.losses.categorical_crossentropy,
                  metrics=[tf.keras.metrics.categorical_accuracy])
    return model
    def create_model(self):
        # Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for
        # the three color channels: R, G, and B
        img_input = layers.Input(shape=(150, 150, 3))

        # First convolution extracts 16 filters that are 3x3
        # Convolution is followed by max-pooling layer with a 2x2 window
        x = layers.Conv2D(16, 3, activation='relu')(img_input)
        x = layers.MaxPooling2D(2)(x)

        # Second convolution extracts 32 filters that are 3x3
        # Convolution is followed by max-pooling layer with a 2x2 window
        x = layers.Conv2D(32, 3, activation='relu')(x)
        x = layers.MaxPooling2D(2)(x)

        # Third convolution extracts 64 filters that are 3x3
        # Convolution is followed by max-pooling layer with a 2x2 window
        x = layers.Convolution2D(64, 3, activation='relu')(x)
        x = layers.MaxPooling2D(2)(x)

        # Flatten feature map to a 1-dim tensor
        x = layers.Flatten()(x)

        # Create a fully connected layer with ReLU activation and 512 hidden units
        x = layers.Dense(512, activation='relu')(x)

        # Add a dropout rate of 0.5
        x = layers.Dropout(0.5)(x)

        # Create output layer with a single node and sigmoid activation
        output = layers.Dense(1, activation='sigmoid')(x)

        # Configure and compile the model
        model = Model(img_input, output)
        model.compile(
            loss='binary_crossentropy',
            optimizer=RMSprop(learning_rate=0.001),
            metrics=['acc']
        )
        return model
Example #29
def build_model(n_kernels, kernel_size, stride, n_dense):
    model = tf.keras.models.Sequential()
    model.add(layers.Convolution2D(filters=n_kernels,
                                   kernel_size=(kernel_size, kernel_size),
                                   activation='relu',
                                   input_shape=(16, 16, 1)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D(strides=(stride, stride)))
    model.add(layers.Dropout(rate=0.25))
    model.add(layers.Flatten())
    model.add(layers.Dense(n_dense, activation='relu'))
    model.add(layers.Dropout(rate=0.5))
    model.add(layers.Dense(10, activation='softmax'))

    adam_optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
    model.compile(optimizer=adam_optimizer, loss='categorical_crossentropy')

    # x_trn/y_trn, x_val/y_val and x_tst/y_tst are expected at module level
    annealer = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
    history = model.fit(x_trn, y_trn, epochs=2, batch_size=16, verbose=2,
                        validation_data=(x_val, y_val), callbacks=[annealer])

    tst_error = model.evaluate(x_tst, y_tst)
    trn_error = model.evaluate(x_trn, y_trn)
    n_params = model.count_params()
    return (tst_error, trn_error, n_params)
Example #30
def create_model(input_shape=INPUT_SHAPE_384_512):
    model = models.Sequential([
        layers.Convolution2D(
            64,
            3,
            strides=2,
            padding="same",
            activation="relu",
            input_shape=input_shape,
        ),
        layers.Convolution2D(64, 3, strides=2, activation="relu"),
        layers.MaxPooling2D(pool_size=2),
        layers.Dropout(0.2),
        layers.Convolution2D(128,
                             3,
                             strides=2,
                             padding="same",
                             activation="relu"),
        layers.Convolution2D(128, 3, strides=2, activation="relu"),
        layers.MaxPooling2D(pool_size=2),
        layers.Dropout(0.2),
        layers.Convolution2D(256,
                             3,
                             strides=2,
                             padding="same",
                             activation="relu"),
        layers.Convolution2D(256,
                             3,
                             strides=2,
                             padding="same",
                             activation="relu"),
        layers.MaxPooling2D(pool_size=2),
        layers.Dropout(0.2),
        layers.Flatten(),
        layers.Dense(2048, activation="relu"),
        layers.Dropout(0.2),
        layers.Dense(6, activation="softmax"),
    ])

    model.compile(
        optimizer="adam",
        loss=keras.losses.categorical_crossentropy,
        metrics=["accuracy"],
    )

    return model