Example #1
def test_find_activation_layer():
    conv1_filters = 1
    conv2_filters = 1
    dense_units = 1
    model = Sequential()
    model.add(
        Conv2D(conv1_filters, [3, 3],
               input_shape=(28, 28, 1),
               data_format="channels_last",
               name='conv_1'))
    model.add(Activation('relu', name='act_1'))
    model.add(MaxPool2D((2, 2), name='pool_1'))
    model.add(
        Conv2D(conv2_filters, [3, 3],
               data_format="channels_last",
               name='conv_2'))
    model.add(Activation('relu', name='act_2'))
    model.add(MaxPool2D((2, 2), name='pool_2'))
    model.add(Flatten(name='flat_1'))
    model.add(Dense(dense_units, name='dense_1'))
    model.add(Activation('relu', name='act_3'))
    model.add(Dense(10, name='dense_2'))
    model.add(Activation('softmax', name='act_4'))
    assert find_activation_layer(model.get_layer('conv_1'),
                                 0) == (model.get_layer('act_1'), 0)
    assert find_activation_layer(model.get_layer('conv_2'),
                                 0) == (model.get_layer('act_2'), 0)
    assert find_activation_layer(model.get_layer('dense_1'),
                                 0) == (model.get_layer('act_3'), 0)
    assert find_activation_layer(model.get_layer('dense_2'),
                                 0) == (model.get_layer('act_4'), 0)
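The test above pins down the contract of find_activation_layer: given a layer and a node index, it should return the nearest downstream Activation layer together with that index. A minimal sketch of such a helper, assuming a strictly sequential graph in which each layer has a single outbound node (the real implementation may traverse arbitrary graphs):

from tensorflow.keras.layers import Activation

def find_activation_layer_sketch(layer, node_index):
    # hypothetical re-implementation for illustration only: follow the single
    # outbound connection of each layer until an Activation layer is reached
    current = layer
    while not isinstance(current, Activation):
        current = current.outbound_nodes[0].outbound_layer
    return current, node_index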
Example #2
def build_model(img_rows, img_cols, optimizer='adam'):
    '''
    This function creates the CNN model.
    '''
    # create model
    model = Sequential()
    # add layers
    model.add(Conv2D(filters=32, kernel_size=(5, 5),
                     activation='relu', padding='same',
                     strides=2,
                     input_shape=(img_rows, img_cols, 1)))
    model.add(Conv2D(filters=32, kernel_size=(5, 5),
                     activation='relu', padding='same',
                     strides=2))
    model.add(MaxPool2D(pool_size=(3, 3), padding='same'))

    model.add(Dropout(0.5))

    model.add(Conv2D(filters=64, strides=2, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(filters=64, strides=2, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
    # avoid overfitting
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))  # binary prediction

    # compile model
    model.compile(loss=keras.losses.binary_crossentropy,
                  optimizer=optimizer,
                  metrics=['acc'])
    return model
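A minimal usage sketch for build_model above (the 28x28 grayscale input size is illustrative, and x_train/y_train are assumed to be prepared elsewhere):

model = build_model(img_rows=28, img_cols=28, optimizer='adam')
model.summary()
# model.fit(x_train, y_train, epochs=5, batch_size=32)  # y_train: binary labels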
Example #3
def build_model():
    model = Sequential()
    model.add(Conv2D(64, (5, 5), (1, 1), "SAME", activation="relu", input_shape=(306, 408, 3)))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(64, (5, 5), (1, 1), "SAME", activation="relu"))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(64, (5, 5), padding="SAME", activation='relu'))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Conv2D(16, (5, 5), padding="SAME", activation='relu'))
    model.add(MaxPool2D((3, 3), (2, 2), 'same'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(512, activation='relu'))
    model.add(Dense(8, activation='relu'))
    optimizer = Adadelta()
    model.compile(optimizer, loss=mean_squared_error)
    model.summary()
    train_X, train_y = GET_DATA.get_batches_data()
    cost_values = []
    for step in range(1000):
        cost = model.train_on_batch(train_X, train_y)
        cost_values.append(cost)
        if step % 10 == 0:
            print("step %d , cost value is %.3f" % (step, cost))
    model.save("./model1.h5")
    plt.plot(cost_values)
    plt.show()
Example #4
def createModel(X_train):
    model = Sequential()
    # encode
    model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='same',
                     activation='relu',
                     input_shape=(X_train.shape[1], X_train.shape[2], X_train.shape[3])))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same',
                     activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # decode
    model.add(Conv2D(filters=16, strides=(2, 2), kernel_size=(3, 3), padding='same',
                     activation='relu'))
    # model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='same',
                     activation='relu'))
    model.add(UpSampling2D((2, 2)))

    # fully connected
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))

    # define the optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)

    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
Example #5
def squeezenet(inputs, num_classes=1000, include_top=False):

    with tf.name_scope('SqueezeNet'):
        fine_grained_layers = []

        # define the model of SqueezeNet
        x = Conv2D(64, (3, 3), strides=(2, 2), padding='valid',
                   name='conv1')(inputs)
        x = Activation('relu', name='relu_conv1')(x)
        x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)

        x = fire_module(x, fire_id=2, squeeze=16, expand=64)
        x = fire_module(x, fire_id=3, squeeze=16, expand=64)
        x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

        x = fire_module(x, fire_id=4, squeeze=32, expand=128)
        x = fire_module(x, fire_id=5, squeeze=32, expand=128)
        fine_grained_layers.append(x)
        x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

        x = fire_module(x, fire_id=6, squeeze=48, expand=192)
        x = fire_module(x, fire_id=7, squeeze=48, expand=192)
        x = fire_module(x, fire_id=8, squeeze=64, expand=256)
        x = fire_module(x, fire_id=9, squeeze=64, expand=256)

        if include_top:
            x = Conv2D(num_classes, (1, 1), padding='valid', name='conv10')(x)
            x = Activation('relu', name='relu_conv10')(x)
            x = GlobalAvgPool2D()(x)
            x = Activation('softmax', name='loss')(x)

        return x, fine_grained_layers
Example #6
    def __init__(self, optimizer=Adam(), loss=categorical_crossentropy, metrics=[categorical_accuracy, 'top_k_categorical_accuracy']):
        super().__init__(optimizer=optimizer, loss=loss, metrics=metrics, MODEL_NAME=CIFAR_100_CONV_NAME)
        self.sequential_layers = [
            Conv2D(64, [3, 3], padding='same', activation='elu', kernel_regularizer=l2(), input_shape=(32,32,3)),
            BatchNormalization(),
            Conv2D(64, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            MaxPool2D(),
            Dropout(rate=0.3),

            Conv2D(128, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            Conv2D(128, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            MaxPool2D(),
            Dropout(rate=0.4),

            Conv2D(256, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            Conv2D(256, [3, 3], padding='same', activation='elu', kernel_regularizer=l2()),
            BatchNormalization(),
            MaxPool2D(),
            Dropout(rate=0.5),

            Flatten(),
            Dense(100, activation='softmax')
        ]
Example #7
def default_linear():
    "fully connected version of the default linear model"
    img_in = Input(shape=(224, 224, 3), name='img_in')
    x = img_in

    # Convolution2D class name is an alias for Conv2D
    x = Convolution2D(filters=64, kernel_size=(5, 5), strides=(2, 2), activation='elu')(x) #output shape 110x110
    
    x = Convolution2D(filters=64, kernel_size=(3, 3), strides=(2, 2), activation='elu')(x) #output shape 27x27
    x = MaxPool2D(pool_size=(2,2))(x) 
    x = Convolution2D(filters=64, kernel_size=(3, 3), strides=(2, 2), activation='relu')(x) #output shape 7x7
    x = MaxPool2D(pool_size=(2,2))(x)
    x = Convolution2D(filters=5, kernel_size=(3,3), strides=(2, 2), activation='relu')(x)
    x = MaxPool2D(pool_size=(2,2))(x) #output 5x5
    control_out = Flatten(name='control_out')(x)
    # control_out = Dense(units=4, activation='relu', name='control_out')(x)

    # continuous output of throttle, possibly for later
    # throttle_out = Dense(units=1, activation='linear', name='throttle_out')(x)

    model = Model(inputs=[img_in], outputs=[control_out])

    model.compile(optimizer='adam',
                  loss={'control_out': 'categorical_crossentropy'},
                  metrics=['acc'])

    return model
Example #8
def Mildnet_vgg16_big():
    vgg_model = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))

    for layer in vgg_model.layers[:10]:
        layer.trainable = False

    intermediate_layer_outputs = get_layers_output_by_name(vgg_model,
                                                           ["block1_pool", "block2_pool", "block3_pool", "block4_pool"])
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    for layer_name, output in intermediate_layer_outputs.items():
        output = GlobalAveragePooling2D()(output)
        convnet_output = concatenate([convnet_output, output])

    convnet_output = Dense(2048, activation='relu')(convnet_output)
    convnet_output = Dropout(0.6)(convnet_output)
    convnet_output = Dense(2048, activation='relu')(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(convnet_output)

    first_conv = Conv2D(96, kernel_size=(8, 8), strides=(16, 16), padding='same')(vgg_model.input)
    first_max = MaxPool2D(pool_size=(3, 3), strides=(4, 4), padding='same')(first_conv)
    first_max = Flatten()(first_max)
    first_max = Lambda(lambda x: K.l2_normalize(x, axis=1))(first_max)

    second_conv = Conv2D(96, kernel_size=(8, 8), strides=(32, 32), padding='same')(vgg_model.input)
    second_max = MaxPool2D(pool_size=(7, 7), strides=(2, 2), padding='same')(second_conv)
    second_max = Flatten()(second_max)
    second_max = Lambda(lambda x: K.l2_normalize(x, axis=1))(second_max)

    merge_one = concatenate([first_max, second_max])
    merge_two = concatenate([merge_one, convnet_output], axis=1)
    emb = Dense(4096)(merge_two)
    l2_norm_final = Lambda(lambda x: K.l2_normalize(x, axis=1))(emb)
    final_model = tf.keras.models.Model(inputs=vgg_model.input, outputs=l2_norm_final)

    return final_model
Example #9
def get_model():
    input_shape = (32, 32, 3)
    model = Sequential([
        Conv2D(16, (3, 3),
               input_shape=input_shape,
               padding='same',
               activation='relu'),
        Dropout(0.2, noise_shape=None, seed=None),
        MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        Conv2D(32, (3, 3), activation='relu', padding='same'),
        Dropout(0.2, noise_shape=None, seed=None),
        MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        Conv2D(64, (3, 3), activation='relu', padding='same'),
        Dropout(0.2, noise_shape=None, seed=None),
        MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
        Flatten(),
        Dropout(0.2, noise_shape=None, seed=None),
        Dense(64, activation='relu'),
        Dropout(0.2, noise_shape=None, seed=None),
        Dense(10, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print('Gamma'.ljust(40, '.'))
    print(model)
    return model
Example #10
def ranknet():
    vgg_model = VGG19(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.5)(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(convnet_output)

    s1 = MaxPool2D(pool_size=(4, 4), strides=(4, 4), padding='valid')(vgg_model.input)
    s1 = ZeroPadding2D(padding=(4, 4), data_format=None)(s1)
    s1 = Conv2D(96, kernel_size=(8, 8), strides=(4, 4), padding='valid')(s1)
    s1 = ZeroPadding2D(padding=(2, 2), data_format=None)(s1)
    s1 = MaxPool2D(pool_size=(7, 7), strides=(4, 4), padding='valid')(s1)
    s1 = Flatten()(s1)

    s2 = MaxPool2D(pool_size=(8, 8), strides=(8, 8), padding='valid')(vgg_model.input)
    s2 = ZeroPadding2D(padding=(4, 4), data_format=None)(s2)
    s2 = Conv2D(96, kernel_size=(8, 8), strides=(4, 4), padding='valid')(s2)
    s2 = ZeroPadding2D(padding=(1, 1), data_format=None)(s2)
    s2 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='valid')(s2)
    s2 = Flatten()(s2)

    merge_one = concatenate([s1, s2])
    merge_one_norm = Lambda(lambda x: K.l2_normalize(x, axis=1))(merge_one)
    merge_two = concatenate([merge_one_norm, convnet_output], axis=1)
    emb = Dense(4096)(merge_two)
    l2_norm_final = Lambda(lambda x: K.l2_normalize(x, axis=1))(emb)

    final_model = tf.keras.models.Model(inputs=vgg_model.input, outputs=l2_norm_final)

    return final_model
Example #11
def visnet_model():
    vgg_model = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
    convnet_output = GlobalAveragePooling2D()(vgg_model.output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.6)(convnet_output)
    convnet_output = Dense(4096, activation='relu')(convnet_output)
    convnet_output = Dropout(0.6)(convnet_output)
    convnet_output = Lambda(lambda x: K.l2_normalize(x, axis=1))(convnet_output)

    first_conv = Conv2D(96, kernel_size=(8, 8), strides=(16, 16), padding='same')(vgg_model.input)
    first_max = MaxPool2D(pool_size=(3, 3), strides=(4, 4), padding='same')(first_conv)
    first_max = Flatten()(first_max)
    first_max = Lambda(lambda x: K.l2_normalize(x, axis=1))(first_max)

    second_conv = Conv2D(96, kernel_size=(8, 8), strides=(32, 32), padding='same')(vgg_model.input)
    second_max = MaxPool2D(pool_size=(7, 7), strides=(2, 2), padding='same')(second_conv)
    second_max = Flatten()(second_max)
    second_max = Lambda(lambda x: K.l2_normalize(x, axis=1))(second_max)

    merge_one = concatenate([first_max, second_max])
    merge_two = concatenate([merge_one, convnet_output], axis=1)
    emb = Dense(4096)(merge_two)
    l2_norm_final = Lambda(lambda x: K.l2_normalize(x, axis=1))(emb)
    final_model = tf.keras.models.Model(inputs=vgg_model.input, outputs=l2_norm_final)

    return final_model
Example #12
    def build(self):
        inLayer = Input(self.inputShape, self.batchSize)
        x = conv_block(32, kernelSize=3)(inLayer, training=self.training)
        x = MaxPool2D((2, 2), strides=(2, 2))(x)
        x = conv_block(64, kernelSize=3)(x, training=self.training)
        x = MaxPool2D((2, 2), strides=(2, 2))(x)
        x = conv_block(128, kernelSize=3)(x, training=self.training)
        x = conv_block(64, kernelSize=1)(x, training=self.training)
        x = conv_block(128, kernelSize=3)(x, training=self.training)
        x = MaxPool2D((2, 2), strides=(2, 2))(x)
        x = conv_block(256, kernelSize=3)(x, training=self.training)
        x = conv_block(128, kernelSize=1)(x, training=self.training)
        x = conv_block(256, kernelSize=3)(x, training=self.training)
        x = MaxPool2D((2, 2), strides=(2, 2))(x)
        x = conv_block(512, kernelSize=3)(x, training=self.training)
        x = conv_block(256, kernelSize=1)(x, training=self.training)
        x = conv_block(512, kernelSize=3)(x, training=self.training)
        x = conv_block(256, kernelSize=1)(x, training=self.training)
        x = conv_block(512, kernelSize=3)(x, training=self.training)
        x = MaxPool2D((2, 2), strides=(2, 2))(x)
        x = conv_block(1024, kernelSize=3)(x, training=self.training)
        x = conv_block(512, kernelSize=1)(x, training=self.training)
        x = conv_block(1024, kernelSize=3)(x, training=self.training)
        x = conv_block(512, kernelSize=1)(x, training=self.training)
        x = conv_block(1024, kernelSize=3)(x, training=self.training)
        mean = Conv2D(filters=self.latentSize, kernel_size=(1, 1),
                      padding='same')(x)
        mean = GlobalAveragePooling2D()(mean)
        logvar = Conv2D(filters=self.latentSize, kernel_size=(1, 1),
                        padding='same')(x)
        logvar = GlobalAveragePooling2D()(logvar)
        sample = latent_vector(self.latentConstraints, self.beta)([mean, logvar], training=self.training)
        return Model(inputs=inLayer, outputs=sample)
Example #13
def get_model():
    img_tensor = Input(shape=(64, 64, 3))
    x = Conv2D(filters=32, kernel_size=(5, 5), strides=1,
               padding='same')(img_tensor)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = MaxPool2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    # AvgPool2D rather than MaxPool2D: the blurred output of average pooling,
    # intuitively, gives a better estimate of the filters used than the sharp
    # output of max pooling, because averaging aggregates neighboring colors
    # while max outputs are often dominated by edge responses.
    x = AvgPool2D(pool_size=(2, 2), strides=1, padding='valid')(x)
    x = Flatten()(x)
    x = Dense(units=32, activation='relu')(x)
    x = Dropout(0.25)(x)
    predicted_class = Dense(units=num_classes, activation='softmax')(x)

    model = Model(inputs=[img_tensor], outputs=[predicted_class])

    return model
Example #14
def mnist(data_format):

    # pylint: disable=no-member

    if data_format == 'channels_first':
        input_shape = [1, 28, 28]
    else:
        assert data_format == 'channels_last'
        input_shape = [28, 28, 1]

    return Sequential([
        Reshape(target_shape=input_shape, input_shape=(28 * 28, )),
        Conv2D(32,
               5,
               padding='same',
               data_format=data_format,
               activation=tf.nn.relu,
               kernel_initializer='random_uniform'),
        MaxPool2D((2, 2), (2, 2), padding='same', data_format=data_format),
        Conv2D(64,
               5,
               padding='same',
               data_format=data_format,
               activation=tf.nn.relu,
               kernel_initializer='random_uniform'),
        MaxPool2D((2, 2), (2, 2), padding='same', data_format=data_format),
        Flatten(),
        Dense(1024, activation=tf.nn.relu,
              kernel_initializer='random_uniform'),
        Dense(10, kernel_initializer='random_uniform')
    ])
Example #15
    def build_model(self):
        print('\n----------BUILD MODEL----------\n')
        inputs = Input(shape=(IMG_WIDTH, IMG_HEIGHT, IMG_CHANNEL))

        # ----------Block 1-----------
        print('\n---Block 1---')
        x = Conv2D(filters=32, kernel_size=(3, 3),
                   padding='same', activation='relu')(inputs)
        print('Conv2D:', x.get_shape().as_list())
        x = MaxPool2D(pool_size=(2, 2), strides=2)(x)
        print('MaxPool2D:', x.get_shape().as_list())

        # ----------Block 2-----------
        print('\n---Block 2---')
        x = Conv2D(filters=64, kernel_size=(3, 3),
                   padding='same', activation='relu')(x)
        print('Conv2D:', x.get_shape().as_list())
        x = MaxPool2D(pool_size=(2, 2), strides=2)(x)
        print('MaxPool2D:', x.get_shape().as_list())
        x = BatchNormalization()(x)

        # ----------Block 3-----------
        print('\n---Block 3---')
        x = Conv2D(filters=64, kernel_size=(3, 3),
                   padding='same', activation='relu')(x)
        print('Conv2D:', x.get_shape().as_list())
        x = MaxPool2D(pool_size=(2, 2), strides=2)(x)
        print('MaxPool2D:', x.get_shape().as_list())

        # ----------Merge 2 CNNs----------
        print('\n---Merge 2 CNNs---')
        detector = x
        detector_shape = detector.get_shape().as_list()
        extractor = x
        extractor_shape = extractor.get_shape().as_list()

        detector = Reshape([detector_shape[1] * detector_shape[2], detector_shape[3]])(detector)
        print('Detector:', detector.get_shape().as_list())
        extractor = Reshape([extractor_shape[1] * extractor_shape[2], extractor_shape[3]])(extractor)
        print('Extractor:', extractor.get_shape().as_list())

        bcnn = Lambda(_outer_product)([detector, extractor])
        print('Outer product:', bcnn.get_shape().as_list())

        bcnn = Reshape([detector_shape[3] * extractor_shape[3]])(bcnn)
        print('Reshape:', bcnn.get_shape().as_list())
        bcnn = Lambda(_signed_sqrt)(bcnn)
        print('Signed square root:', bcnn.get_shape().as_list())
        bcnn = Lambda(_l2_normalise)(bcnn)
        print('L2 normalisation:', bcnn.get_shape().as_list())

        # ----------Fully Connected----------
        bcnn = Dense(units=N_CLASSES, activation='softmax')(bcnn)
        print('Softmax:', bcnn.get_shape().as_list())

        bcnn_model = Model(inputs=[inputs], outputs=[bcnn])

        return bcnn_model
Example #16
def darknet19(inputs, num_classes=1000, include_top=False):
    """
    DarkNet-19 architecture definition.

    Args:
        inputs: input tensor.
        num_classes: number of output classes (used only when include_top is True).
        include_top: whether to append the classification head.

    Returns:
        x: model definition
        pass_through_layers: a list of fine-grained layers (for detection)
    """
    pass_through_layers = []
    with tf.name_scope('DarkNet19'):
        inputs = Preprocessor(yolov2_preprocess_func,
                              name='preprocessor')(inputs)
        x = conv_block(inputs, 32, (3, 3), name="Conv2d_1")
        x = MaxPool2D(strides=2, name="MaxPool2d_1")(x)

        x = conv_block(x, 64, (3, 3), name="Conv2d_2")
        x = MaxPool2D(strides=2, name="MaxPool2d_2")(x)

        x = conv_block(x, 128, (3, 3), name="Conv2d_3")
        x = conv_block(x, 64, (1, 1), name="Conv2d_4")
        x = conv_block(x, 128, (3, 3), name="Conv2d_5")
        x = MaxPool2D(strides=2, name="MaxPool2d_3")(x)

        x = conv_block(x, 256, (3, 3), name="Conv2d_6")
        x = conv_block(x, 128, (1, 1), name="Conv2d_7")
        x = conv_block(x, 256, (3, 3), name="Conv2d_8")
        x = MaxPool2D(strides=2, name="MaxPool2d_4")(x)

        x = conv_block(x, 512, (3, 3), name="Conv2d_9")
        x = conv_block(x, 256, (1, 1), name="Conv2d_10")
        x = conv_block(x, 512, (3, 3), name="Conv2d_11")
        x = conv_block(x, 256, (1, 1), name="Conv2d_12")
        x = conv_block(x, 512, (3, 3), name="Conv2d_13")
        with tf.name_scope('PassThroughLayers'):
            pass_through_layers.append(x)
        x = MaxPool2D(strides=2, name="MaxPool2d_5")(x)

        x = conv_block(x, 1024, (3, 3), name="Conv2d_14")
        x = conv_block(x, 512, (1, 1), name="Conv2d_15")
        x = conv_block(x, 1024, (3, 3), name="Conv2d_16")
        x = conv_block(x, 512, (1, 1), name="Conv2d_17")
        x = conv_block(x, 1024, (3, 3),
                       name="Conv2d_18")  # ---> feature extraction ends here

        if include_top:
            x = Conv2D(num_classes, (1, 1),
                       activation='linear',
                       padding='same')(x)
            x = GlobalAvgPool2D()(x)
            x = Activation(activation='softmax')(x)
            x = Model(inputs, x)

    return x, pass_through_layers
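For reference, a hedged usage sketch of darknet19 (conv_block, Preprocessor and yolov2_preprocess_func are assumed to be defined elsewhere in the same codebase; the 416x416 input size is the usual YOLOv2 choice, not something the code mandates):

from tensorflow.keras.layers import Input

inputs = Input(shape=(416, 416, 3))
features, pass_through = darknet19(inputs, include_top=False)
# with a 416x416 input, `features` is the 13x13 top feature map and
# `pass_through` holds the 26x26 layer used by YOLOv2's pass-through connection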
Example #17
def VGG11(input_shape, nb_classes, dropout=False, dropout_rate=0.2):
    """
    Creates a vgg11 network.

    Parameters
    ----------
    input_shape : tuple
        The shape of the input tensor not including the sample axis.
        Tensorflow uses the NHWC dimention ordering convention.
    nb_class : int
        The number of output class. The network will have this number of
        output nodes for one-hot encoding.
    dropout : bool
        Where or not to implement dropout in the fully-connected layers.
    dropout_rate : float
        Dropout rate.

    Returns
    -------
    keras.models.Sequential() :
        The create vgg11 network.
    """
    vgg11 = Sequential()

    # sub-net 1
    vgg11.add(
        Conv2D(filters=8,
               kernel_size=3,
               padding='same',
               activation='relu',
               input_shape=input_shape))
    vgg11.add(
        Conv2D(filters=8, kernel_size=3, padding='same', activation='relu'))
    vgg11.add(MaxPool2D(pool_size=2))

    # sub-net 2
    vgg11.add(
        Conv2D(filters=12, kernel_size=3, padding='same', activation='relu'))
    vgg11.add(
        Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
    vgg11.add(MaxPool2D(pool_size=2))

    # sub-net 3
    vgg11.add(
        Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
    vgg11.add(
        Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
    vgg11.add(MaxPool2D(pool_size=2))

    # dense layers
    vgg11.add(Flatten())
    vgg11.add(Dense(units=64, activation='relu'))
    if dropout:
        vgg11.add(Dropout(dropout_rate))
    vgg11.add(Dense(units=64, activation='relu'))
    if dropout:
        vgg11.add(Dropout(dropout_rate))
    vgg11.add(Dense(units=nb_classes, activation='softmax'))

    return vgg11
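A usage sketch for VGG11 (the 32x32 RGB input shape and 10 classes are illustrative):

model = VGG11(input_shape=(32, 32, 3), nb_classes=10, dropout=True, dropout_rate=0.2)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()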
Example #18
    def model_description(self):
        filter_sizes = [3, 4, 5]
        num_filters = 10
        drop = 0.1
        embedding_dim = self.EMBEDDING_DIMENTION

        inputs = Input(shape=(self.MAX_SEQ_LEN, ), dtype='int32')
        embedding = self.embedding_layer()(inputs)

        reshape = Reshape((self.MAX_SEQ_LEN, embedding_dim, 1))(embedding)

        conv_0 = Conv2D(num_filters,
                        kernel_size=(filter_sizes[0], embedding_dim),
                        padding='valid',
                        kernel_initializer='normal',
                        activation='relu')(reshape)
        conv_1 = Conv2D(num_filters,
                        kernel_size=(filter_sizes[1], embedding_dim),
                        padding='valid',
                        kernel_initializer='normal',
                        activation='relu')(reshape)
        conv_2 = Conv2D(num_filters,
                        kernel_size=(filter_sizes[2], embedding_dim),
                        padding='valid',
                        kernel_initializer='normal',
                        activation='relu')(reshape)

        maxpool_0 = MaxPool2D(pool_size=(self.MAX_SEQ_LEN - filter_sizes[0] + 1, 1),
                              strides=(1, 1),
                              padding='valid')(conv_0)
        maxpool_1 = MaxPool2D(pool_size=(self.MAX_SEQ_LEN - filter_sizes[1] + 1, 1),
                              strides=(1, 1),
                              padding='valid')(conv_1)
        maxpool_2 = MaxPool2D(pool_size=(self.MAX_SEQ_LEN - filter_sizes[2] + 1, 1),
                              strides=(1, 1),
                              padding='valid')(conv_2)

        concatenated_tensor = Concatenate(axis=1)(
            [maxpool_0, maxpool_1, maxpool_2])
        flatten = Flatten()(concatenated_tensor)
        dropout = Dropout(drop)(flatten)
        output = Dense(self.num_labels(), activation=self.ACTIVATION)(dropout)

        # this creates a model that maps the input sequence to the label distribution
        model = Model(inputs=inputs, outputs=output)

        # adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(optimizer='adam',
                      loss=self.LOSS_FUNCTION,
                      metrics=['accuracy'])

        return model
Example #19
def old_default_linear():
    img_in = Input(shape=(120, 160, 3), name='img_in')
    x = img_in

    # Convolution2D class name is an alias for Conv2D
    x = Convolution2D(filters=24,
                      kernel_size=(5, 5),
                      strides=(2, 2),
                      activation='elu')(x)
    x = MaxPool2D()(x)
    x = Convolution2D(filters=32,
                      kernel_size=(5, 5),
                      strides=(2, 2),
                      activation='elu')(x)
    x = MaxPool2D()(x)
    x = Convolution2D(filters=64,
                      kernel_size=(5, 5),
                      strides=(2, 2),
                      activation='elu')(x)
    x = Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      activation='elu')(x)
    x = Convolution2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      activation='elu')(x)

    x = Flatten(name='flattened')(x)
    x = Dense(units=100, activation='linear')(x)
    x = Dropout(rate=.1)(x)
    x = Dense(units=50, activation='linear')(x)
    x = Dropout(rate=.1)(x)
    # continuous output of the angle
    angle_out = Dense(units=1, activation='linear', name='angle_out')(x)

    # continuous output of throttle
    throttle_out = Dense(units=1, activation='linear', name='throttle_out')(x)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])

    model.compile(optimizer='adam',
                  loss={
                      'angle_out': 'mean_squared_error',
                      'throttle_out': 'mean_squared_error'
                  },
                  loss_weights={
                      'angle_out': 0.5,
                      'throttle_out': .5
                  })

    return model
Example #20
    def encoder(x):

        enc_1 = Conv2D(input_shape=input_shape,
                       filters=64,
                       kernel_size=3,
                       padding='same',
                       kernel_regularizer=regularizers.l2(weight_decay),
                       name='conv_1')(x)
        enc_1 = BatchNormalization(name='batch_1')(enc_1)
        enc_1 = Activation('relu')(enc_1)
        enc_1 = Conv2D(filters=64,
                       kernel_size=3,
                       padding='same',
                       kernel_regularizer=regularizers.l2(weight_decay))(enc_1)
        enc_1 = BatchNormalization()(enc_1)
        enc_1 = Activation('relu')(enc_1)

        max_1 = MaxPool2D(strides=2, padding='same')(enc_1)

        enc_2 = Conv2D(filters=128,
                       kernel_size=3,
                       padding='same',
                       kernel_regularizer=regularizers.l2(weight_decay),
                       name='conv_2')(max_1)
        enc_2 = BatchNormalization(name='batch_2')(enc_2)
        enc_2 = Activation('relu')(enc_2)
        enc_2 = Conv2D(filters=128,
                       kernel_size=3,
                       padding='same',
                       kernel_regularizer=regularizers.l2(weight_decay))(enc_2)
        enc_2 = BatchNormalization()(enc_2)
        enc_2 = Activation('relu')(enc_2)

        max_2 = MaxPool2D(strides=2, padding='same')(enc_2)

        enc_4 = Conv2D(filters=256,
                       kernel_size=3,
                       padding='same',
                       kernel_regularizer=regularizers.l2(weight_decay),
                       name='conv_3')(max_2)
        enc_4 = BatchNormalization(name='batch_3')(enc_4)
        enc_4 = Activation('relu')(enc_4)
        enc_4 = Conv2D(filters=256,
                       kernel_size=3,
                       padding='same',
                       kernel_regularizer=regularizers.l2(weight_decay))(enc_4)
        enc_4 = BatchNormalization()(enc_4)
        enc_4 = Activation('relu')(enc_4)

        max_4 = MaxPool2D(strides=2, padding='same')(enc_4)

        return max_4, max_2, max_1
Example #21
    def build(self, x, nb_classes):
        _x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        _x = Conv2D(16, (3, 3), activation='relu', padding='same')(_x)
        _x = MaxPool2D()(_x)
        _x = Conv2D(32, (3, 3), activation='relu', padding='same')(_x)
        _x = Conv2D(32, (3, 3), activation='relu', padding='same')(_x)
        _x = MaxPool2D()(_x)
        feature_map = _x
        # _x = GlobalAveragePooling2D()(_x)
        _x = Flatten()(_x)
        _x = Dense(512, activation='relu')(_x)
        logits = Dense(nb_classes, activation=None)(_x)
        outputs = Activation('softmax')(logits)
        return feature_map, logits, outputs
Example #22
    def Build(self):
        # create the input layer for feeding the network
        inLayer = Input(self.inputShape, self.batchSize)
        net = ConvBnLRelu(32, kernelSize=3)(inLayer,
                                            training=self.training)  # 1
        net = MaxPool2D((2, 2), strides=(2, 2))(net)

        net = ConvBnLRelu(64, kernelSize=3)(net, training=self.training)  # 2
        net = MaxPool2D((2, 2), strides=(2, 2))(net)

        net = ConvBnLRelu(128, kernelSize=3)(net, training=self.training)  # 3
        net = ConvBnLRelu(64, kernelSize=1)(net, training=self.training)  # 4
        net = ConvBnLRelu(128, kernelSize=3)(net, training=self.training)  # 5
        net = MaxPool2D((2, 2), strides=(2, 2))(net)

        net = ConvBnLRelu(256, kernelSize=3)(net, training=self.training)  # 6
        net = ConvBnLRelu(128, kernelSize=1)(net, training=self.training)  # 7
        net = ConvBnLRelu(256, kernelSize=3)(net, training=self.training)  # 8
        net = MaxPool2D((2, 2), strides=(2, 2))(net)

        net = ConvBnLRelu(512, kernelSize=3)(net, training=self.training)  # 9
        net = ConvBnLRelu(256, kernelSize=1)(net, training=self.training)  # 10
        net = ConvBnLRelu(512, kernelSize=3)(net, training=self.training)  # 11
        net = ConvBnLRelu(256, kernelSize=1)(net, training=self.training)  # 12
        net = ConvBnLRelu(512, kernelSize=3)(net, training=self.training)  # 13
        net = MaxPool2D((2, 2), strides=(2, 2))(net)

        net = ConvBnLRelu(1024, kernelSize=3)(net,
                                              training=self.training)  # 14
        net = ConvBnLRelu(512, kernelSize=1)(net, training=self.training)  # 15
        net = ConvBnLRelu(1024, kernelSize=3)(net,
                                              training=self.training)  # 16
        net = ConvBnLRelu(512, kernelSize=1)(net, training=self.training)  # 17
        net = ConvBnLRelu(1024, kernelSize=3)(net,
                                              training=self.training)  # 18

        # variational encoder output (distributions)
        mean = Conv2D(filters=self.latentSize,
                      kernel_size=(1, 1),
                      padding='same')(net)
        mean = GlobalAveragePooling2D()(mean)
        logvar = Conv2D(filters=self.latentSize,
                        kernel_size=(1, 1),
                        padding='same')(net)
        logvar = GlobalAveragePooling2D()(logvar)

        sample = SampleLayer(self.latentConstraints,
                             self.beta)([mean, logvar], training=self.training)

        return Model(inputs=inLayer, outputs=sample)
Example #23
def define_sequencial_model():
    # building a linear stack of layers with the sequential model
    model = Sequential()

    # convolutional layer
    model.add(
        Conv2D(50,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu',
               input_shape=(224, 224, 3)))

    # convolutional layer
    model.add(
        Conv2D(75,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(
        Conv2D(125,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same',
               activation='relu'))
    model.add(MaxPool2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    # flatten output of conv
    model.add(Flatten())

    # hidden layer
    model.add(Dense(500, activation='relu'))
    model.add(Dropout(0.4))
    model.add(Dense(250, activation='relu'))
    model.add(Dropout(0.3))
    # output layer
    model.add(Dense(2, activation='softmax'))

    # compiling the sequential model
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer='adam')

    # the model is now ready to be trained (e.g. for 10 epochs via model.fit)
    return model
Example #24
def create_alex_net(input_shape, out_dense, scale=True):
    model = Sequential()

    # model.add(input_shape=(32, 32, 3)))
    # Layer 1
    # model.add( Conv2D(48, kernel_size=(3,3),strides=(1,1), activation='relu', padding='same', input_shape=x_train.shape[1:] ) )
    model.add(
        Conv2D(48,
               kernel_size=(3, 3),
               strides=(1, 1),
               activation='relu',
               padding='same',
               input_shape=input_shape))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

    # Layer 2
    model.add(Conv2D(96, kernel_size=(3, 3), activation='relu',
                     padding='same'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

    # Layer 3
    model.add(
        Conv2D(192, kernel_size=(3, 3), activation='relu', padding='same'))

    # Layer 4
    model.add(
        Conv2D(192, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

    # Layer 5
    model.add(
        Conv2D(256, kernel_size=(3, 3), activation='relu', padding='same'))
    model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

    model.add(Flatten())

    # Layer 6
    model.add(Dense(512, activation='tanh'))

    # Layer 7
    model.add(Dense(256, activation='tanh'))

    # Prediction
    model.add(Dense(out_dense, activation='softmax'))

    model.summary()

    return model
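A usage sketch for create_alex_net (CIFAR-10-style shapes are illustrative; note that the scale argument is currently unused by the function):

model = create_alex_net(input_shape=(32, 32, 3), out_dense=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])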
Example #25
def keras_estimator(model_dir, config, learning_rate):
    """Creates a CNN using Keras.

    This function creates a CNN using TensorFlow's Keras API. The Keras model is
    converted to a Tensorflow Estimator so that it can be consumed by
    SageMaker's sagemaker.tensorflow.TensorFlow API.

    Args:
      model_dir: (str) File path where training files will be written.
      config: (tf.estimator.RunConfig) Configuration options to save model.
      learning_rate: (float) Gradient Descent learning rate.

    Returns:
      A tf.estimator.Estimator wrapping the compiled Keras model.
    """

    # Input layer name must match the feature dictionary feeding the network
    # defined in the input_fn() / _parse_fun()
    inputs = Input(shape=(28, 28, 1), name='image_input')
    x = Conv2D(
        filters=32,
        kernel_size=[3, 3],
        padding='same',
        activation=tf.nn.relu)(inputs)
    x = MaxPool2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(
        filters=64,
        kernel_size=[3, 3],
        padding='same',
        activation=tf.nn.relu)(x)
    x = MaxPool2D(pool_size=(2, 2), strides=2)(x)
    x = Flatten()(x)
    x = Dense(128, activation=tf.nn.relu)(x)
    x = Dropout(rate=0.4)(x)
    output = Dense(10, activation=tf.nn.softmax)(x)
    model = Model(inputs, output)

    # Compile model with learning parameters.
    optimizer = Adam(lr=learning_rate)
    model.compile(
        optimizer=optimizer,
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    # Converts the Keras model to a TensorFlow Estimator
    estimator = model_to_estimator(
        keras_model=model, model_dir=model_dir, config=config)
    return estimator
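A hedged usage sketch for keras_estimator, assuming the TF 1.x-era tf.estimator workflow that model_to_estimator targets (the model directory, checkpoint interval and learning rate are illustrative):

import tensorflow as tf

config = tf.estimator.RunConfig(save_checkpoints_steps=500)
estimator = keras_estimator(model_dir='/tmp/mnist_model', config=config, learning_rate=1e-3)
# estimator.train(input_fn=train_input_fn, steps=1000)
# train_input_fn must yield a features dict keyed by 'image_input',
# matching the name given to the Input layer above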
Example #26
def new_model(dropout=True, dropout_rate=0.2):
    model = Sequential()

    # sub-net 1
    model.add(
        Conv2D(filters=16,
               kernel_size=3,
               padding='same',
               activation='relu',
               input_shape=img_shape))
    model.add(
        Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=2))

    # sub-net 2
    model.add(
        Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
    model.add(
        Conv2D(filters=32, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=2))

    # sub-net 3
    model.add(
        Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))
    model.add(
        Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPool2D(pool_size=2))

    # dense layers
    model.add(Flatten())
    model.add(BatchNormalization())
    model.add(Dense(units=64, activation='softplus'))
    if dropout:
        model.add(Dropout(dropout_rate))
    model.add(BatchNormalization())
    model.add(Dense(units=64, activation='softplus'))
    if dropout:
        model.add(Dropout(dropout_rate))
    model.add(BatchNormalization())
    model.add(Dense(units=6, activation='softplus'))
    if dropout:
        model.add(Dropout(dropout_rate))
    model.add(BatchNormalization())
    model.add(Dense(units=1, activation=expontial))

    # Compile Model
    model.compile(optimizer='adam', loss='mean_absolute_percentage_error')
    # model.compile(optimizer='adam', loss='mean_squared_logarithmic_error')
    # model.compile(optimizer='adam', loss=mean_scaled_square_error)

    return model
Example #27
    def n_grams_channel(self, inputs, n):
        channel = Conv2D(1,
                         kernel_size=(n, self.embedding_dim),
                         activation='relu')(inputs)
        channel_mp = MaxPool2D(pool_size=(channel.shape[1], 1))(channel)
        channel_final = Flatten()(channel_mp)
        return channel_final
Example #28
    def __init__(self, n_channels):
        super(SNNonLocalBlock, self).__init__()
        self.theta = SNConv2D(n_channels // 8,
                              1,
                              strides=1,
                              padding='SAME',
                              use_bias=False)

        self.phi = SNConv2D(n_channels // 8,
                            1,
                            strides=1,
                            padding='SAME',
                            use_bias=False)

        self.max_pool = MaxPool2D(pool_size=2, strides=2)

        self.g = SNConv2D(n_channels // 2,
                          1,
                          strides=1,
                          padding='SAME',
                          use_bias=False)

        self.sigma = self.add_weight(shape=(),
                                     name="sigma",
                                     initializer='zeros',
                                     trainable=True)

        self.conv = SNConv2D(filters=n_channels,
                             kernel_size=1,
                             padding='VALID',
                             strides=1)
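The snippet above shows only the constructor. A call() in the spirit of the SAGAN-style non-local block these layers parameterise might look like the sketch below (channels-last NHWC input with static spatial dimensions is assumed; this is an illustration, not the original repository's forward pass):

import tensorflow as tf

    def call(self, x):
        # theta/phi/g projections of the self-attention (non-local) block;
        # phi and g are max-pooled, so their spatial extent is (h*w)/4
        b = tf.shape(x)[0]
        h, w, c = x.shape[1], x.shape[2], x.shape[3]
        theta = tf.reshape(self.theta(x), [b, h * w, c // 8])
        phi = tf.reshape(self.max_pool(self.phi(x)), [b, (h * w) // 4, c // 8])
        attn = tf.nn.softmax(tf.matmul(theta, phi, transpose_b=True))  # (b, hw, hw/4)
        g = tf.reshape(self.max_pool(self.g(x)), [b, (h * w) // 4, c // 2])
        o = tf.reshape(tf.matmul(attn, g), [b, h, w, c // 2])
        # project back to n_channels and add the gated residual
        return x + self.sigma * self.conv(o)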
Example #29
def AttentionResNetCifar10(shape=(32, 32, 3), n_channels=32, n_classes=10):
    """
    Attention-56 ResNet for Cifar10 Dataset
    https://arxiv.org/abs/1704.06904
    """
    input_ = Input(shape=shape)
    x = Conv2D(n_channels, (5, 5), padding='same')(input_)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=(2, 2))(x)  # 16x16

    x = residual_block(x, input_channels=32, output_channels=128)
    x = attention_block(x, encoder_depth=2)

    x = residual_block(x, input_channels=128, output_channels=256,
                       stride=2)  # 8x8
    x = attention_block(x, encoder_depth=1)

    x = residual_block(x, input_channels=256, output_channels=512,
                       stride=2)  # 4x4
    x = attention_block(x, encoder_depth=1)

    x = residual_block(x, input_channels=512, output_channels=1024)
    x = residual_block(x, input_channels=1024, output_channels=1024)
    x = residual_block(x, input_channels=1024, output_channels=1024)

    x = AveragePooling2D(pool_size=(4, 4), strides=(1, 1))(x)  # 1x1
    x = Flatten()(x)
    output = Dense(n_classes, activation='softmax')(x)

    model = Model(input_, output)
    return model
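A usage sketch for AttentionResNetCifar10 (residual_block and attention_block are assumed to be defined alongside this function):

model = AttentionResNetCifar10(shape=(32, 32, 3), n_channels=32, n_classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()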
Example #30
def featurenet_block(x, n_filters):
    """Add a set of layers that make up one unit of the featurenet backbone
    Args:
        x (layer): Keras layer object to pass to backbone unit
        n_filters (int): Number of filters to use for convolutional layers
    Returns:
        layer: Keras layer object
    """

    # conv set 1
    x = Conv2D(n_filters, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last')(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation('relu')(x)
    # conv set 2
    x = Conv2D(n_filters, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last')(x)
    x = BatchNormalization(axis=-1)(x)
    x = Activation('relu')(x)
    # Final max pooling stage
    x = MaxPool2D(pool_size=(2, 2), data_format='channels_last')(x)

    return x
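A sketch of stacking featurenet_block into a small backbone (the 128x128 single-channel input is illustrative):

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input(shape=(128, 128, 1))
x = featurenet_block(inputs, n_filters=32)  # 64x64 after pooling
x = featurenet_block(x, n_filters=64)       # 32x32 after pooling
backbone = Model(inputs, x)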