Example #1

import tensorflow as tf
from tensorflow.keras.constraints import MaxNorm as maxnorm

def cnn1(time_periods,
         input_shape,
         number_of_sensors,
         number_of_classes,
         dropout_rate=0.5,
         optimizer='adam',
         activation='relu',
         weight_constraint=None,
         init_mode='glorot_uniform'):
    # maxnorm(None) fails when the constraint is applied, so only build it when one is given
    constraint = maxnorm(weight_constraint) if weight_constraint is not None else None
    model = tf.keras.Sequential()
    model.add(
        tf.keras.layers.Reshape((time_periods, number_of_sensors),
                                input_shape=(input_shape, )))
    model.add(
        tf.keras.layers.Conv1D(10,
                               10,
                               input_shape=(time_periods, number_of_sensors),
                               activation=activation,
                               kernel_constraint=constraint,
                               kernel_initializer=init_mode))
    model.add(
        tf.keras.layers.Conv1D(10,
                               10,
                               activation=activation,
                               kernel_constraint=constraint,
                               kernel_initializer=init_mode))
    model.add(tf.keras.layers.MaxPooling1D(4))
    model.add(
        tf.keras.layers.Conv1D(16,
                               10,
                               activation=activation,
                               kernel_constraint=constraint,
                               kernel_initializer=init_mode))
    model.add(
        tf.keras.layers.Conv1D(16,
                               10,
                               activation=activation,
                               kernel_constraint=constraint,
                               kernel_initializer=init_mode))
    model.add(tf.keras.layers.GlobalAveragePooling1D())
    model.add(tf.keras.layers.Dropout(dropout_rate))
    model.add(tf.keras.layers.Dense(number_of_classes, activation='softmax'))
    model.summary()  # summary() prints itself and returns None

    # a multi-class softmax output calls for categorical, not binary, cross-entropy
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc', 'mse'])

    return model
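
A hedged usage sketch: the sizes below are illustrative (120 time steps of 3-sensor data, flattened to 360 features, 6 classes), chosen so the four valid-padding convolutions still have time steps left to consume.

model = cnn1(time_periods=120,
             input_shape=360,  # 120 time steps * 3 sensors, flattened
             number_of_sensors=3,
             number_of_classes=6,
             weight_constraint=3)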

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, MaxPooling2D, AveragePooling2D,
                                     Dense, Dropout, Flatten)
from tensorflow.keras.constraints import MaxNorm as maxnorm
from tensorflow.keras.optimizers import SGD


def create_model(epochs=25):
    model = Sequential()
    # NOTE: (3, 32, 32) is a channels-first CIFAR-10 shape; the Keras default is
    # channels_last, so set the image data format accordingly
    model.add(
        Conv2D(32, (3, 3),
               input_shape=(3, 32, 32),
               padding='same',
               activation='relu',
               kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(
        Conv2D(32, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(
        Conv2D(64, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(
        Conv2D(128, (3, 3),
               activation='relu',
               padding='same',
               kernel_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))
    lrate = 0.01
    decay = lrate / epochs
    # 'decay' is the legacy per-update learning-rate decay argument; newer
    # Keras releases expect a LearningRateSchedule instead
    sgd = SGD(learning_rate=lrate, momentum=0.9, decay=decay, nesterov=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    return model
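
create_model takes epochs as an argument so it can be handed to a scikit-learn wrapper for hyperparameter search. A minimal sketch of that pattern, assuming the legacy tf.keras scikit-learn wrapper (removed from recent TensorFlow releases in favor of SciKeras) and illustrative grid values:

from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

clf = KerasClassifier(build_fn=create_model, epochs=25, batch_size=64, verbose=0)
grid = GridSearchCV(estimator=clf, param_grid={'epochs': [25, 50]}, cv=3)
# grid_result = grid.fit(X_train, y_train)  # X_train shaped (n, 3, 32, 32), y_train one-hot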
def seattle_model(input_width=32, input_height=32, feature_maps=32, feature_window_size=(5, 5), dropout1=0.2,
                  dense=128, dropout2=0.5, use_max_pooling=True, pool_size=(2, 2), optimizer='rmsprop'):
    model = Sequential()

    # First convolution block:
    # - feature_maps feature maps (each is a reduced-size convolution that detects a different feature)
    # - feature_window_size window over a 6-channel input
    model.add(Conv2D(feature_maps,
                     feature_window_size,
                     input_shape=(input_width, input_height, 6),
                     padding='same',
                     data_format='channels_last',
                     activation='relu'))

    # Second convolution block with the same number of feature maps and window size
    model.add(Conv2D(feature_maps,
                     feature_window_size,
                     padding='same',
                     data_format='channels_last',
                     activation='relu'))

    # Pooling layer
    if use_max_pooling:
        model.add(MaxPooling2D(pool_size=pool_size,
                               data_format='channels_last'))
    else:
        model.add(AveragePooling2D(pool_size=pool_size,
                                   data_format='channels_last'))

    model.add(Dropout(dropout1))

    model.add(Flatten())
    model.add(Dense(dense,
                    activation='relu',
                    kernel_constraint=maxnorm(3)))

    # Dropout before the output layer (50% by default)
    model.add(Dropout(dropout2))
    model.add(Dense(1, activation='sigmoid'))

    # a single sigmoid unit is a binary classifier, so use binary cross-entropy
    model.compile(loss='binary_crossentropy',
                  metrics=['binary_accuracy'],
                  optimizer=optimizer)

    return model
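
A quick sketch exercising the non-default pooling branch, with illustrative arguments; note the model expects 6-channel input images:

model = seattle_model(input_width=32,
                      input_height=32,
                      use_max_pooling=False,  # take the AveragePooling2D branch
                      optimizer='adam')
model.summary()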
Example #4

import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras import layers as KL
from tensorflow.keras.models import Model
from tensorflow.keras.constraints import MaxNorm as maxnorm

def design_dnn(nb_features,
               input_shape,
               nb_levels,
               conv_size,
               nb_labels,
               feat_mult=1,
               pool_size=2,
               padding='same',
               activation='elu',
               final_layer='dense-sigmoid',
               conv_dropout=0,
               conv_maxnorm=0,
               nb_input_features=1,
               batch_norm=False,
               name=None,
               prefix=None,
               use_strided_convolution_maxpool=True,
               nb_conv_per_level=2):
    """
    "deep" cnn with dense or global max pooling layer @ end...

    Could use sequential...
    """
    def _global_max_nd(xtens):
        ytens = K.batch_flatten(xtens)
        return K.max(ytens, 1, keepdims=True)

    model_name = name
    if model_name is None:
        model_name = 'model_1'
    if prefix is None:
        prefix = model_name

    ndims = len(input_shape)
    input_shape = tuple(input_shape)

    convL = getattr(KL, 'Conv%dD' % ndims)
    maxpool = KL.MaxPooling3D if len(input_shape) == 3 else KL.MaxPooling2D
    if isinstance(pool_size, int):
        pool_size = (pool_size, ) * ndims

    # kwargs for the convolution layer
    conv_kwargs = {'padding': padding, 'activation': activation}
    if conv_maxnorm > 0:
        conv_kwargs['kernel_constraint'] = maxnorm(conv_maxnorm)

    # initialize a dictionary
    enc_tensors = {}

    # first layer: input
    name = '%s_input' % prefix
    enc_tensors[name] = KL.Input(shape=input_shape + (nb_input_features, ),
                                 name=name)
    last_tensor = enc_tensors[name]

    # down arm:
    # nb_levels blocks of nb_conv_per_level convolutions, downsampling after each level
    for level in range(nb_levels):
        for conv in range(nb_conv_per_level):
            if conv_dropout > 0:
                name = '%s_dropout_%d_%d' % (prefix, level, conv)
                enc_tensors[name] = KL.Dropout(conv_dropout)(last_tensor)
                last_tensor = enc_tensors[name]

            name = '%s_conv_%d_%d' % (prefix, level, conv)
            nb_lvl_feats = np.round(nb_features * feat_mult**level).astype(int)
            enc_tensors[name] = convL(nb_lvl_feats,
                                      conv_size,
                                      **conv_kwargs,
                                      name=name)(last_tensor)
            last_tensor = enc_tensors[name]

        # downsample, either with a strided convolution or with max-pooling
        if use_strided_convolution_maxpool:
            name = '%s_strided_conv_%d' % (prefix, level)
            enc_tensors[name] = convL(nb_lvl_feats,
                                      pool_size,
                                      strides=pool_size,  # stride so the conv actually downsamples
                                      **conv_kwargs,
                                      name=name)(last_tensor)
            last_tensor = enc_tensors[name]
        else:
            name = '%s_maxpool_%d' % (prefix, level)
            enc_tensors[name] = maxpool(pool_size=pool_size,
                                        name=name,
                                        padding=padding)(last_tensor)
            last_tensor = enc_tensors[name]

    # dense layer
    if final_layer == 'dense-sigmoid':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(1, name=name,
                                     activation="sigmoid")(last_tensor)

    elif final_layer == 'dense-tanh':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(1, name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # Omitting BatchNorm for now, it seems to have a cpu vs gpu problem
        # https://github.com/tensorflow/tensorflow/pull/8906
        # https://github.com/fchollet/keras/issues/5802
        # name = '%s_bn' % prefix
        # enc_tensors[name] = KL.BatchNormalization(axis=batch_norm, name=name)(last_tensor)
        # last_tensor = enc_tensors[name]

        name = '%s_tanh' % prefix
        enc_tensors[name] = KL.Activation(activation="tanh",
                                          name=name)(last_tensor)

    elif final_layer == 'dense-softmax':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(nb_labels,
                                     name=name,
                                     activation="softmax")(last_tensor)

    # global max pooling layer
    elif final_layer == 'myglobalmaxpooling':

        name = '%s_batch_norm' % prefix
        enc_tensors[name] = KL.BatchNormalization(axis=batch_norm,
                                                  name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool' % prefix
        enc_tensors[name] = KL.Lambda(_global_max_nd, name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool_reshape' % prefix
        enc_tensors[name] = KL.Reshape((1, 1), name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # KL.Lambda takes no activation argument; it could be coded inside the
        # lambda, but use an extra layer instead
        name = '%s_global_max_pool_sigmoid' % prefix
        enc_tensors[name] = KL.Conv1D(1,
                                      1,
                                      name=name,
                                      activation="sigmoid",
                                      use_bias=True)(last_tensor)

    elif final_layer == 'globalmaxpooling':

        # NOTE: this branch hard-codes 3-D layers, so it assumes len(input_shape) == 3
        name = '%s_conv_to_featmaps' % prefix
        enc_tensors[name] = KL.Conv3D(2, 1, name=name,
                                      activation="relu")(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool' % prefix
        enc_tensors[name] = KL.GlobalMaxPooling3D(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # the pooling layer takes no activation argument, so apply the softmax in an extra layer
        name = '%s_global_max_pool_softmax' % prefix
        enc_tensors[name] = KL.Activation('softmax', name=name)(last_tensor)

    last_tensor = enc_tensors[name]

    # create the model
    model = Model(inputs=[enc_tensors['%s_input' % prefix]],
                  outputs=[last_tensor],
                  name=model_name)
    return model
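
A small 2-D instantiation of design_dnn as a sanity check; the sizes are illustrative, and conv_maxnorm=3 is what actually routes maxnorm into every convolution kernel:

model = design_dnn(nb_features=8,
                   input_shape=(64, 64),
                   nb_levels=3,
                   conv_size=3,
                   nb_labels=2,
                   feat_mult=2,
                   conv_maxnorm=3,
                   final_layer='dense-sigmoid')
model.summary()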
Example #5

import tensorflow as tf
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Dense, Dropout, Flatten, MaxPooling2D)
from tensorflow.keras.constraints import MaxNorm as maxnorm

def create_model(input_dim):
    model = tf.keras.models.Sequential([
        Conv2D(16, (3, 3),
               activation="relu",
               padding="same",
               input_shape=input_dim),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(32, (3, 3), activation="relu", padding="same"),
        MaxPooling2D((2, 2)),
        Activation('relu'),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(64, (3, 3), activation="relu", padding="same"),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(128, (3, 3), activation="relu", padding="same"),
        MaxPooling2D((2, 2)),
        Activation('relu'),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(256, (3, 3), activation="relu", padding="same"),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(512, (3, 3), activation="relu", padding="same"),
        Dropout(0.2),
        BatchNormalization(),
        Conv2D(1024, (3, 3), activation="relu", padding="same"),
        Dropout(0.2),
        BatchNormalization(),
        Flatten(),
        Dropout(0.2),
        Dense(2048,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(1024,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(512,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(256,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(128,
              activation="relu",
              bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001),
              kernel_constraint=maxnorm(3)),
        Dropout(0.2),
        BatchNormalization(),
        Dense(10, activation="softmax")
    ])
    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
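
A usage sketch under the assumption of CIFAR-10-shaped data (32x32 RGB images, 10 one-hot classes); the two pooling stages require an input of at least about 8x8:

model = create_model((32, 32, 3))
model.summary()
# model.fit(x_train, y_train, epochs=10, batch_size=64)  # x_train: (n, 32, 32, 3)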
Example #6
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,
                                     Dense, Dropout, Flatten, MaxPooling2D)
from tensorflow.keras.constraints import MaxNorm as maxnorm
from sklearn.model_selection import train_test_split

# data_frame_for_tf and special_class are assumed to be defined earlier
X = np.asarray(data_frame_for_tf)
y = np.asarray(special_class)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)

model = Sequential()
model.add(Dense(1024, input_dim=18098, activation='relu', kernel_constraint=maxnorm(3)))
model.add(Dropout(rate=0.2))
model.add(Dense(512, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(256, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(128, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(64, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(32, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(16, activation='relu', kernel_constraint=maxnorm(5)))
model.add(Dropout(rate=0.2))
model.add(Dense(1, activation='sigmoid'))

# What follows belongs to a second, convolutional snippet whose opening layers
# are missing; a stub Conv2D (with an assumed input shape) is added so the
# fragment stands on its own.
cnn_model = Sequential()
cnn_model.add(Conv2D(64, (3, 3), padding='same', input_shape=(32, 32, 3)))  # assumed stub
cnn_model.add(Activation('relu'))
cnn_model.add(MaxPooling2D(pool_size=(2, 2)))
cnn_model.add(Dropout(0.2))
cnn_model.add(BatchNormalization())

cnn_model.add(Conv2D(128, (3, 3), padding='same'))
cnn_model.add(Activation('relu'))
cnn_model.add(Dropout(0.2))
cnn_model.add(BatchNormalization())

# Flattening
cnn_model.add(Flatten())
cnn_model.add(Dropout(0.2))

# Classification
cnn_model.add(Dense(256, kernel_constraint=maxnorm(3)))
cnn_model.add(Activation('relu'))
cnn_model.add(Dropout(0.2))
cnn_model.add(BatchNormalization())

cnn_model.add(Dense(128, kernel_constraint=maxnorm(3)))
cnn_model.add(Activation('relu'))
cnn_model.add(Dropout(0.2))
cnn_model.add(BatchNormalization())

cnn_model.add(Dense(class_num))  # class_num: number of classes, assumed defined elsewhere
cnn_model.add(Activation('softmax'))

# Set the epochs and optimizer
epochs = 25
optimizer = 'adam'
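
The snippet ends after choosing the training settings; a minimal continuation that compiles and fits the dense binary classifier above, with the loss and batch size as assumptions:

model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          epochs=epochs,
          batch_size=32)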