Example #1
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# x and b are not defined in this snippet; assumed values so it runs stand-alone:
x = np.linspace(0, 50, 100)  # 100 input values, matching the comment below
b = 5                        # intercept of the synthetic line (value assumed)
noise = np.random.normal(
    loc=0, scale=4, size=len(x)
)  # loc is the mean, scale is the standard deviation, size is the number of samples (100, matching x)

# generate y as a noisy line
y = (2 * x) + b + noise
# plt.plot(x, y, '*')  # '*' draws each (x, y) point as a star marker
# plt.show()

# let's create a Keras model with dense layers
# ------
# choose the model type and add layers => input, hidden and output
# the first argument is the number of neurons, activation is the activation
# function, and input_shape is (1,) because x is a single variable

model = keras.Sequential([
    layers.Dense(4, input_shape=(1, ), activation="relu", name="layer1"),
    layers.Dense(4, activation="relu", name="layer2"),
    layers.Dense(1, name="layer3"),
])

# calling the model on a tensor builds it and creates its weights
sample_input = tf.ones((1, 1))
sample_output = model(sample_input)  # don't reuse `b` here: it holds the intercept above
print("----------")
print("Number of weights after calling the model:", len(model.weights))

# compile the model and see the summary details
model.compile(loss='mse', optimizer='adam')
model.summary()

# fit the model
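# The snippet stops before the fit call. A minimal sketch of that missing step,
# using the synthetic x and y above (the epoch count is an arbitrary assumption):
model.fit(x, y, epochs=50, verbose=0)
print(model.predict(np.array([[10.0]])))  # should land near 2*10 + b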
Example #2
#!/usr/bin/env python

import tensorflow as tf
from tensorflow.keras import layers

model = tf.keras.Sequential()
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
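# The snippet above only defines the model. A minimal sketch of compiling and
# fitting it, assuming integer class labels and an arbitrary feature size of 20:
import numpy as np

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
x_dummy = np.random.random((32, 20)).astype('float32')  # assumed input shape
y_dummy = np.random.randint(0, 10, size=(32,))          # labels for the 10-way softmax
model.fit(x_dummy, y_dummy, epochs=1, verbose=0)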
Example #3
    def train(self, fields=None, load_previous=False, old=None, crnt=0):
        assert fields is not None
        if isinstance(fields, str):
            fields = [fields]
        # val = {}
        self.get_labels(test_train=False)
        X_test, Y_test = self.X, self.Y
        self.get_labels()
        print("Test size", X_test.shape)
        print("training size ", self.X.shape)
        # for field in fields:
        field = 'combo'
        print("Traing the NN for field - ", field)
        callback = tf.keras.callbacks.LearningRateScheduler(self.lr_rate,
                                                            verbose=False)
        if not load_previous:
            print("==============Building models=============")
            inputs = tf.keras.Input(shape=(self.X.to_numpy().shape[-1],),
                                    name='embed')
            # x = layers.Dense(2048, activation='relu')(inputs)
            x = layers.Dense(1024, activation='relu')(inputs)
            x = layers.Dropout(rate=0.2, seed=10)(x)
            x = layers.Dense(512, activation='relu')(x)
            x = layers.Dropout(rate=0.2, seed=10)(x)
            x = layers.Dense(256, activation='relu')(x)
            nfix = layers.Dropout(rate=0.2)(x)
            nfix = layers.Dense(64, activation='relu', name='nfix0')(nfix)
            # nfix = (layers.Dense(64, activation='relu', name='nfix0',
            #                      kernel_regularizer='l2')(x))
            nfix = layers.Dense(16, activation='relu', name='nfix2')(nfix)
            nfix = layers.Dense(3, name='NFIX')(nfix)
            nfix_dec = layers.Dense(16, activation='relu', name='nfix6')(nfix)
            nfix_dec = layers.Dense(64, activation='relu',
                                    name='nfix7')(nfix_dec)
            nfix_dec = layers.Dense(256, activation='relu',
                                    name='nfix8')(nfix_dec)
            nfix_dec = layers.Dense(512, activation='relu',
                                    name='nfix9')(nfix_dec)
            nfix_dec = layers.Dense(1024, activation='relu',
                                    name='nfix10')(nfix_dec)
            fval = layers.Dense(self.X.to_numpy().shape[-1])(nfix_dec)

            self.model = Model(inputs, [fval])
            self.model.compile(optimizer='adam', loss='mse')
        else:
            print("==============Loading models=============")
            self.model = load_model("temp_model_" + field)
        print(self.model.summary())
        keras.backend.set_value(self.model.optimizer.learning_rate, 1e-4)
        es = EarlyStopping(monitor='val_loss',
                           mode='min',
                           verbose=False,
                           patience=30)
        mc = ModelCheckpoint('fm_' + str(crnt) + "_" + field,
                             monitor='val_loss',
                             mode='min',
                             verbose=0,
                             save_best_only=True)
        print(self.X.columns.to_list(), X_test.columns.to_list())
        self.model.fit(
            self.X.to_numpy(),
            self.X.to_numpy(),
            epochs=self.epochs,
            batch_size=self.batch_size,
            verbose=True,
            # validation_split=0.2,
            validation_data=(X_test.to_numpy(), X_test.to_numpy()),
            callbacks=[callback, mc, es],
            use_multiprocessing=True)
        self.model.save("temp_model_" + field)
Example #4
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np

inputs = keras.Input(shape=(784,), name='digits')
x1 = layers.Dense(64, activation='relu')(inputs)
x2 = layers.Dense(64, activation='relu')(x1)
outputs = layers.Dense(10, name='predictions')(x2)
model = keras.Model(inputs=inputs, outputs=outputs)

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))

# reserve the last 10,000 samples for validation *before* truncating the training set
x_val, y_val     = x_train[-10000:], y_train[-10000:]
x_train, y_train = x_train[:-10000], y_train[:-10000]

BATCH_SIZE = 64
EPOCHS = 20

train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(BATCH_SIZE)
val_dataset   = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset   = val_dataset.batch(BATCH_SIZE)

optimizer        = keras.optimizers.SGD(learning_rate=0.001)
loss_fn          = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric   = keras.metrics.SparseCategoricalAccuracy()
Example #5
    x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    if args.dropout:
        x = layers.Dropout(0.2)(x)
    x = layers.Conv2D(filters=128,
                      kernel_size=3,
                      activation=af,
                      padding="same")(x)
    x = layers.Conv2D(filters=128,
                      kernel_size=3,
                      activation=af,
                      padding="same")(x)
    x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    if args.dropout:
        x = layers.Dropout(0.2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(128, activation=af, name='dense_1')(x)
    outputs = layers.Dense(num_classification_categories, name='output')(x)
    keras_model = keras.Model(inputs=inputs, outputs=outputs)

if args.sgd:
    optimizer = keras.optimizers.SGD(learning_rate)
else:
    optimizer = keras.optimizers.Adam(learning_rate)

if args.log_results:
    print(
        "targets,iter,epoch,accuracy,test_accuracy,mbs,rbs,learning_rate,steps_per_epoch,prc,cumul_cpu_time"
    )
    start_time = time.time()

    class CustomCallback(keras.callbacks.Callback):
Example #6
def EfficientNet(width_coefficient,
                 depth_coefficient,
                 default_resolution,
                 dropout_rate=0.2,
                 drop_connect_rate=0.2,
                 depth_divisor=8,
                 blocks_args=DEFAULT_BLOCKS_ARGS,
                 model_name='efficientnet',
                 include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 input_shape=None,
                 pooling=None,
                 classes=1000,
                 **kwargs):
    """Instantiates the EfficientNet architecture using given scaling coefficients.
    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.

    # Arguments
        width_coefficient: float, scaling coefficient for network width.
        depth_coefficient: float, scaling coefficient for network depth.
        default_resolution: int, default input image size.
        dropout_rate: float, dropout rate before final classifier layer.
        drop_connect_rate: float, dropout rate at skip connections.
        depth_divisor: int.
        blocks_args: A list of BlockArgs to construct block modules.
        model_name: string, model name.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False.
            It should have exactly 3 input channels.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_resolution,
                                      min_size=32,
                                      data_format=backend.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if backend.backend() == 'tensorflow':
            from tensorflow.python.keras.backend import is_keras_tensor
        else:
            is_keras_tensor = backend.is_keras_tensor
        if not is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    activation = get_swish(**kwargs)

    # Build stem
    x = img_input
    x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor),
                      3,
                      strides=(2, 2),
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='stem_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
    x = layers.Activation(activation, name='stem_activation')(x)

    # Build blocks
    num_blocks_total = sum(block_args.num_repeat for block_args in blocks_args)
    block_num = 0
    for idx, block_args in enumerate(blocks_args):
        assert block_args.num_repeat > 0
        # Update block input and output filters based on depth multiplier.
        block_args = block_args._replace(
            input_filters=round_filters(block_args.input_filters,
                                        width_coefficient, depth_divisor),
            output_filters=round_filters(block_args.output_filters,
                                         width_coefficient, depth_divisor),
            num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))

        # The first block needs to take care of stride and filter size increase.
        drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
        x = mb_conv_block(x,
                          block_args,
                          activation=activation,
                          drop_rate=drop_rate,
                          prefix='block{}a_'.format(idx + 1))
        block_num += 1
        if block_args.num_repeat > 1:
            # pylint: disable=protected-access
            block_args = block_args._replace(
                input_filters=block_args.output_filters, strides=[1, 1])
            # pylint: enable=protected-access
            for bidx in range(block_args.num_repeat - 1):
                drop_rate = drop_connect_rate * float(
                    block_num) / num_blocks_total
                block_prefix = 'block{}{}_'.format(
                    idx + 1, string.ascii_lowercase[bidx + 1])
                x = mb_conv_block(x,
                                  block_args,
                                  activation=activation,
                                  drop_rate=drop_rate,
                                  prefix=block_prefix)
                block_num += 1

    # Build top
    x = layers.Conv2D(round_filters(1280, width_coefficient, depth_divisor),
                      1,
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='top_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
    x = layers.Activation(activation, name='top_activation')(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        if dropout_rate and dropout_rate > 0:
            x = layers.Dropout(dropout_rate, name='top_dropout')(x)
        x = layers.Dense(classes,
                         activation='softmax',
                         kernel_initializer=DENSE_KERNEL_INITIALIZER,
                         name='probs')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name=model_name)

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_autoaugment.h5'
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = keras_utils.get_file(file_name,
                                            BASE_WEIGHTS_PATH + file_name,
                                            cache_subdir='models',
                                            file_hash=file_hash)
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
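# A hedged usage sketch: the coefficients below are the published
# EfficientNet-B0 values (width 1.0, depth 1.0, resolution 224, dropout 0.2);
# the rest of this module (DEFAULT_BLOCKS_ARGS, initializers, etc.) is assumed.
model_b0 = EfficientNet(1.0, 1.0, 224, 0.2,
                        model_name='efficientnet-b0',
                        weights=None,  # random init; 'imagenet' would fetch pre-trained weights
                        classes=1000)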
Example #7
def VGG19(num_classes, input_shape=(48, 48, 3), dropout=None, block5=True, batch_norm=True):
    img_input = layers.Input(shape=input_shape)

    #Block1
    x = layers.Conv2D(64, (3,3),
                      padding='same', 
                      name='block1_conv1')(img_input)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(64, (3,3), 
                      padding='same', 
                      name='block1_conv2')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    #Block2
    x = layers.Conv2D(128, (3,3),  
                      padding='same', 
                      name='block2_conv1')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(128, (3,3),  
                      padding='same', 
                      name='block2_conv2')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    #Block3
    x = layers.Conv2D(256, (3,3), 
                      padding='same', 
                      name='block3_conv1')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    
    x = layers.Conv2D(256, (3,3), 
                      padding='same', 
                      name='block3_conv2')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    
    x = layers.Conv2D(256, (3,3), 
                      padding='same', 
                      name='block3_conv3')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    
    x = layers.Conv2D(256, (3,3), 
                      padding='same', 
                      name='block3_conv4')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    
    #Block4
    x = layers.Conv2D(512, (3,3),  
                      padding='same', 
                      name='block4_conv1')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    
    x = layers.Conv2D(512, (3,3),  
                      padding='same', 
                      name='block4_conv2')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    
    x = layers.Conv2D(512, (3,3),
                      padding='same',
                      name='block4_conv3')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    
    x = layers.Conv2D(512, (3,3),
                      padding='same', 
                      name='block4_conv4')(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    
    #Block5
    if block5:
        x = layers.Conv2D(512, (3,3),  
                      padding='same', 
                      name='block5_conv1')(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        
        x = layers.Conv2D(512, (3,3),  
                        padding='same', 
                        name='block5_conv2')(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        x = layers.Conv2D(512, (3,3),
                        padding='same',
                        name='block5_conv3')(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        x = layers.Conv2D(512, (3,3),
                        padding='same', 
                        name='block5_conv4')(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

        x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    x = layers.AveragePooling2D((1, 1), strides=(1, 1), name='block6_pool')(x)
    x = layers.Flatten()(x)
    if dropout:
        x = layers.Dropout(dropout)(x)
    x = layers.Dense(num_classes, activation='softmax', name='predictions')(x)
    model = models.Model(img_input, x, name='vgg19')
    return model
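# A minimal usage sketch (the num_classes value is an arbitrary assumption;
# assumes `from tensorflow.keras import layers, models` in this module):
vgg = VGG19(num_classes=10, dropout=0.5)
vgg.summary()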
Example #8
def main():
    # [b, 32, 32, 3] => [b, 1, 1, 512]
    conv_net = Sequential(conv_layers)

    fc_net = Sequential([
        layers.Dense(256, activation=tf.nn.relu),
        layers.Dense(128, activation=tf.nn.relu),
        layers.Dense(100, activation=None),
    ])

    conv_net.build(input_shape=[None, 32, 32, 3])
    fc_net.build(input_shape=[None, 512])
    optimizer = optimizers.Adam(learning_rate=1e-4)  # the lr alias is deprecated

    # [1, 2] + [3, 4] => [1, 2, 3, 4]
    variables = conv_net.trainable_variables + fc_net.trainable_variables

    for epoch in range(50):

        for step, (x, y) in enumerate(train_db):

            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out = conv_net(x)
                # flatten, => [b, 512]
                out = tf.reshape(out, [-1, 512])
                # [b, 512] => [b, 100]
                logits = fc_net(out)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                loss = tf.losses.categorical_crossentropy(y_onehot,
                                                          logits,
                                                          from_logits=True)
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss, variables)
            optimizer.apply_gradients(zip(grads, variables))

            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss))

        total_num = 0
        total_correct = 0
        for x, y in test_db:

            out = conv_net(x)
            out = tf.reshape(out, [-1, 512])
            logits = fc_net(out)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)
Example #9
def GoogLeNet(im_height=224, im_width=224, class_num=1000, aux_logits=False):
    # input: a 224*224*3 image
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype='float32')
    x = layers.Conv2D(64,
                      kernel_size=7,
                      strides=2,
                      padding="SAME",
                      activation="relu",
                      name="conv2d_1")(input_image)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_1")(x)
    x = layers.Conv2D(64, kernel_size=1, activation="relu", name="conv2d_2")(x)
    x = layers.Conv2D(192,
                      kernel_size=3,
                      padding="SAME",
                      activation="relu",
                      name="conv2d_3")(x)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_2")(x)

    # Inception modules
    x = Inception(64, 96, 128, 16, 32, 32, name="inception_3a")(x)
    x = Inception(128, 128, 192, 32, 96, 64, name="inception_3b")(x)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding='same',
                         name="maxpool_3")(x)
    # Inception module
    x = Inception(192, 96, 208, 16, 48, 64, name='inception_4a')(x)
    # auxiliary classifier 1: used during training, removed at test time
    if aux_logits:
        aux1 = InceptionAux(class_num, name='aux_1')(x)
    # Inception modules
    x = Inception(160, 112, 224, 24, 64, 64, name="inception_4b")(x)
    x = Inception(128, 128, 256, 24, 64, 64, name="inception_4c")(x)
    x = Inception(112, 144, 288, 32, 64, 64, name="inception_4d")(x)
    # auxiliary classifier 2: used during training, removed at test time
    if aux_logits:
        aux2 = InceptionAux(class_num, name='aux_2')(x)
    # Inception module
    x = Inception(256, 160, 320, 32, 128, 128, name="inception_4e")(x)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_4")(x)
    # Inception modules
    x = Inception(256, 160, 320, 32, 128, 128, name="inception_5a")(x)
    x = Inception(384, 192, 384, 48, 128, 128, name="inception_5b")(x)
    # average pooling layer
    x = layers.AvgPool2D(pool_size=7, strides=1, name="avgpool_1")(x)
    # flatten
    x = layers.Flatten(name="output_flatten")(x)
    x = layers.Dropout(rate=0.4, name="output_dropout")(x)
    x = layers.Dense(class_num, name="output_dense")(x)
    aux3 = layers.Softmax(name="aux_3")(x)
    # decide whether to include the auxiliary classifier outputs
    if aux_logits:
        model = models.Model(inputs=input_image, outputs=[aux1, aux2, aux3])
    else:
        model = models.Model(inputs=input_image, outputs=aux3)
    return model
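# A quick instantiation sketch (assumes the Inception and InceptionAux layers
# are defined elsewhere in this module):
net = GoogLeNet(im_height=224, im_width=224, class_num=1000, aux_logits=False)
net.summary()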
Example #10
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0

model = keras.Sequential(
    [
        layers.Input(shape=(28, 28, 1)),
        layers.Conv2D(64, (3, 3), padding="same"),
        layers.ReLU(),
        layers.Conv2D(128, (3, 3), padding="same"),
        layers.ReLU(),
        layers.Flatten(),
        layers.Dense(10),
    ],
    name="model",
)


class CustomFit(keras.Model):
    def __init__(self, model):
        super(CustomFit, self).__init__()
        self.model = model

    def compile(self, optimizer, loss):
        super(CustomFit, self).compile()
        self.optimizer = optimizer
        self.loss = loss
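
    # The snippet ends with compile(). A minimal sketch of the custom training
    # step such a class typically pairs with it (uses the fields set above):
    def train_step(self, data):
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self.model(x, training=True)
            loss = self.loss(y, y_pred)
        grads = tape.gradient(loss, self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return {"loss": loss}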
Example #11
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential  # needed for Sequential() below
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras.callbacks import EarlyStopping

embedding_dim = 128

model = Sequential()
model.add(
    layers.Embedding(input_dim=vocab_size,
                     output_dim=embedding_dim,
                     input_length=maxlen))
model.add(Dropout(0.5))
model.add(layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3))
model.add(layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(128, activation="relu"))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation="sigmoid", name="predictions"))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

## Fit the model

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10)

history = model.fit(X_train,
                    y_train,
                    validation_split=0.2,
                    epochs=10,
                    callbacks=[es])
Example #12
def make_discriminator_final_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(1, input_shape=[4096]))
    return model
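# A quick shape check for the discriminator head (batch size 8 is arbitrary;
# assumes the usual `import tensorflow as tf` / `layers` imports):
disc = make_discriminator_final_model()
print(disc(tf.random.normal([8, 4096])).shape)  # -> (8, 1)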
Example #13
AUTOTUNE = tf.data.experimental.AUTOTUNE

train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)

## Create the model
# It's time to create our neural network:

embedding_dim = 16
model = tf.keras.Sequential([
    layers.Embedding(max_features + 1, embedding_dim),
    layers.Dropout(0.2),
    layers.GlobalAveragePooling1D(),
    layers.Dropout(0.2),
    layers.Dense(1)
])

model.summary()

## The layers are stacked sequentially to build the classifier:

# 1. The first layer is an Embedding layer. This layer takes the integer-encoded reviews and
#    looks up an embedding vector for each word-index. These vectors are learned as
#    the model trains. The vectors add a dimension to the output array.
#    The resulting dimensions are: (batch, sequence, embedding).
#    To learn more about embeddings, see the word embedding tutorial.
# 2. Next, a GlobalAveragePooling1D layer returns a fixed-length output vector for each example
#    by averaging over the sequence dimension. This allows the model to handle input of variable
#    length, in the simplest way possible.
# 3. This fixed-length output vector is piped through a fully-connected (Dense) layer with 16 hidden units (see the shape walk-through below).
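
# A minimal shape walk-through of the stack described above (vocabulary size
# and sequence length are arbitrary assumptions; assumes the tf / layers imports
# used elsewhere in this snippet):
import numpy as np
demo = tf.keras.Sequential([
    layers.Embedding(1001, 16),       # (batch, seq) -> (batch, seq, 16)
    layers.GlobalAveragePooling1D(),  # (batch, seq, 16) -> (batch, 16)
    layers.Dense(1),                  # (batch, 16) -> (batch, 1)
])
print(demo(np.random.randint(0, 1001, size=(2, 50))).shape)  # (2, 1)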
Example #14
def decoder(input_decoder):
    # Initial Block
    inputs = keras.Input(shape=input_decoder, name='input_layer')
    x = layers.Dense(4096, name='dense_1')(inputs)
    x = tf.reshape(x, [-1, 8, 8, 64], name='Reshape_Layer')

    # Block 1
    x = layers.Conv2DTranspose(64,
                               3,
                               strides=2,
                               padding='same',
                               name='conv_transpose_1')(x)
    x = layers.BatchNormalization(name='bn_1')(x)
    x = layers.LeakyReLU(name='lrelu_1')(x)

    # Block 2
    x = layers.Conv2DTranspose(64,
                               3,
                               strides=2,
                               padding='same',
                               name='conv_transpose_2')(x)
    x = layers.BatchNormalization(name='bn_2')(x)
    x = layers.LeakyReLU(name='lrelu_2')(x)

    # Block 3
    x = layers.Conv2DTranspose(64,
                               3,
                               2,
                               padding='same',
                               name='conv_transpose_3')(x)
    x = layers.BatchNormalization(name='bn_3')(x)
    x = layers.LeakyReLU(name='lrelu_3')(x)

    # Block 4
    x = layers.Conv2DTranspose(32,
                               3,
                               2,
                               padding='same',
                               name='conv_transpose_4')(x)
    x = layers.BatchNormalization(name='bn_4')(x)
    x = layers.LeakyReLU(name='lrelu_4')(x)

    # Block 5
    outputs = layers.Conv2DTranspose(3,
                                     3,
                                     2,
                                     padding='same',
                                     activation='sigmoid',
                                     name='conv_transpose_5')(x)
    model = tf.keras.Model(inputs, outputs, name="Decoder")
    return model


#
# opt = tf.optimizers.Adam(learning_rate=INIT_LR)
# autoencoder.compile(loss="mse", optimizer=opt)
# encoder.summary()
# decoder.summary()
#
# hist = autoencoder.fit(train_gen, validation_data=val_gen, epochs=EPOCHS)

# reconstruction = None
# lat_space = None
# for i in normalized_ds:
#     latent = encoder.predict(i)
#     out = decoder.predict(latent)
#     if reconstruction is None:
#         reconstruction = out
#         lat_space = latent
#     else:
#         reconstruction = np.concatenate((reconstruction, out))
#         lat_space = np.concatenate((lat_space, latent))
#     if reconstruction.shape[0] > 5000:
#         break
Example #15
(x, y), (x_test, y_test) = datasets.fashion_mnist.load_data()
print(x.shape, y.shape)

db = tf.data.Dataset.from_tensor_slices((x, y))
db = db.map(preprocess).shuffle(10000).batch(BATCH_SIZE)

db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(preprocess).batch(BATCH_SIZE)

db_iter = iter(db)
sample = next(db_iter)
print('batch:', sample[0].shape, sample[1].shape)

model = Sequential([
    layers.Dense(256, activation=tf.nn.relu),  # [b, 784] -> [b, 256]
    layers.Dense(128, activation=tf.nn.relu),  # [b, 256] -> [b, 128]
    layers.Dense(64, activation=tf.nn.relu),  # [b, 128] -> [b, 64]
    layers.Dense(32, activation=tf.nn.relu),  # [b, 64] -> [b, 32]
    layers.Dense(10)  # [b, 32] -> [b, 10]
])

model.build(input_shape=[None, 28 * 28])  # The first input size
model.summary()
# learning rate
optimizer = optimizers.Adam(learning_rate=1e-3)  # avoid rebinding the optimizers module; the lr alias is deprecated


def main():
    for epoch in range(30):
        for step, (x_, y_) in enumerate(db):
Example #16
    def build(self, hp, inputs=None):
        """
        # Arguments
             hp: HyperParameters. The hyperparameters for building the model.
             inputs: Tensor of Shape [batch_size, seq_len]

        # Returns
            Output Tensor of shape `[batch_size, seq_len, embedding_dim]`.
        """
        inputs = nest.flatten(inputs)
        utils.validate_num_inputs(inputs, 1)
        pretraining = self.pretraining or hp.Choice(
            "pretraining",
            ["random", "glove", "fasttext", "word2vec", "none"],
            default="none",
        )
        embedding_dim = self.embedding_dim or hp.Choice(
            "embedding_dim", [32, 64, 128, 256, 512], default=128)
        num_heads = self.num_heads or hp.Choice("num_heads", [8, 16, 32],
                                                default=8)

        dense_dim = self.dense_dim or hp.Choice(
            "dense_dim", [128, 256, 512, 1024, 2048], default=2048)
        dropout = self.dropout or hp.Choice("dropout", [0.0, 0.25, 0.5],
                                            default=0)

        ffn = tf.keras.Sequential([
            layers.Dense(dense_dim, activation="relu"),
            layers.Dense(embedding_dim),
        ])

        layernorm1 = layers.LayerNormalization(epsilon=1e-6)
        layernorm2 = layers.LayerNormalization(epsilon=1e-6)
        dropout1 = layers.Dropout(dropout)
        dropout2 = layers.Dropout(dropout)
        # Token and Position Embeddings
        input_node = nest.flatten(inputs)[0]
        token_embedding = Embedding(
            max_features=self.max_features,
            pretraining=pretraining,
            embedding_dim=embedding_dim,
            dropout=dropout,
        ).build(hp, input_node)
        maxlen = input_node.shape[-1]
        batch_size = tf.shape(input_node)[0]
        positions = self.pos_array_funct(maxlen, batch_size)
        position_embedding = Embedding(
            max_features=maxlen,
            pretraining=pretraining,
            embedding_dim=embedding_dim,
            dropout=dropout,
        ).build(hp, positions)
        output_node = tf.keras.layers.Add()(
            [token_embedding, position_embedding])
        attn_output = MultiHeadSelfAttention(embedding_dim,
                                             num_heads).build(hp, output_node)
        attn_output = dropout1(attn_output)
        add_inputs_1 = tf.keras.layers.Add()([output_node, attn_output])
        out1 = layernorm1(add_inputs_1)
        ffn_output = ffn(out1)
        ffn_output = dropout2(ffn_output)
        add_inputs_2 = tf.keras.layers.Add()([out1, ffn_output])
        output = layernorm2(add_inputs_2)
        return output
Example #17
    def __init__(self, num_action: int, num_hidden_units: int):
        super(ActorCritic, self).__init__()
        self.common = layers.Dense(num_hidden_units, activation=None)
        self.activation = layers.ReLU()
        self.actor = layers.Dense(num_action)
        self.critic = layers.Dense(1)
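
    # The snippet stops after __init__. A sketch of the forward pass such an
    # actor-critic model typically defines (output semantics are assumptions):
    def call(self, inputs):
        x = self.activation(self.common(inputs))
        return self.actor(x), self.critic(x)  # action logits, state value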
Example #18
#!/usr/local/bin/python3

import tensorflow as tf
from tensorflow.keras import layers

print(tf.VERSION)
print(tf.keras.__version__)

model = tf.keras.Sequential([
# Adds a densely-connected layer with 43 units to the model:
layers.Dense(43, activation='relu'),
# Adds a densely-connected layer with 20 units to the model:
layers.Dense(20, activation='relu'),
# Add a sigmoid output to match the binary-crossentropy loss below:
layers.Dense(1, activation='sigmoid')])

# Configure a model for categorical classification.
#model.compile(optimizer=tf.train.RMSPropOptimizer(0.01),
#              loss=tf.keras.losses.binary_crossentropy,
#              metrics=[tf.keras.metrics.binary_accuracy])
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss=tf.keras.losses.binary_crossentropy,
              metrics=[tf.keras.metrics.binary_accuracy])
import numpy as np


with open('train.data') as infile:
    train = [line.rstrip().split(' ') for line in infile.readlines()]
with open('test.data') as infile:
    test = [line.rstrip().split(' ') for line in infile.readlines()]
with open('train_converted.data') as infile:
Example #19
# this script builds an encoder-decoder (seq2seq) model
import collections
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

encoder_vocab = 1000
decoder_vocab = 2000

encoder_input = layers.Input(shape=(None, ))
encoder_embedded = layers.Embedding(input_dim=encoder_vocab,
                                    output_dim=64)(encoder_input)

output, state_h, state_c = layers.LSTM(64, return_state=True,
                                       name='encoder')(encoder_embedded)
encoder_state = [state_h, state_c]

decoder_input = layers.Input(shape=(None, ))
decoder_embedded = layers.Embedding(input_dim=decoder_vocab,
                                    output_dim=64)(decoder_input)

# pass the two encoder states to a new LSTM layer as its initial state
decoder_output = layers.LSTM(64, name='decoder')(decoder_embedded,
                                                 initial_state=encoder_state)
output = layers.Dense(10)(decoder_output)

model = tf.keras.Model([encoder_input, decoder_input], output)
model.summary()
Example #20
    box_key = 'open_' + str(i)
    img_key = 'img_' + str(i)
    center_key = 'center_' + str(i)

    box = np.array(f.get(box_key))
    test_boxes.append(box)

    img = np.array(f.get(img_key)) / 255
    test_images.append(np.logical_not(img))

    c = np.array(f.get(center_key))
    test_centers.append(floor((c[0] * 48 + c[1]) * 63 / 2304))

model = tf.keras.Sequential()
model.add(layers.Flatten(input_shape=(48, 48)))
model.add(layers.Dense((2304 + 64) // 2, activation='linear'))  # integer division: Dense needs an int unit count
model.add(layers.Dense(592, activation='linear'))
model.add(layers.Dense(64, activation='softmax'))

model.compile(optimizer=tf.train.AdamOptimizer(0.001),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(np.array(train_images), np.array(train_centers), epochs=5)

print("running test")

test_loss, test_acc = model.evaluate(np.array(test_images),
                                     np.array(test_centers))

print('Test accuracy:', test_acc)
Example #21
    def __init__(self, cfg):
        super(InputLayer, self).__init__()
        H, W, C = cfg.im_size[1], cfg.im_size[0], cfg.im_size[2]
        self.C = C
        self.avg_pool = layers.AveragePooling2D(pool_size=(H, W), strides=1)
        self.dense1 = layers.Dense(cfg.network["squeeze_ratio"])
Example #22
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)

# Building model 1
embedding_vector_length = 32

model_a = Sequential()
model_a.add(
    layers.Embedding(top_words,
                     embedding_vector_length,
                     input_length=max_review_length))
model_a.add(
    layers.Conv1D(filters=32, kernel_size=3, padding='same',
                  activation='relu'))
model_a.add(layers.MaxPooling1D(pool_size=2))
model_a.add(layers.LSTM(100))
model_a.add(layers.Dense(1, activation='sigmoid'))
model_a.compile(loss='binary_crossentropy',
                optimizer='adam',
                metrics=['accuracy'])
print(model_a.summary())

# Building model 2
model_b = Sequential()
model_b.add(
    layers.Embedding(top_words,
                     embedding_vector_length,
                     input_length=max_review_length))
model_b.add(layers.Flatten())
model_b.add(layers.Dense(50, activation="relu"))
model_b.add(layers.Dropout(0.5, noise_shape=None, seed=None))
model_b.add(layers.Dense(50, activation="relu"))
Example #23
# Fit the state of the layer to the spectrograms
# with `Normalization.adapt`.
norm_layer.adapt(data=spectrogram_ds.map(map_func=lambda spec, label: spec))

model = models.Sequential([
    layers.Input(shape=input_shape),
    # Downsample the input.
    layers.Resizing(32, 32),
    # Normalize.
    norm_layer,
    layers.Conv2D(32, 3, activation='relu'),
    layers.Conv2D(64, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.25),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(num_labels),
])

model.summary()

model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)

EPOCHS = 50
history = model.fit(
    train_ds,
    epochs=EPOCHS,
)
Example #24
    vectorizer.fit(sentences_train)
    X_train = vectorizer.transform(sentences_train)
    X_test  = vectorizer.transform(sentences_test)

    classifier = LogisticRegression()
    classifier.fit(X_train, y_train)
    score = classifier.score(X_test, y_test)
    print('Accuracy for {} data: {:.4f}'.format(source, score))

from tensorflow.keras.models import Sequential
from tensorflow.keras import layers

input_dim = X_train.shape[1]  # Number of features

model = Sequential()
model.add(layers.Dense(10, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()
validation_data = (X_test, y_test)
history = model.fit(X_train.todense(), y_train.todense(),
                    epochs=100,
                    verbose=False,
                    validation_data=validation_data,
                    batch_size=10)
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
x = tf.constant([2, 1, 0.1], dtype=tf.float32)
layer = layers.Softmax(axis=-1)  # create a Softmax layer
layer(x)  # run the softmax forward pass
# np.exp(2)/ sum([ np.exp(i) for  i in [2, 1, 0.1] ])

## 8.1.2 Network containers
"""
Wrap the layers into one larger network model with Sequential
"""
from tensorflow.keras import layers, Sequential
network = Sequential([
    layers.Dense(3, activation=None),
    layers.ReLU(),
    layers.Dense(2, activation=None),
    layers.ReLU()
])
x = tf.random.normal([4, 3])
network(x)
# layers can also be appended one by one to grow the network
layers_num = 2
network = Sequential([])
for _ in range(layers_num):
    network.add(layers.Dense(3))
    network.add(layers.ReLU())

network.build(input_shape=(None, 4))
# parameter counts: layer1 = 4*3 + 3 = 15, layer2 = 3*3 + 3 = 12
Example #26
     tf.keras.layers.MaxPooling2D(pool_size = (2,2)),
     tf.keras.layers.Conv2D(pass),  # placeholder left in the original; the Conv2D arguments were never filled in
     tf.keras.layers.MaxPooling2D()
    ]
)

model_1 = models.Sequential()
model_1.add(layers.Conv2D(28, (3, 3), activation='relu', input_shape=(28, 28, 3)))
model_1.add(layers.MaxPooling2D((2, 2)))
model_1.add(layers.Conv2D(56, (3, 3), activation='relu'))
model_1.add(layers.MaxPooling2D((2, 2)))
model_1.add(layers.Conv2D(56, (3, 3), activation='relu'))
model_1.summary()

model_1.add(layers.Flatten(input_shape = (28, 28)))
model_1.add(layers.Dense(128, activation='relu'))
model_1.add(layers.Dense(10))

model_1.summary()

model_1.compile(optimizer='adam',
                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])

history = model_1.fit(X_train, y_train, epochs=5)

predictions_con = np.argmax(model_1.predict(X_test, verbose=0), axis=1)  # predict_classes was removed in TF 2.6

submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions_con)+1)),
                         "Label": predictions_con})
submissions.to_csv("Digit Recognizer Convolutional.csv", index=False, header=True)
Example #27
w_input = tf.keras.Input(shape=(53,), name='w_input')  # (53,) so shape is a tuple, not a bare int


#%% embedding
x = embedding_layer(w_input)


#%% flatten
# x = layers.Flatten(input_shape=(53, VEC_SIZE))(x)


#%% LSTM layer
x = layers.LSTM(128)(x)
# x = layers.LSTM(128, return_sequences=True, return_state=True)(x)
# x = layers.Flatten()(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.2)(x)


#%% output layer
answer = layers.Dense(16, activation='softmax')(x)


#%% model declaration
model = tf.keras.Model(inputs=w_input, outputs=answer)
model.summary()


#%%
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy')
Example #28
          model = models.Sequential()
          model.add(layers.Conv2D(32, (3, 3), input_shape=(32,32,3)))
          model.add(layers.Activation('relu'))
          model.add(layers.GaussianNoise(noise_dict[1]))
          model.add(layers.MaxPooling2D((2, 2)))

          model.add(layers.Conv2D(64, (3, 3)))
          model.add(layers.Activation('relu'))
          model.add(layers.GaussianNoise(noise_dict[2]))
          model.add(layers.MaxPooling2D((2, 2)))

          model.add(layers.Conv2D(64, (3, 3)))
          model.add(layers.Activation('relu'))
          model.add(layers.GaussianNoise(noise_dict[3]))
          model.add(layers.Flatten())
          model.add(layers.Dense(64, activation='relu'))
          model.add(layers.Dense(10))
          model.compile(optimizer='adam',
                      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                      metrics=['accuracy'])
          if i != 1:
              model.set_weights(weights0)
          history = model.fit(train_images, train_labels, epochs=3, 
                            validation_data=(validate_images, validate_labels))
          print('ending model')

      print('test model')
      model1=model
      if m==0:
          name='clearmodel'
      else:
Example #29
print('Loaded InceptionV3')
# summary of original model
#pre_trained_model.summary()

########### grab last layer ##############
last_layer = pre_trained_model.get_layer('mixed7')  # Your Code Here
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output  # Your Code Here

########### Our own trainable NN at the end ##########
from tensorflow.keras.optimizers import RMSprop

# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = layers.Dense(1024, activation='relu')(x)
# Add a dropout rate of 0.2
x = layers.Dropout(0.2)(x)
# Add a final sigmoid layer for classification
x = layers.Dense(1, activation='sigmoid')(x)

model = Model(pre_trained_model.input, x)

model.compile(optimizer=RMSprop(learning_rate=0.0001),  # the lr alias is deprecated
              loss='binary_crossentropy',
              metrics=['acc'])
# summary of model including our stuff
#model.summary()

############ Download and Unzip ################
url = 'https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip'
Example #30
def design_dnn(nb_features,
               input_shape,
               nb_levels,
               conv_size,
               nb_labels,
               feat_mult=1,
               pool_size=2,
               padding='same',
               activation='elu',
               final_layer='dense-sigmoid',
               conv_dropout=0,
               conv_maxnorm=0,
               nb_input_features=1,
               batch_norm=False,
               name=None,
               prefix=None,
               use_strided_convolution_maxpool=True,
               nb_conv_per_level=2):
    """
    "deep" cnn with dense or global max pooling layer @ end...

    Could use sequential...
    """
    def _global_max_nd(xtens):
        ytens = K.batch_flatten(xtens)
        return K.max(ytens, 1, keepdims=True)

    model_name = name
    if model_name is None:
        model_name = 'model_1'
    if prefix is None:
        prefix = model_name

    ndims = len(input_shape)
    input_shape = tuple(input_shape)

    convL = getattr(KL, 'Conv%dD' % ndims)
    maxpool = KL.MaxPooling3D if len(input_shape) == 3 else KL.MaxPooling2D
    if isinstance(pool_size, int):
        pool_size = (pool_size, ) * ndims

    # kwargs for the convolution layer
    conv_kwargs = {'padding': padding, 'activation': activation}
    if conv_maxnorm > 0:
        conv_kwargs['kernel_constraint'] = maxnorm(conv_maxnorm)

    # initialize a dictionary
    enc_tensors = {}

    # first layer: input
    name = '%s_input' % prefix
    enc_tensors[name] = KL.Input(shape=input_shape + (nb_input_features, ),
                                 name=name)
    last_tensor = enc_tensors[name]

    # down arm:
    # add nb_levels of conv + ReLu + conv + ReLu. Pool after each of first nb_levels - 1 layers
    for level in range(nb_levels):
        for conv in range(nb_conv_per_level):
            if conv_dropout > 0:
                name = '%s_dropout_%d_%d' % (prefix, level, conv)
                enc_tensors[name] = KL.Dropout(conv_dropout)(last_tensor)
                last_tensor = enc_tensors[name]

            name = '%s_conv_%d_%d' % (prefix, level, conv)
            nb_lvl_feats = np.round(nb_features * feat_mult**level).astype(int)
            enc_tensors[name] = convL(nb_lvl_feats,
                                      conv_size,
                                      **conv_kwargs,
                                      name=name)(last_tensor)
            last_tensor = enc_tensors[name]

        # max pool
        if use_strided_convolution_maxpool:
            name = '%s_strided_conv_%d' % (prefix, level)
            enc_tensors[name] = convL(nb_lvl_feats,
                                      pool_size,
                                      **conv_kwargs,
                                      name=name)(last_tensor)
            last_tensor = enc_tensors[name]
        else:
            name = '%s_maxpool_%d' % (prefix, level)
            enc_tensors[name] = maxpool(pool_size=pool_size,
                                        name=name,
                                        padding=padding)(last_tensor)
            last_tensor = enc_tensors[name]

    # dense layer
    if final_layer == 'dense-sigmoid':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(1, name=name,
                                     activation="sigmoid")(last_tensor)

    elif final_layer == 'dense-tanh':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(1, name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # Omitting BatchNorm for now, it seems to have a cpu vs gpu problem
        # https://github.com/tensorflow/tensorflow/pull/8906
        # https://github.com/fchollet/keras/issues/5802
        # name = '%s_bn' % prefix
        # enc_tensors[name] = KL.BatchNormalization(axis=batch_norm, name=name)(last_tensor)
        # last_tensor = enc_tensors[name]

        name = '%s_tanh' % prefix  # single %s: '%s_%s_tanh' % prefix would raise TypeError
        enc_tensors[name] = KL.Activation(activation="tanh",
                                          name=name)(last_tensor)

    elif final_layer == 'dense-softmax':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(nb_labels,
                                     name=name,
                                     activation="softmax")(last_tensor)

    # global max pooling layer
    elif final_layer == 'myglobalmaxpooling':

        name = '%s_batch_norm' % prefix
        enc_tensors[name] = KL.BatchNormalization(axis=batch_norm,
                                                  name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool' % prefix
        enc_tensors[name] = KL.Lambda(_global_max_nd, name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool_reshape' % prefix
        enc_tensors[name] = KL.Reshape((1, 1), name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # cannot do activation in a Lambda layer; could code it inside, but will add an extra layer
        name = '%s_global_max_pool_sigmoid' % prefix
        enc_tensors[name] = KL.Conv1D(1,
                                      1,
                                      name=name,
                                      activation="sigmoid",
                                      use_bias=True)(last_tensor)

    elif final_layer == 'globalmaxpooling':

        name = '%s_conv_to_featmaps' % prefix
        enc_tensors[name] = KL.Conv3D(2, 1, name=name,
                                      activation="relu")(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool' % prefix
        enc_tensors[name] = KL.GlobalMaxPooling3D(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # cannot do activation in a Lambda layer; could code it inside, but will add an extra layer
        name = '%s_global_max_pool_softmax' % prefix
        enc_tensors[name] = KL.Activation('softmax', name=name)(last_tensor)

    last_tensor = enc_tensors[name]

    # create the model
    model = Model(inputs=[enc_tensors['%s_input' % prefix]],
                  outputs=[last_tensor],
                  name=model_name)
    return model
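# A hedged usage sketch (argument values are illustrative assumptions; assumes
# this module imports numpy as np, keras.layers as KL, the keras backend as K,
# and Model):
net = design_dnn(nb_features=16,
                 input_shape=(32, 32),
                 nb_levels=3,
                 conv_size=3,
                 nb_labels=2,
                 final_layer='dense-softmax')
net.summary()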