def make_discriminator_model():
    """Build the GAN discriminator.

    Two strided 3x3 convolutions halve the 28x28 input twice, then a
    single sigmoid unit scores the image as real/fake.

    Returns:
        A tf.keras Sequential model mapping (28, 28, 1) -> scalar in (0, 1).
    """
    discriminator = tf.keras.Sequential([
        layers.InputLayer(input_shape=(28, 28, 1)),
        layers.Conv2D(64, 3, strides=(2, 2), padding='same',
                      activation=tf.nn.relu),
        layers.Conv2D(128, 3, strides=(2, 2), padding='same',
                      activation=tf.nn.relu),
        layers.Flatten(),
        layers.Dense(1, activation=tf.nn.sigmoid),
    ])
    return discriminator
# Example #2
def make_encoder_model():
    """Build the encoder.

    Two strided 3x3 convolutions downsample the 28x28 input, followed by
    a linear 64-dimensional projection (no activation on the code layer).

    Returns:
        A tf.keras Sequential model mapping (28, 28, 1) -> 64-d vector.
    """
    encoder = tf.keras.Sequential([
        layers.InputLayer(input_shape=(28, 28, 1)),
        layers.Conv2D(32, (3, 3), strides=(2, 2), padding='same',
                      activation=tf.nn.relu),
        layers.Conv2D(64, (3, 3), strides=(2, 2), padding='same',
                      activation=tf.nn.relu),
        layers.Flatten(),
        # Linear code layer — intentionally no activation.
        layers.Dense(64),
    ])
    return encoder
# Example #3
def deep(features_shape, number_of_classes, activation_function='relu'):
    """Fully-connected classifier: Flatten -> 3 x Dense(512) -> softmax.

    Args:
        features_shape: Shape tuple of a single input sample.
        number_of_classes: Number of output classes.
        activation_function: Activation for the hidden dense layers.

    Returns:
        An uncompiled Keras Sequential model (summary is printed).
    """
    model = models.Sequential()

    # Input
    model.add(
        layers.InputLayer(input_shape=features_shape,
                          name='Inputs',
                          dtype='float32'))

    # Flatten
    model.add(layers.Flatten(name='Flatten'))

    # Dense block: three identical hidden layers.
    for index in (1, 2, 3):
        model.add(
            layers.Dense(units=512,
                         activation=activation_function,
                         name='Dense{}'.format(index)))

    # Predictions. FIX: the original applied `activation_function` (ReLU by
    # default) to the output layer, which leaves class scores unnormalized;
    # softmax yields a proper probability distribution and matches the
    # sibling `deep_cnn` builder in this file.
    model.add(
        layers.Dense(units=number_of_classes,
                     activation='softmax',
                     name='Prediction'))

    # Print network summary
    model.summary()

    return model
  def __init__(self, latent_dim):
    """Build the CVAE inference (encoder) and generative (decoder) nets.

    Args:
      latent_dim: Size of the latent code. The encoder emits
        2 * latent_dim linear outputs (mean and log-variance).
    """
    super(CVAE, self).__init__()
    self.latent_dim = latent_dim

    # Encoder: two strided convolutions, then a linear projection to
    # the concatenated (mean, logvar) vector.
    encoder_layers = [layers.InputLayer(input_shape=(28, 28, 1))]
    encoder_layers.append(
      layers.Conv2D(filters=32, kernel_size=3, strides=(2, 2),
                    activation=tf.nn.relu))
    encoder_layers.append(
      layers.Conv2D(filters=64, kernel_size=3, strides=(2, 2),
                    activation=tf.nn.relu))
    encoder_layers.append(layers.Flatten())
    # No activation
    encoder_layers.append(layers.Dense(latent_dim + latent_dim))
    self.inference_net = tf.keras.Sequential(encoder_layers)

    # Decoder: project and reshape the code to 7x7x32, upsample twice
    # with transposed convolutions, then a linear 1-channel output map.
    decoder_layers = [layers.InputLayer(input_shape=(latent_dim,))]
    decoder_layers.append(
      layers.Dense(units=7 * 7 * 32, activation=tf.nn.relu))
    decoder_layers.append(layers.Reshape(target_shape=(7, 7, 32)))
    decoder_layers.append(
      layers.Conv2DTranspose(filters=64, kernel_size=3, strides=(2, 2),
                             padding="SAME", activation=tf.nn.relu))
    decoder_layers.append(
      layers.Conv2DTranspose(filters=32, kernel_size=3, strides=(2, 2),
                             padding="SAME", activation=tf.nn.relu))
    # No activation
    decoder_layers.append(
      layers.Conv2DTranspose(filters=1, kernel_size=3, strides=(1, 1),
                             padding="SAME"))
    self.generative_net = tf.keras.Sequential(decoder_layers)
# Example #5
def create_decoder():
    """Return the decoder layer stack (latent code -> flattened image)."""
    decoder = [
        layers.InputLayer(input_shape=[encoded_size]),
        layers.Reshape([1, 1, encoded_size]),
    ]
    # A 7x7 'valid' deconv expands the 1x1 code, then alternating
    # stride-1 / stride-2 deconvs upsample towards image resolution.
    decoder.append(deconv2D(2 * base_depth, 7, strides=1, padding='valid',
                            name='Decoder0'))
    decoder.append(deconv2D(2 * base_depth, 5, strides=1, name='Decoder1'))
    decoder.append(deconv2D(2 * base_depth, 5, strides=2, name='Decoder2'))
    decoder.append(deconv2D(base_depth, 5, strides=1, name='Decoder3'))
    decoder.append(deconv2D(base_depth, 5, strides=2, name='Decoder4'))
    decoder.append(deconv2D(base_depth, 5, strides=1, name='Decoder5'))
    # Final linear convolution down to a single output channel.
    decoder.append(conv2D(1, 5, strides=1, activation=None, name='Decoder6'))
    decoder.append(layers.Flatten())
    return decoder
# Example #6
def create_encoder(input_shape):
    """Return the encoder layer stack (image -> TriL distribution params).

    Args:
        input_shape: Shape tuple of a single input image.
    """
    encoder = [layers.InputLayer(input_shape=input_shape)]
    # Alternating stride-1 / stride-2 convolutions downsample the input.
    encoder.append(conv2D(base_depth, 5, strides=1, name='Encoder0'))
    encoder.append(conv2D(base_depth, 5, strides=2, name='Encoder1'))
    encoder.append(conv2D(2 * base_depth, 5, strides=1, name='Encoder2'))
    encoder.append(conv2D(2 * base_depth, 5, strides=2, name='Encoder3'))
    # 7x7 'valid' convolution collapses the remaining spatial extent.
    encoder.append(conv2D(4 * encoded_size, 7, strides=1, padding='valid',
                          name='Encoder4'))
    encoder.append(layers.Flatten())
    # Linear head sized for a MultivariateNormalTriL parameterization.
    encoder.append(
        layers.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size),
                     activation=None,
                     name='Encoder5'))
    return encoder
# Example #7
    def __init__(self, input_shape,
                 batch_size,
                 num_class,
                 fusion_mode,
                 output_stride,
                 weights=None,
                 name='DeeplabV3',
                 backbone='resnet50',
                 backbone_weights=None,
                 dilated=True,
                 multi_grid=False,
                 multi_dilation=None,
                 norm_layer=None,
                 norm_kwargs=None,
                 conv_trainable=True,
                 **kwargs):
        """Construct a DeeplabV3 segmentation model.

        Args:
            input_shape: [height, width, channels] of the full fused input.
            batch_size: Static batch size used for the input layers.
            num_class: Number of segmentation classes.
            fusion_mode: One of 'bgr', 'hha', 'bgr_hha', 'bgr_hha_xyz',
                'bgr_hha_gw'; selects how many channels the backbone sees.
            output_stride: Backbone output stride (stored on the instance).
            conv_trainable: Whether convolution weights are trainable; now
                forwarded to the head as well as the backbone.

        Raises:
            ValueError: If `fusion_mode` is not one of the supported modes.
        """
        self.fusion_mode = fusion_mode
        # Backbone input channel count for each supported fusion mode.
        channels_by_mode = {
            'bgr': 3,
            'hha': 3,
            'bgr_hha': 6,
            'bgr_hha_xyz': 9,
            'bgr_hha_gw': 9,
        }
        if self.fusion_mode not in channels_by_mode:
            logging.error("Unknown fusion mode.")
            # FIX: the original silently `return`ed from __init__ here,
            # leaving a half-constructed object (super().__init__ never
            # called, attributes missing). Fail loudly instead.
            raise ValueError('Unknown fusion mode: {}'.format(fusion_mode))
        backbone_input_shape = [batch_size, input_shape[0], input_shape[1],
                                channels_by_mode[self.fusion_mode]]
        super(DeeplabV3, self).__init__(backbone,
                                        backbone_input_shape,
                                        fusion_mode=fusion_mode,
                                        dilated=dilated,
                                        multi_grid=multi_grid,
                                        backbone_weights=backbone_weights,
                                        multi_dilation=multi_dilation,
                                        conv_trainable=conv_trainable,
                                        name=name)
        self.output_stride = output_stride
        self.input_layer = klayers.InputLayer(batch_input_shape=[batch_size] + input_shape, dtype=tf.float32)
        # Splits the fused input back into bgr / hha / confidence / extra
        # channel groups along the channel axis.
        self.split_inputs = klayers.Lambda(lambda x: tf.split(axis=-1, num_or_size_splits=[3, 3, 1, 3], value=x))

        self.concat_bgr_hha = klayers.Concatenate()

        # FIX: forward `conv_trainable` instead of the hard-coded True the
        # original passed, so freezing convolutions also affects the head.
        self.head = DeepLabHead(num_class, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                                conv_trainable=conv_trainable, **kwargs)
# Example #8
def get_model_from_layers(model_layers,
                          input_shape=None,
                          input_dtype=None,
                          name=None,
                          input_ragged=None,
                          input_sparse=None):
    """Builds a model from a sequence of layers.

  Args:
    model_layers: The layers used to build the network.
    input_shape: Shape tuple of the input or 'TensorShape' instance.
    input_dtype: Datatype of the input.
    name: Name for the model.
    input_ragged: Boolean, whether the input data is a ragged tensor.
    input_sparse: Boolean, whether the input data is a sparse tensor.

  Returns:
    A Keras model.
  """

    model_type = get_model_type()

    if model_type == 'subclass':
        # Ragged/sparse inputs need an explicit Input tensor; otherwise the
        # subclass model infers its input on first call.
        input_tensor = None
        if input_ragged or input_sparse:
            input_tensor = layers.Input(shape=input_shape,
                                        dtype=input_dtype,
                                        ragged=input_ragged,
                                        sparse=input_sparse)
        return _SubclassModel(model_layers, name=name,
                              input_tensor=input_tensor)

    if model_type == 'subclass_custom_build':
        # The custom-build model receives a callable producing the layers.
        def layer_generating_func():
            return model_layers
        return _SubclassModelCustomBuild(layer_generating_func, name=name)

    if model_type == 'sequential':
        model = models.Sequential(name=name)
        if input_shape:
            model.add(
                layers.InputLayer(input_shape=input_shape,
                                  dtype=input_dtype,
                                  ragged=input_ragged,
                                  sparse=input_sparse))
        for next_layer in model_layers:
            model.add(next_layer)
        return model

    if model_type == 'functional':
        if not input_shape:
            raise ValueError(
                'Cannot create a functional model from layers with no '
                'input shape.')
        inputs = layers.Input(shape=input_shape,
                              dtype=input_dtype,
                              ragged=input_ragged,
                              sparse=input_sparse)
        # Thread the symbolic tensor through every layer in order.
        outputs = inputs
        for next_layer in model_layers:
            outputs = next_layer(outputs)
        return models.Model(inputs, outputs, name=name)

    raise ValueError('Unknown model type {}'.format(model_type))
# Example #9
batch_size = 150
# Training and validation batch generators over the hashed corpus.
sequenceGenerator = hashCorpusSequence(X_train, y_train, batch_size)
validationSeqGen = hashCorpusSequenceVal(X_val, y_val, batch_size)

# Defining the ML model: 100x100 grayscale input, two small conv blocks
# regularized with spatial dropout and batch norm, 3-way softmax output.
model = Sequential([
    layers.InputLayer(input_shape=(100, 100, 1)),
    layers.SpatialDropout2D(rate=0.2),
    layers.Conv2D(32, kernel_size=3, activation='relu'),
    layers.BatchNormalization(),
    layers.SpatialDropout2D(rate=0.1),
    layers.Conv2D(16, kernel_size=3, activation='relu'),
    layers.BatchNormalization(),
    layers.SpatialDropout2D(rate=0.1),
    layers.Flatten(),
    layers.Dense(3, activation='softmax'),
])

# One TensorBoard run directory per launch, keyed by wall-clock time.
tensorboard = TensorBoard(log_dir="logs/{}".format(time()))

model.compile(optimizer="adamax",
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Example #10
from tensorflow.python.keras import layers, Sequential
from tensorflow.python.keras.utils import plot_model

# Shared hyper-parameters for every VGG convolution / pooling layer.
conv_filter = (3, 3)
conv_strides = (1, 1)
conv_padding = 'SAME'

pool_size = (2, 2)
pool_stride = (2, 2)
# There are 16 convolutions and hence the name vgg16
model = Sequential()
# Standard ImageNet-sized RGB input.
model.add(layers.InputLayer(input_shape=(224, 224, 3)))
# Conv 64 x 2 and max pool
model.add(
    layers.Conv2D(filters=64,
                  kernel_size=conv_filter,
                  activation='relu',
                  strides=conv_strides,
                  padding=conv_padding))
model.add(
    layers.Conv2D(filters=64,
                  kernel_size=conv_filter,
                  activation='relu',
                  strides=conv_strides,
                  padding=conv_padding))

model.add(layers.MaxPool2D(pool_size=pool_size, strides=pool_stride))

# Conv 128 x 2 and max pool
# NOTE(review): this snippet is truncated mid-call below — the remaining
# VGG16 blocks are not visible in this excerpt.
model.add(
    layers.Conv2D(filters=128,
# Example #11
# Shuffle images and labels with the same permutation (new_order is
# presumably a shuffled index array defined earlier — not visible here).
images = images[new_order]
labels = labels[new_order]

num_classes = len(np.unique(labels))
size = len(images)

# split testing and training sets
# First 10% of the shuffled data becomes the test set, the rest trains.
(X_train, X_test) = images[(int)(0.1 * size):], images[:(int)(0.1 * size)]

(Y_train, Y_test) = labels[(int)(0.1 * size):], labels[:(int)(0.1 * size)]

# One-hot encode the integer labels.
Y_train = utils.to_categorical(Y_train, num_classes)
Y_test = utils.to_categorical(Y_test, num_classes)

# Three conv/pool blocks (16 -> 32 -> 64 filters) over 28x28 RGB input,
# then a 500-unit dense layer and a 62-way softmax.
model = tf.keras.Sequential()
model.add(layers.InputLayer(input_shape=(28, 28, 3)))
model.add(layers.Conv2D(16, (2, 2), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(32, (2, 2), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(64, (2, 2), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(500, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(62, activation='softmax'))

model.summary()

# NOTE(review): snippet truncated below — compile() call is cut off here.
model.compile(loss='categorical_crossentropy',
# Example #12
class histSequenceVal(histSequence):
    """Validation-set variant of histSequence; only stores its data refs."""

    def __init__(self, x, y, batch_size):
        self.x = x
        self.y = y
        self.batch_size = batch_size


batch_size = 1000
# Training and validation batch generators over the histogram features.
sequenceGenerator = histSequence(X_train, y_train, batch_size)
validationSeqGen = histSequenceVal(X_val, y_val, batch_size)
print(validationSeqGen.__getitem__(0))

# Defining the ML model: a funnel of batch-normalized dense layers
# (256 -> 128 -> 64 -> 32 -> 16) over a 50-d input, 3-way softmax output.
model = Sequential()

model.add(layers.InputLayer(input_shape=(50, )))
model.add(layers.Dense(256, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.2))
for layer_width in (128, 64, 32, 16):
    model.add(layers.Dense(layer_width, activation='relu'))
    model.add(layers.BatchNormalization())
model.add(layers.Dense(3, activation='softmax'))

# One TensorBoard run directory per launch, keyed by wall-clock time.
tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
# Example #13
            # Tail of an image-loading loop whose header is outside this
            # excerpt: accumulate the resized image and a running label id.
            resized_img = np.array(resized_img)
            train.append(resized_img)
            label.append(count)
            count += 1

train = np.array(train)
print(train)
label = np.array(label)
# Normalize pixel values to [0, 1].
train = 1.0 / 255 * train

num_classes = 7
label = utils.to_categorical(label, num_classes)

# the ML model
# Three conv/pool blocks (16 -> 32 -> 64 filters) over 64x64 RGB input,
# then a 500-unit dense layer and a 7-way softmax.
model = tf.keras.Sequential()
model.add(layers.InputLayer(input_shape=(64, 64, 3)))
model.add(layers.Conv2D(16, (2, 2), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(32, (2, 2), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Conv2D(64, (2, 2), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(500, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(7, activation='softmax'))
model.summary()

# NOTE(review): the path suggests pre-trained weights, but a .json
# extension is unusual for Keras weights (.h5 expected) — verify.
model.load_weights("/facesdb/model/first_run.json")
# Example #14
def init_model(learning_rate, dense_layers, dense_nodes, activation):
    """Build and compile a small CNN for hyper-parameter search.

    Hyper-parameters:
    learning_rate: Learning-rate for the optimizer.
    dense_layers:  Number of dense layers.
    dense_nodes:   Number of nodes in each dense layer.
    activation:    Activation function for all layers.
    """

    # Start construction of a Keras Sequential model.
    model = models.Sequential()

    # The flattened MNIST input is reshaped back to (28, 28, 1) image
    # tensors for the convolutional stack.
    model.add(layers.InputLayer(input_shape=(IMG_SIZE_flat,)))
    model.add(layers.Reshape(IMG_SHAPE_FULL))

    # Two conv+pool blocks (16 then 36 filters); only the activation
    # function is a tunable hyper-parameter here.
    for conv_index, filter_count in enumerate((16, 36), start=1):
        model.add(layers.Conv2D(
            kernel_size=5, strides=1, filters=filter_count, padding='same',
            activation=activation,
            name='layer_conv{0}'.format(conv_index)))
        model.add(layers.MaxPooling2D(pool_size=2, strides=2))

    # Flatten the 4-rank conv output into a 2-rank tensor for the
    # fully-connected layers.
    model.add(layers.Flatten())

    # Tunable stack of fully-connected layers; explicit names are kept
    # for readability even though Keras would generate unique ones.
    for dense_index in range(dense_layers):
        model.add(layers.Dense(
            dense_nodes, activation=activation,
            name='layer_dense_{0}'.format(dense_index + 1)))

    # Softmax classification head.
    model.add(layers.Dense(NUM_CLASSES, activation='softmax'))

    # Adam optimizer with the tunable learning-rate; compile for
    # one-hot classification.
    optimizer = optimizers.Adam(lr=learning_rate)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
# Example #15
# Derived image shapes (img_size is defined earlier, outside this excerpt).
img_shape = (img_size, img_size)
img_shape_full = (img_size, img_size, 1)
num_classes = 10

# Data

# Temporarily raise the log level so the deprecated MNIST loader's
# warnings are suppressed during download/parse.
tf.logging.set_verbosity(tf.logging.WARN)
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets('data', one_hot=True)
tf.logging.set_verbosity(tf.logging.INFO)
validation_data = data.validation.images, data.validation.labels

# Model

# Flat MNIST vectors are reshaped back to (28, 28, 1) images, then fed
# through two conv+pool blocks (16 and 36 filters).
model = models.Sequential()
model.add(layers.InputLayer(input_shape=(img_size_flat,)))
model.add(layers.Reshape(img_shape_full))
model.add(layers.Conv2D(
    kernel_size=5,
    strides=1,
    filters=16,
    padding='same',
    activation=activation,
    name='layer_conv1'))
model.add(layers.MaxPooling2D(pool_size=2, strides=2))
# NOTE(review): snippet truncated below — the second Conv2D call is cut off.
model.add(layers.Conv2D(
    kernel_size=5,
    strides=1,
    filters=36,
    padding='same',
    activation=activation,
# Example #16
def deep_cnn(features_shape, num_classes, activation_function='relu'):
    """CNN classifier: 3 x (Conv -> MaxPool -> BatchNorm) -> Dense -> softmax.

    Args:
        features_shape: Shape tuple of a single input sample.
        num_classes: Number of output classes.
        activation_function: Activation for the conv and hidden dense layers.

    Returns:
        An uncompiled Keras Sequential model (summary is printed).
    """
    model = models.Sequential()

    model.add(
        layers.InputLayer(input_shape=features_shape,
                          name='Inputs',
                          dtype='float32'))

    # Blocks 1-3 are identical Conv -> MaxPool -> BatchNorm stacks.
    # FIX: the original also passed a redundant `input_shape` to the Block 1
    # Conv2D; the InputLayer above already fixes the input spec, so it is
    # dropped here.
    for block_number in (1, 2, 3):
        model.add(
            layers.Conv2D(filters=32,
                          kernel_size=(3, 3),
                          strides=1,
                          padding='same',
                          activation=activation_function,
                          name='Block{}_Convolution'.format(block_number)))
        model.add(
            layers.MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='same',
                                name='Block{}_MaxPooling'.format(block_number)))
        model.add(layers.BatchNormalization(
            name='Block{}_BatchNormalization'.format(block_number)))

    # Flatten
    model.add(layers.Flatten(name='Flatten'))

    # Dense block
    model.add(
        layers.Dense(units=64,
                     activation=activation_function,
                     name='Dense_Dense'))
    model.add(layers.BatchNormalization(name='Dense_BatchNormalization'))
    model.add(layers.Dropout(rate=0.2, name='Dense_Dropout'))

    # Predictions: softmax probability distribution over the classes.
    model.add(
        layers.Dense(units=num_classes,
                     activation='softmax',
                     name='Predictions_Dense'))

    # Print network summary
    model.summary()

    return model