Example #1
# Assumed imports for this snippet (`nn` is TensorFlow's tf.nn module).
from tensorflow import keras, nn
from tensorflow.keras import layers


def build_cnn_model(n_features: int, n_classes: int):
    """Build the P1FP(C) model using Keras."""
    model = keras.Sequential()
    model.add(layers.Reshape((1, n_features, 1), input_shape=(n_features, ),
              name="input"))

    model.add(layers.Conv2D(
        128, 12, activation="relu", kernel_regularizer="l2", padding="same"))
    model.add(layers.MaxPool2D(10, padding="same"))
    model.add(layers.Lambda(nn.local_response_normalization))

    model.add(layers.Conv2D(
        128, 12, activation="relu", kernel_regularizer="l2", padding="same"))
    model.add(layers.MaxPool2D(10, padding="same"))
    model.add(layers.Lambda(nn.local_response_normalization))

    # The tensor is flattened for the dense layers either way; tflearn kept the
    # flattened result implicitly, whereas Keras needs an explicit Flatten layer.
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation="tanh"))
    model.add(layers.Dropout(rate=0.2))
    model.add(layers.Dense(n_classes, activation="softmax", name="target"))

    learning_rate = keras.optimizers.schedules.ExponentialDecay(
        0.05, decay_steps=1000, decay_rate=0.96)
    model.compile(
        optimizer=keras.optimizers.SGD(learning_rate=learning_rate),
        loss="categorical_crossentropy",
        metrics=[keras.metrics.TopKCategoricalAccuracy(3), "accuracy"])

    return model
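
A minimal usage sketch; the feature and class counts below are placeholders, not
values from the original source, and the imports assumed above must be in scope:

# Hypothetical call: 5000 input features, 100 target classes.
model = build_cnn_model(n_features=5000, n_classes=100)
model.summary()
# The compiled model reports top-3 accuracy alongside plain accuracy and
# expects one-hot targets, since the loss is categorical_crossentropy.
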
Example #2
from tensorflow.keras import layers  # assumed import for this snippet


def get_keras_layers_for_mnist_experiment(num_components):
    """Get Keras layers for the MNIST experiment.

  Args:
    num_components: (int) number of components to use for every layer.

  Returns:
    A list of lists of `keras.layers.Layer`s, where the outer index corresponds
    to layer id, and inner index to component id within a layer.
  """
    keras_layers = []
    filters = 4

    keras_layers.append([
        layers.Conv2D(filters=filters, kernel_size=5, activation="relu")
        for _ in range(num_components)
    ])

    keras_layers.append([layers.AveragePooling2D(pool_size=2)])

    keras_layers.append([
        layers.Conv2D(filters=filters, kernel_size=3, activation="relu")
        for _ in range(num_components)
    ])

    keras_layers.append([layers.AveragePooling2D(pool_size=2)])

    keras_layers.append([
        layers.Conv2D(filters=filters, kernel_size=3, activation="relu")
        for _ in range(num_components)
    ])

    keras_layers.append([layers.Flatten()])

    keras_layers.append([layers.Dropout(0.5)])

    return keras_layers
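
A short sketch of the returned structure, assuming the import above; the routing
machinery that consumes these per-layer components is not part of this snippet:

# Hypothetical inspection of the layer/component structure.
keras_layers = get_keras_layers_for_mnist_experiment(num_components=3)
for layer_id, components in enumerate(keras_layers):
    print(layer_id, [type(c).__name__ for c in components])
# Layers 0, 2 and 4 each hold 3 parallel Conv2D components; the pooling,
# Flatten and Dropout layers are shared (a single component each).
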
Example #3
# Assumed import; `GroupNorm` and the components-layer helper used below are
# project-local definitions from the same source.
from tensorflow.keras import layers


def get_keras_layers_for_general_diversity_and_depth_model(
        layer_description, num_filters, num_layers, num_downsamples,
        group_norm_num_groups):
    """Gets Keras layers for the Omniglot and CIFAR-100 experiments.

  This model is a generalized version of the one proposed by the authors of
  "Diversity and Depth in Per-Example Routing Models"
  (https://openreview.net/pdf?id=BkxWJnC9tX).

  Args:
    layer_description: (list of string) description of a single layer, see
      `get_components_layer_for_general_diversity_and_depth_model`.
    num_filters: (int) number of filters for each convolution.
    num_layers: (int) number of layers.
    num_downsamples: (int) number of times the input should be downsampled by a
      factor of 2 before reaching the linear task-specific heads.
    group_norm_num_groups: (int) number of groups to use for group
      normalization.

  Returns:
    A list of lists of `keras.layers.Layer`s, where the outer index corresponds
    to layer id, and inner index to component id within a layer.
  """
    keras_layers = []

    # Initial shared 1x1 convolution, which increases the number of channels
    # from 1 to `num_filters`.
    keras_layers.append(
        [layers.Conv2D(filters=num_filters, kernel_size=1, padding="same")])

    keras_layers.append([GroupNorm(num_groups=group_norm_num_groups)])

    keras_layers.append([layers.ReLU()])

    downsampling_interval = num_layers / num_downsamples

    # Indices in `range(num_layers)` at which the input gets downsampled.
    downsampling_layers = [
        int(downsampling_interval * i) for i in range(num_downsamples)
    ]

    for layer_id in range(num_layers):
        if layer_id in downsampling_layers:
            layer_strides = 2
        else:
            layer_strides = 1

        keras_layers.append(
            get_components_layer_for_general_diversity_and_depth_model(
                layer_description, num_filters, group_norm_num_groups,
                layer_strides))

        keras_layers.append([GroupNorm(num_groups=group_norm_num_groups)])

        keras_layers.append([layers.ReLU()])

    # At this point, the feature map is `2^num_downsamples` times smaller.
    keras_layers.append([layers.Flatten()])

    keras_layers.append([layers.Dropout(0.5)])

    return keras_layers
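
A hypothetical call with illustrative hyperparameters; the layer_description
strings are placeholders whose valid values are defined by
`get_components_layer_for_general_diversity_and_depth_model` in the source
project:

# Placeholder arguments for illustration only.
keras_layers = get_keras_layers_for_general_diversity_and_depth_model(
    layer_description=["component_a", "component_b"],
    num_filters=32,
    num_layers=8,
    num_downsamples=2,
    group_norm_num_groups=4)
# 3 stem entries + 3 entries per routed layer + Flatten and Dropout:
print(len(keras_layers))  # 3 + 3 * 8 + 2 = 29
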
Example #4
# Assumed imports for this snippet.
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2


def stack_layers(inputs, net_layers, kernel_initializer='glorot_uniform'):
  """Builds the architecture of the network by applying each layer specified in net_layers to inputs.

  Args:
    inputs: a dict containing input_types and input_placeholders for each key
      and value pair, respectively.
    net_layers:  a list of dicts containing all layers to be used in the
      network, where each dict describes one such layer. each dict requires the
      key 'type'. all other keys are dependent on the layer type.
    kernel_initializer: initialization configuration passed to keras (see keras
      initializers).

  Returns:
    outputs: a dict formatted in much the same way as inputs. it
      contains input_types and output_tensors for each key and value pair,
      respectively, where output_tensors are the outputs of the
      input_placeholders in inputs after each layer in net_layers is applied.
  """
  outputs = dict()

  for key in inputs:
    outputs[key] = inputs[key]

  for layer in net_layers:
    # check for l2_reg argument
    l2_reg = layer.get('l2_reg')
    if l2_reg:
      l2_reg = l2(layer['l2_reg'])

    # create the layer
    if layer['type'] in [
        'softplus', 'softsign', 'softmax', 'tanh', 'sigmoid', 'relu', 'selu'
    ]:
      l = layers.Dense(
          layer['size'],
          activation=layer['type'],
          kernel_initializer=kernel_initializer,
          kernel_regularizer=l2_reg,
          name=layer.get('name'))
    elif layer['type'] == 'None':
      l = layers.Dense(
          layer['size'],
          kernel_initializer=kernel_initializer,
          kernel_regularizer=l2_reg,
          name=layer.get('name'))
    elif layer['type'] == 'Conv2D':
      l = layers.Conv2D(
          layer['channels'],
          kernel_size=layer['kernel'],
          activation='relu',
          data_format='channels_last',
          kernel_regularizer=l2_reg,
          name=layer.get('name'))
    elif layer['type'] == 'BatchNormalization':
      l = layers.BatchNormalization(name=layer.get('name'))
    elif layer['type'] == 'MaxPooling2D':
      l = layers.MaxPooling2D(
          pool_size=layer['pool_size'],
          data_format='channels_last',  # match the Conv2D data_format above
          name=layer.get('name'))
    elif layer['type'] == 'Dropout':
      l = layers.Dropout(layer['rate'], name=layer.get('name'))
    elif layer['type'] == 'Flatten':
      l = layers.Flatten(name=layer.get('name'))
    else:
      raise ValueError("Invalid layer type '{}'".format(layer['type']))

    # apply the layer to each input in inputs
    for k in outputs:
      outputs[k] = l(outputs[k])

  return outputs
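
A usage sketch under the imports assumed above; note that each constructed layer
`l` is applied to every key in `inputs`, so the branches share weights. The layer
specs and input shapes below are illustrative only:

from tensorflow.keras import Input

# Hypothetical two-branch usage with shared weights across branches.
arch = [
    {'type': 'relu', 'size': 128, 'l2_reg': 1e-4, 'name': 'hidden'},
    {'type': 'softmax', 'size': 10, 'name': 'probs'},
]
inputs = {'anchor': Input(shape=(784,)), 'positive': Input(shape=(784,))}
outputs = stack_layers(inputs, arch)  # dict of output tensors, same keys
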
Example #5
# Assumed imports for this snippet.
from tensorflow import keras
from tensorflow.keras import layers, initializers


def build_model(n_features: int, n_classes: int):
    """Create and return the DeepFingerprinting Model."""
    model = keras.Sequential()
    # Block1
    filter_num = ['None', 32, 64, 128, 256]
    kernel_size = ['None', 8, 8, 8, 8]
    conv_stride_size = ['None', 1, 1, 1, 1]
    pool_stride_size = ['None', 4, 4, 4, 4]
    pool_size = ['None', 8, 8, 8, 8]

    model.add(layers.Reshape((n_features, 1), input_shape=(n_features, )))
    model.add(
        layers.Conv1D(filters=filter_num[1],
                      kernel_size=kernel_size[1],
                      strides=conv_stride_size[1],
                      padding='same',
                      name='block1_conv1'))
    model.add(layers.BatchNormalization(axis=-1))
    model.add(layers.ELU(alpha=1.0, name='block1_adv_act1'))
    model.add(
        layers.Conv1D(filters=filter_num[1],
                      kernel_size=kernel_size[1],
                      strides=conv_stride_size[1],
                      padding='same',
                      name='block1_conv2'))
    model.add(layers.BatchNormalization(axis=-1))
    model.add(layers.ELU(alpha=1.0, name='block1_adv_act2'))
    model.add(
        layers.MaxPooling1D(pool_size=pool_size[1],
                            strides=pool_stride_size[1],
                            padding='same',
                            name='block1_pool'))
    model.add(layers.Dropout(0.1, name='block1_dropout'))

    model.add(
        layers.Conv1D(filters=filter_num[2],
                      kernel_size=kernel_size[2],
                      strides=conv_stride_size[2],
                      padding='same',
                      name='block2_conv1'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block2_act1'))

    model.add(
        layers.Conv1D(filters=filter_num[2],
                      kernel_size=kernel_size[2],
                      strides=conv_stride_size[2],
                      padding='same',
                      name='block2_conv2'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block2_act2'))
    model.add(
        layers.MaxPooling1D(pool_size=pool_size[2],
                            strides=pool_stride_size[2],
                            padding='same',
                            name='block2_pool'))
    model.add(layers.Dropout(0.1, name='block2_dropout'))

    model.add(
        layers.Conv1D(filters=filter_num[3],
                      kernel_size=kernel_size[3],
                      strides=conv_stride_size[3],
                      padding='same',
                      name='block3_conv1'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block3_act1'))
    model.add(
        layers.Conv1D(filters=filter_num[3],
                      kernel_size=kernel_size[3],
                      strides=conv_stride_size[3],
                      padding='same',
                      name='block3_conv2'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block3_act2'))
    model.add(
        layers.MaxPooling1D(pool_size=pool_size[3],
                            strides=pool_stride_size[3],
                            padding='same',
                            name='block3_pool'))
    model.add(layers.Dropout(0.1, name='block3_dropout'))

    model.add(
        layers.Conv1D(filters=filter_num[4],
                      kernel_size=kernel_size[4],
                      strides=conv_stride_size[4],
                      padding='same',
                      name='block4_conv1'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block4_act1'))
    model.add(
        layers.Conv1D(filters=filter_num[4],
                      kernel_size=kernel_size[4],
                      strides=conv_stride_size[4],
                      padding='same',
                      name='block4_conv2'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block4_act2'))
    model.add(
        layers.MaxPooling1D(pool_size=pool_size[4],
                            strides=pool_stride_size[4],
                            padding='same',
                            name='block4_pool'))
    model.add(layers.Dropout(0.1, name='block4_dropout'))

    model.add(layers.Flatten(name='flatten'))
    model.add(
        layers.Dense(512,
                     kernel_initializer=initializers.glorot_uniform(seed=0),
                     name='fc1'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='fc1_act'))

    model.add(layers.Dropout(0.7, name='fc1_dropout'))

    model.add(
        layers.Dense(512,
                     kernel_initializer=initializers.glorot_uniform(seed=0),
                     name='fc2'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='fc2_act'))

    model.add(layers.Dropout(0.5, name='fc2_dropout'))

    model.add(
        layers.Dense(n_classes,
                     kernel_initializer=initializers.glorot_uniform(seed=0),
                     name='fc3'))
    model.add(layers.Activation('softmax', name="softmax"))
    model.compile(loss="categorical_crossentropy",
                  optimizer=keras.optimizers.Adamax(learning_rate=0.002,
                                                    beta_1=0.9,
                                                    beta_2=0.999,
                                                    epsilon=1e-08),
                  metrics=["accuracy"])

    return model
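
A brief usage sketch with placeholder sizes (not values taken from the original
snippet), assuming the imports above:

# Hypothetical call: 5000 input features, 95 target classes.
df_model = build_model(n_features=5000, n_classes=95)
df_model.summary()
# Training expects one-hot labels, e.g.:
# df_model.fit(x_train, y_train_onehot, batch_size=128, epochs=30)
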
Example #6
#testing = testing.prefetch(tf.data.experimental.AUTOTUNE)

# Assumed imports for this snippet.
import tensorflow as tf
from tensorflow.keras import layers

model = tf.compat.v1.keras.Sequential()
model.add(
    layers.Conv2D(filters=64,
                  kernel_size=4,
                  strides=2,
                  padding='valid',
                  use_bias=True,
                  input_shape=(32, 32, 3)))
model.add(layers.BatchNormalization())
model.add(layers.Activation(tf.nn.leaky_relu))
model.add(layers.Conv2D(128, 4, 2, 'valid', use_bias=True))
model.add(layers.BatchNormalization())
model.add(layers.Activation(tf.nn.leaky_relu))
model.add(layers.Conv2D(256, 1, 1, 'valid', use_bias=True))
model.add(layers.BatchNormalization())
model.add(layers.Conv2D(256, 1, 1, 'valid', use_bias=True))
model.add(layers.Flatten())
model.add(layers.Dense(100, activation='softmax'))

model.compile(optimizer='Adam',
              loss=tf.compat.v1.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
print(model)
print(model.summary())
model.fit(xtrain, ytrain, batch_size=64, epochs=50, verbose=1, shuffle=True)

test_loss, test_accuracy = model.evaluate(xtest, ytest, verbose=1)
print(test_accuracy)
print(test_loss)
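
`xtrain`, `ytrain`, `xtest` and `ytest` are never defined in this snippet. Given
the (32, 32, 3) input shape, the 100-way softmax head and the sparse categorical
loss, a CIFAR-100-style loading step along these lines would fit; this is an
assumption, not part of the original source, and it would have to run before the
`model.fit` call above:

# Hypothetical data preparation consistent with the model's input shape and loss.
(xtrain, ytrain), (xtest, ytest) = tf.keras.datasets.cifar100.load_data()
xtrain = xtrain.astype("float32") / 255.0
xtest = xtest.astype("float32") / 255.0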