# Common imports assumed by the examples below (the snippets come from several
# different codebases, so adjust these to match the originals as needed).
import functools

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend, initializers, layers, models, regularizers
from tensorflow.keras.regularizers import l2
import tensorflow_probability as tfp

tfd = tfp.distributions  # used by the variational residual blocks
tfpl = tfp.layers        # e.g. Convolution2DFlipout, DenseVariational


# Example 1
def get_model(cfg, encoder_inputs, encoder_outputs):

    decoder_inputs = layers.Input(shape=(None, ),
                                  name='Decoder-Input')  # for teacher forcing

    dec_emb = layers.Embedding(cfg.num_input_tokens,
                               cfg.latent_dim,
                               name='Decoder-Embedding',
                               mask_zero=False)(decoder_inputs)

    dec_bn = layers.BatchNormalization(name='Decoder-Batchnorm-1')(dec_emb)

    decoder_gru = layers.GRU(cfg.latent_dim,
                             return_state=True,
                             return_sequences=True,
                             name='Decoder-GRU')

    decoder_gru_output, _ = decoder_gru(dec_bn, initial_state=encoder_outputs)

    x = layers.BatchNormalization(
        name='Decoder-Batchnorm-2')(decoder_gru_output)
    decoder_dense = layers.Dense(cfg.num_output_tokens,
                                 activation='softmax',
                                 name='Final-Output-Dense')

    decoder_outputs = decoder_dense(x)

    model = models.Model([encoder_inputs, decoder_inputs], decoder_outputs)

    return model


# Example 2
def build_two_branch_model(inputs):
    """Two parallel convolutions followed by Add and Concat."""
    branch1 = layers.Conv2D(filters=10, kernel_size=3, name='conv1')(inputs)
    branch1 = layers.BatchNormalization(name='bn1')(branch1)
    branch1 = layers.Activation(tf.nn.relu, name='activation1')(branch1)

    branch2 = layers.Conv2D(filters=10, kernel_size=3, name='conv2')(inputs)
    branch2 = layers.BatchNormalization(name='bn2')(branch2)
    branch2 = layers.Activation(tf.nn.relu, name='activation2')(branch2)

    merge = layers.Add()([branch1, branch2])
    concat = layers.Concatenate(axis=-1)([merge, branch1, branch2])
    return concat, branch1, branch2
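

# Hedged usage sketch for the helper above: wrap the two branches in a
# `keras.Model`. The 28x28x1 input shape is illustrative; `Add` works because
# both branches produce tensors of identical shape.
image_input = layers.Input(shape=(28, 28, 1))
two_branch_out, _, _ = build_two_branch_model(image_input)
two_branch_model = models.Model(inputs=image_input, outputs=two_branch_out)
two_branch_model.summary()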


# Example 3
def build_simple_keras_model(inputs):
    """Builds lightweight Keras model."""
    x = inputs
    x = layers.Conv2D(filters=10, kernel_size=3, name='conv')(x)
    x = layers.BatchNormalization(name='bn')(x)
    x = layers.Activation(tf.nn.relu, name='activation')(x)
    return x


# Example 4
    def dilated_basic_1d_block(x):
        y = layers.Conv1D(
            filters, kernel_size, padding='causal', strides=stride,
            dilation_rate=dilations[0], use_bias=False,
            name='res{}{}_branch2a_{}'.format(stage_char, block_char, suffix),
            **PARAMETERS)(x)
        y = layers.BatchNormalization(
            epsilon=1e-5, name='bn{}{}_branch2a_{}'.format(
                stage_char, block_char, suffix))(y)
        y = layers.Activation('relu', name='res{}{}_branch2a_relu_{}'.format(
            stage_char, block_char, suffix))(y)

        y = layers.Conv1D(
            filters, kernel_size, padding='causal', use_bias=False,
            dilation_rate=dilations[1], name='res{}{}_branch2b_{}'.format(
                stage_char, block_char, suffix),
            **PARAMETERS)(y)
        y = layers.BatchNormalization(
            epsilon=1e-5, name='bn{}{}_branch2b_{}'.format(
                stage_char, block_char, suffix))(y)

        if block == 0:
            shortcut = layers.Conv1D(
                filters, 1, strides=stride, use_bias=False,
                name='res{}{}_branch1_{}'.format(
                    stage_char, block_char, suffix), **PARAMETERS)(x)
            shortcut = layers.BatchNormalization(
                epsilon=1e-5, name='bn{}{}_branch1_{}'.format(
                    stage_char, block_char,
                    suffix))(shortcut)
        else:
            shortcut = x

        y = layers.Add(
            name='res{}{}_{}'.format(stage_char, block_char, suffix))(
                [y, shortcut])
        y = layers.Activation(
            'relu', name='res{}{}_relu_{}'.format(
                stage_char, block_char, suffix))(y)
        return y
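

# The function above is an inner closure: `filters`, `kernel_size`, `stride`,
# `dilations`, `suffix`, `stage_char`, `block_char`, `block` and `PARAMETERS`
# come from an enclosing factory, which Example 5 calls as
# `block(features, suffix, stage_id, block_id, dilations=..., numerical_name=...)`.
# Below is a hedged sketch of what such a factory could look like (modeled on
# keras-resnet-style block factories; layer names and the `PARAMETERS` kwargs
# are omitted, so this is an illustration rather than the original code).
def dilated_basic_1d(filters, suffix, stage=0, block=0, kernel_size=3,
                     numerical_name=False, stride=None, dilations=(1, 1)):
    """Returns a callable 1-D residual block."""
    if stride is None:
        # Down-sample at the first block of every stage except the first.
        stride = 1 if block != 0 or stage == 0 else 2

    # `suffix` and `numerical_name` only affect layer naming in the original
    # code; they are kept here solely for signature compatibility.

    def dilated_basic_1d_block(x):
        # Main path: conv -> BN -> ReLU -> conv -> BN.
        y = layers.Conv1D(filters, kernel_size, padding='causal',
                          strides=stride, dilation_rate=dilations[0],
                          use_bias=False)(x)
        y = layers.BatchNormalization(epsilon=1e-5)(y)
        y = layers.Activation('relu')(y)
        y = layers.Conv1D(filters, kernel_size, padding='causal',
                          dilation_rate=dilations[1], use_bias=False)(y)
        y = layers.BatchNormalization(epsilon=1e-5)(y)

        # Project the shortcut at the first block of a stage, where the
        # stride/width changes.
        if block == 0:
            shortcut = layers.Conv1D(filters, 1, strides=stride,
                                     use_bias=False)(x)
            shortcut = layers.BatchNormalization(epsilon=1e-5)(shortcut)
        else:
            shortcut = x

        y = layers.Add()([y, shortcut])
        return layers.Activation('relu')(y)

    return dilated_basic_1d_block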


# Example 5
def ResNet18(inputs, suffix, blocks=None, block=None, numerical_names=None):
    """Constructs a `keras.models.Model` object using the given block count.

    :param inputs: input tensor (e.g. an instance of `keras.layers.Input`)
    :param blocks: the network’s residual architecture
    :param block: a residual block (e.g. an instance of
        `keras_resnet.blocks.basic_2d`)
    :param numerical_names: list of bool, same size as blocks, used to
        indicate whether names of layers should include numbers or letters
    :return model: ResNet model with encoding output (if `include_top=False`)
        or classification output (if `include_top=True`)
    """
    if blocks is None:
        blocks = [2, 2, 2, 2]
    if block is None:
        block = dilated_basic_1d
    if numerical_names is None:
        numerical_names = [True] * len(blocks)

    x = layers.ZeroPadding1D(padding=3, name='padding_conv1_' + suffix)(inputs)
    x = layers.Conv1D(
        64, 7, strides=2, use_bias=False, name='conv1_' + suffix)(x)
    x = layers.BatchNormalization(
        epsilon=1e-5, name='bn_conv1_' + suffix)(x)
    x = layers.Activation('relu', name='conv1_relu_' + suffix)(x)
    x = layers.MaxPooling1D(
        3, strides=2, padding='same', name='pool1_' + suffix)(x)

    features = 64
    outputs = []

    for stage_id, iterations in enumerate(blocks):
        x = block(features, suffix, stage_id, 0, dilations=(1, 2),
                  numerical_name=False)(x)
        for block_id in range(1, iterations):
            x = block(features, suffix, stage_id, block_id, dilations=(4, 8),
                      numerical_name=(block_id > 0
                                      and numerical_names[stage_id]))(x)

        features *= 2
        outputs.append(x)

    x = layers.GlobalAveragePooling1D(name='pool5_' + suffix)(x)
    return x


# Example 6
def get_encoder_model(cfg):
    encoder_inputs = layers.Input(shape=(cfg.len_input_seq, ),
                                  name='Encoder-Input')

    x = layers.Embedding(cfg.num_input_tokens,
                         cfg.latent_dim,
                         name='Encoder-Embedding',
                         mask_zero=False)(encoder_inputs)

    x = layers.BatchNormalization(name='Encoder-Batchnorm-1')(x)

    _, state_h = layers.GRU(cfg.latent_dim,
                            return_state=True,
                            name='Encoder-Last-GRU')(x)

    encoder_model = models.Model(inputs=encoder_inputs,
                                 outputs=state_h,
                                 name='Encoder-Model')

    encoder_outputs = encoder_model(encoder_inputs)

    return encoder_model, encoder_inputs, encoder_outputs
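

# Hedged sketch of how the encoder (Example 6) and decoder (Example 1) are
# meant to compose into a trainable seq2seq model. The `cfg` values below are
# purely illustrative.
from types import SimpleNamespace

cfg = SimpleNamespace(num_input_tokens=8000, num_output_tokens=6000,
                      latent_dim=300, len_input_seq=100)
encoder_model, encoder_inputs, encoder_outputs = get_encoder_model(cfg)
seq2seq_model = get_model(cfg, encoder_inputs, encoder_outputs)
# With integer-encoded target sequences, a sparse loss is a reasonable choice.
seq2seq_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
seq2seq_model.summary()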


# Example 7
def identity_block_base(input_tensor,
                        kernel_size,
                        filters,
                        stage,
                        block,
                        num_updates,
                        dropout_rate=0.,
                        use_variational_layers=False):
    """The identity block is the block that has no conv layer at shortcut.

  Arguments:
      input_tensor: input tensor
      kernel_size: default 3, the kernel size of
          middle conv layer at main path
      filters: list of integers, the filters of the 3 conv layers at main path
      stage: integer, current stage label, used for generating layer names
      block: 'a','b'..., current block label, used for generating layer names
      num_updates: integer, total steps in an epoch (for weighting the loss)
      dropout_rate: float, always-on dropout rate.
      use_variational_layers: boolean, if true train a variational model

  Returns:
      x: Output tensor for the block.
  """
    filters1, filters2, filters3 = filters
    divergence_fn = (
        lambda q, p, ignore: tfd.kl_divergence(q, p) / num_updates)
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    if not use_variational_layers:
        first_conv_2d = layers.Conv2D(
            filters1, (1, 1),
            use_bias=False,
            kernel_initializer='he_normal',
            kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
            name=conv_name_base + '2a')
        if dropout_rate > 0.:
            x = layers.Dropout(dropout_rate)(input_tensor, training=True)
            x = first_conv_2d(x)
        else:
            x = first_conv_2d(input_tensor)
        x = layers.BatchNormalization(axis=bn_axis,
                                      momentum=BATCH_NORM_DECAY,
                                      epsilon=BATCH_NORM_EPSILON,
                                      name=bn_name_base + '2a')(x)
        x = layers.Activation('relu')(x)
        if dropout_rate > 0.:
            x = layers.Dropout(dropout_rate)(x, training=True)
        x = layers.Conv2D(filters2,
                          kernel_size,
                          use_bias=False,
                          padding='same',
                          kernel_initializer='he_normal',
                          kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                          name=conv_name_base + '2b')(x)
        x = layers.BatchNormalization(axis=bn_axis,
                                      momentum=BATCH_NORM_DECAY,
                                      epsilon=BATCH_NORM_EPSILON,
                                      name=bn_name_base + '2b')(x)
        x = layers.Activation('relu')(x)
        if dropout_rate > 0.:
            x = layers.Dropout(dropout_rate)(x, training=True)
        x = layers.Conv2D(filters3, (1, 1),
                          use_bias=False,
                          kernel_initializer='he_normal',
                          kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                          name=conv_name_base + '2c')(x)
        x = layers.BatchNormalization(axis=bn_axis,
                                      momentum=BATCH_NORM_DECAY,
                                      epsilon=BATCH_NORM_EPSILON,
                                      name=bn_name_base + '2c')(x)
    else:
        x = tfpl.Convolution2DFlipout(
            filters1,
            kernel_size=(1, 1),
            padding='SAME',
            name=conv_name_base + '2a',
            kernel_divergence_fn=divergence_fn,
        )(input_tensor)
        x = layers.BatchNormalization(axis=bn_axis,
                                      momentum=BATCH_NORM_DECAY,
                                      epsilon=BATCH_NORM_EPSILON,
                                      name=bn_name_base + '2a')(x)
        x = layers.Activation('relu')(x)
        x = tfpl.Convolution2DFlipout(
            filters2,
            kernel_size=kernel_size,
            padding='SAME',
            activation=None,
            name=conv_name_base + '2b',
            kernel_divergence_fn=divergence_fn,
        )(x)
        x = layers.BatchNormalization(axis=bn_axis,
                                      momentum=BATCH_NORM_DECAY,
                                      epsilon=BATCH_NORM_EPSILON,
                                      name=bn_name_base + '2b')(x)
        x = layers.Activation('relu')(x)

        x = tfpl.Convolution2DFlipout(
            filters3,
            kernel_size=(1, 1),
            padding='SAME',
            activation=None,
            name=conv_name_base + '2c',
            kernel_divergence_fn=divergence_fn,
        )(x)
        x = layers.BatchNormalization(axis=bn_axis,
                                      momentum=BATCH_NORM_DECAY,
                                      epsilon=BATCH_NORM_EPSILON,
                                      name=bn_name_base + '2c')(x)
    x = layers.add([x, input_tensor])
    x = layers.Activation('relu')(x)
    return x


# Example 8
def ResNet50(method, num_classes, num_updates, dropout_rate):  # pylint: disable=invalid-name
    """Instantiates the ResNet50 architecture.

  Args:
    method: `str`, method for accounting for uncertainty. Must be one of
      ['vanilla', 'll_dropout', 'll_svi', 'dropout', 'svi', 'dropout_nofirst']
    num_classes: `int` number of classes for image classification.
    num_updates: integer, total steps in an epoch (for weighting the loss)
    dropout_rate: Dropout rate for ll_dropout, dropout methods.

  Returns:
      A Keras model instance.
  """

    # Determine proper input shape
    if backend.image_data_format() == 'channels_first':
        input_shape = (3, 224, 224)
        bn_axis = 1
    else:
        input_shape = (224, 224, 3)
        bn_axis = 3

    if (method in ['dropout', 'll_dropout', 'dropout_nofirst'
                   ]) != (dropout_rate > 0.):
        raise ValueError(
            'Dropout rate should be nonzero iff a dropout method is used. '
            'Method is {}, dropout rate is {}.'.format(method, dropout_rate))

    use_variational_layers = method == 'svi'
    hidden_layer_dropout = dropout_rate if method in [
        'dropout', 'dropout_nofirst'
    ] else 0.

    img_input = layers.Input(shape=input_shape)
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
    if (dropout_rate > 0.) and (method != 'dropout_nofirst'):
        x = layers.Dropout(hidden_layer_dropout)(x, training=True)
    x = layers.Conv2D(64, (7, 7),
                      use_bias=False,
                      strides=(2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    conv_block = functools.partial(
        conv_block_base,
        num_updates=num_updates,
        dropout_rate=hidden_layer_dropout,
        use_variational_layers=use_variational_layers)
    identity_block = functools.partial(
        identity_block_base,
        num_updates=num_updates,
        dropout_rate=hidden_layer_dropout,
        use_variational_layers=use_variational_layers)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)

    if dropout_rate > 0.:
        x = layers.Dropout(dropout_rate)(x, training=True)

    if method in ['ll_svi', 'svi']:

        x = tfpl.dense_variational_v2.DenseVariational(
            units=num_classes,
            make_posterior_fn=posterior_mean_field,
            make_prior_fn=functools.partial(prior_trainable,
                                            num_updates=num_updates),
            use_bias=True,
            kl_weight=1. / num_updates,
            kl_use_exact=True,
            name='fc1000')(x)
    else:
        x = layers.Dense(num_classes,
                         kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                         bias_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                         name='fc1000')(x)

    # Create model.
    return models.Model(img_input, x, name='resnet50')
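

# Illustrative instantiation of the uncertainty-aware ResNet-50 above (hedged:
# it also relies on `conv_block_base`, `posterior_mean_field`,
# `prior_trainable` and the module-level constants L2_WEIGHT_DECAY,
# BATCH_NORM_DECAY and BATCH_NORM_EPSILON, none of which are shown here).
# `num_updates` is the number of optimizer steps per epoch, used above to
# scale the KL terms.
resnet_mc_dropout = ResNet50(method='dropout', num_classes=10,
                             num_updates=1000, dropout_rate=0.1)
resnet_mc_dropout.compile(optimizer='adam',
                          loss='sparse_categorical_crossentropy',
                          metrics=['accuracy'])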


# Example 9
def stack_layers(inputs, net_layers, kernel_initializer='glorot_uniform'):
  """Builds the architecture of the network by applying each layer specified in net_layers to inputs.

  Args:
    inputs: a dict containing input_types and input_placeholders for each key
      and value pair, respectively.
    net_layers:  a list of dicts containing all layers to be used in the
      network, where each dict describes one such layer. each dict requires the
      key 'type'. all other keys are dependent on the layer type.
    kernel_initializer: initialization configuration passed to keras (see keras
      initializers).

  Returns:
    outputs: a dict formatted in much the same way as inputs. it
      contains input_types and output_tensors for each key and value pair,
      respectively, where output_tensors are the outputs of the
      input_placeholders in inputs after each layer in net_layers is applied.
  """
  outputs = dict()

  for key in inputs:
    outputs[key] = inputs[key]

  for layer in net_layers:
    # check for l2_reg argument
    l2_reg = layer.get('l2_reg')
    if l2_reg:
      l2_reg = l2(layer['l2_reg'])

    # create the layer
    if layer['type'] in [
        'softplus', 'softsign', 'softmax', 'tanh', 'sigmoid', 'relu', 'selu'
    ]:
      l = layers.Dense(
          layer['size'],
          activation=layer['type'],
          kernel_initializer=kernel_initializer,
          kernel_regularizer=l2_reg,
          name=layer.get('name'))
    elif layer['type'] == 'None':
      l = layers.Dense(
          layer['size'],
          kernel_initializer=kernel_initializer,
          kernel_regularizer=l2_reg,
          name=layer.get('name'))
    elif layer['type'] == 'Conv2D':
      l = layers.Conv2D(
          layer['channels'],
          kernel_size=layer['kernel'],
          activation='relu',
          data_format='channels_last',
          kernel_regularizer=l2_reg,
          name=layer.get('name'))
    elif layer['type'] == 'BatchNormalization':
      l = layers.BatchNormalization(name=layer.get('name'))
    elif layer['type'] == 'MaxPooling2D':
      l = layers.MaxPooling2D(
          pool_size=layer['pool_size'],
          data_format='channels_first',
          name=layer.get('name'))
    elif layer['type'] == 'Dropout':
      l = layers.Dropout(layer['rate'], name=layer.get('name'))
    elif layer['type'] == 'Flatten':
      l = layers.Flatten(name=layer.get('name'))
    else:
      raise ValueError("Invalid layer type '{}'".format(layer['type']))

    # apply the layer to each input in inputs
    for k in outputs:
      outputs[k] = l(outputs[k])

  return outputs
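

# Hedged usage example for the config-driven builder above; the keys, sizes
# and regularization strengths are illustrative.
spectral_inputs = {
    'Unlabeled': layers.Input(shape=(784,)),
    'Labeled': layers.Input(shape=(784,)),
}
spectral_layers = [
    {'type': 'relu', 'size': 512, 'l2_reg': 1e-4, 'name': 'dense1'},
    {'type': 'Dropout', 'rate': 0.3, 'name': 'drop1'},
    {'type': 'tanh', 'size': 10, 'name': 'embedding'},
]
# Each layer instance is created once and applied to every input, so all
# inputs share the same weights.
spectral_outputs = stack_layers(spectral_inputs, spectral_layers)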


# Example 10
def build_model(
    n_classes: int,
    n_packet_features: int,
    n_meta_features: int = 7,
    dilations: bool = True,
    tag: str = "varcnn",
):
    """Build the Var-CNN model.

    The resulting model takes a single input of shape
    (n_samples, n_packet_features + n_meta_features). The meta features
    must be the rightmost (last) features in the matrix.  The model
    handles separating the two types of features and reshaping them
    as necessary.

    Parameters
    ----------
    n_classes :
        The number of classes to be predicted.

    n_packet_features :
        The number of packet features such as the number of interarrival
        times or the number of packet directions or sizes.

    n_meta_features :
        The number of meta features such as total packet counts, total
        transmission duration, etc.

    dilations :
        Whether the residual blocks use dilated convolutions
        (`dilated_basic_1d`) or not (`basic_1d`).

    tag :
        Suffix appended to layer names, allowing several Var-CNN branches to
        share one graph.

    Returns
    -------
    A compiled `keras.Model`.
    """
    use_metadata = n_meta_features > 0

    # Constructs dir or time ResNet
    input_layer = keras.Input(
        shape=(n_packet_features + n_meta_features, ), name="input")

    layer = (Crop(end=n_packet_features)(input_layer)
             if use_metadata else input_layer)
    layer = layers.Reshape((n_packet_features, 1))(layer)
    output_layer = ResNet18(
        layer, tag, block=(dilated_basic_1d if dilations else basic_1d))

    concat_params = [output_layer]
    combined = concat_params[0]

    # Construct MLP for metadata
    if use_metadata:
        metadata_output = Crop(start=-n_meta_features)(input_layer)
        # consider this the embedding of all the metadata
        metadata_output = layers.Dense(32)(metadata_output)
        metadata_output = layers.BatchNormalization()(
            metadata_output)
        metadata_output = layers.Activation('relu')(metadata_output)

        concat_params.append(metadata_output)
        combined = layers.Concatenate()(concat_params)

    # Better to have final fc layer if combining multiple models
    if len(concat_params) > 1:
        combined = layers.Dense(1024)(combined)
        combined = layers.BatchNormalization()(combined)
        combined = layers.Activation('relu')(combined)
        combined = layers.Dropout(0.5)(combined)

    model_output = layers.Dense(units=n_classes, activation='softmax',
                                name='model_output')(combined)

    model = keras.Model(inputs=input_layer, outputs=model_output)
    model.compile(
        loss='categorical_crossentropy', metrics=['accuracy'],
        optimizer=keras.optimizers.Adam(0.001))

    return model
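

# `Crop`, `dilated_basic_1d` and `basic_1d` are referenced above but not shown.
# A minimal sketch of what a slicing `Crop` helper could look like (an
# assumption, not necessarily how Var-CNN implements it):
def Crop(start=None, end=None):
    """Returns a layer that slices the last (feature) axis to [start:end]."""
    return layers.Lambda(lambda t: t[:, start:end])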


# Example 11
def build_model(n_features: int, n_classes: int):
    """Create and return the DeepFingerprinting Model."""
    model = keras.Sequential()
    # Block1
    # Index 0 is an unused placeholder so the indices line up with block
    # numbers 1-4 below.
    filter_num = ['None', 32, 64, 128, 256]
    kernel_size = ['None', 8, 8, 8, 8]
    conv_stride_size = ['None', 1, 1, 1, 1]
    pool_stride_size = ['None', 4, 4, 4, 4]
    pool_size = ['None', 8, 8, 8, 8]

    model.add(layers.Reshape((n_features, 1), input_shape=(n_features, )))
    model.add(
        layers.Conv1D(filters=filter_num[1],
                      kernel_size=kernel_size[1],
                      strides=conv_stride_size[1],
                      padding='same',
                      name='block1_conv1'))
    model.add(layers.BatchNormalization(axis=-1))
    model.add(layers.ELU(alpha=1.0, name='block1_adv_act1'))
    model.add(
        layers.Conv1D(filters=filter_num[1],
                      kernel_size=kernel_size[1],
                      strides=conv_stride_size[1],
                      padding='same',
                      name='block1_conv2'))
    model.add(layers.BatchNormalization(axis=-1))
    model.add(layers.ELU(alpha=1.0, name='block1_adv_act2'))
    model.add(
        layers.MaxPooling1D(pool_size=pool_size[1],
                            strides=pool_stride_size[1],
                            padding='same',
                            name='block1_pool'))
    model.add(layers.Dropout(0.1, name='block1_dropout'))

    model.add(
        layers.Conv1D(filters=filter_num[2],
                      kernel_size=kernel_size[2],
                      strides=conv_stride_size[2],
                      padding='same',
                      name='block2_conv1'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block2_act1'))

    model.add(
        layers.Conv1D(filters=filter_num[2],
                      kernel_size=kernel_size[2],
                      strides=conv_stride_size[2],
                      padding='same',
                      name='block2_conv2'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block2_act2'))
    model.add(
        layers.MaxPooling1D(pool_size=pool_size[2],
                            strides=pool_stride_size[2],
                            padding='same',
                            name='block2_pool'))
    model.add(layers.Dropout(0.1, name='block2_dropout'))

    model.add(
        layers.Conv1D(filters=filter_num[3],
                      kernel_size=kernel_size[3],
                      strides=conv_stride_size[3],
                      padding='same',
                      name='block3_conv1'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block3_act1'))
    model.add(
        layers.Conv1D(filters=filter_num[3],
                      kernel_size=kernel_size[3],
                      strides=conv_stride_size[3],
                      padding='same',
                      name='block3_conv2'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block3_act2'))
    model.add(
        layers.MaxPooling1D(pool_size=pool_size[3],
                            strides=pool_stride_size[3],
                            padding='same',
                            name='block3_pool'))
    model.add(layers.Dropout(0.1, name='block3_dropout'))

    model.add(
        layers.Conv1D(filters=filter_num[4],
                      kernel_size=kernel_size[4],
                      strides=conv_stride_size[4],
                      padding='same',
                      name='block4_conv1'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block4_act1'))
    model.add(
        layers.Conv1D(filters=filter_num[4],
                      kernel_size=kernel_size[4],
                      strides=conv_stride_size[4],
                      padding='same',
                      name='block4_conv2'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='block4_act2'))
    model.add(
        layers.MaxPooling1D(pool_size=pool_size[4],
                            strides=pool_stride_size[4],
                            padding='same',
                            name='block4_pool'))
    model.add(layers.Dropout(0.1, name='block4_dropout'))

    model.add(layers.Flatten(name='flatten'))
    model.add(
        layers.Dense(512,
                     kernel_initializer=initializers.glorot_uniform(seed=0),
                     name='fc1'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='fc1_act'))

    model.add(layers.Dropout(0.7, name='fc1_dropout'))

    model.add(
        layers.Dense(512,
                     kernel_initializer=initializers.glorot_uniform(seed=0),
                     name='fc2'))
    model.add(layers.BatchNormalization())
    model.add(layers.Activation('relu', name='fc2_act'))

    model.add(layers.Dropout(0.5, name='fc2_dropout'))

    model.add(
        layers.Dense(n_classes,
                     kernel_initializer=initializers.glorot_uniform(seed=0),
                     name='fc3'))
    model.add(layers.Activation('softmax', name="softmax"))
    model.compile(loss="categorical_crossentropy",
                  optimizer=keras.optimizers.Adamax(lr=0.002,
                                                    beta_1=0.9,
                                                    beta_2=0.999,
                                                    epsilon=1e-08,
                                                    decay=0.0),
                  metrics=["accuracy"])

    return model
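

# Hedged usage sketch: the Deep Fingerprinting model above expects flat traces
# of packet directions/sizes and one-hot labels (to match the categorical
# cross-entropy loss). The shapes and counts below are illustrative.
df_model = build_model(n_features=5000, n_classes=95)
df_model.summary()
# df_model.fit(x_train, tf.keras.utils.to_categorical(y_train, 95),
#              epochs=30, batch_size=128)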


# Example 12
#training = tf.compat.v2.data.Dataset.from_tensor_slices((xtrain,ytrain))
#testing = tf.compat.v2.data.Dataset.from_tensor_slices((xtest,ytest))
#training = training.batch(64).shuffle(buffer_size=64)
#testing = testing.batch(64).shuffle(buffer_size=64)
#training = training.prefetch(tf.data.experimental.AUTOTUNE)
#testing = testing.prefetch(tf.data.experimental.AUTOTUNE)

model = tf.compat.v1.keras.Sequential()
model.add(
    layers.Conv2D(filters=64,
                  kernel_size=4,
                  strides=2,
                  padding='valid',
                  use_bias=True,
                  input_shape=(32, 32, 3)))
model.add(layers.BatchNormalization())
model.add(layers.Activation(tf.nn.leaky_relu))
model.add(layers.Conv2D(128, 4, 2, 'valid', use_bias=True))
model.add(layers.BatchNormalization())
model.add(layers.Activation(tf.nn.leaky_relu))
model.add(layers.Conv2D(256, 1, 1, 'valid', use_bias=True))
model.add(layers.BatchNormalization())
model.add(layers.Conv2D(256, 1, 1, 'valid', use_bias=True))
model.add(layers.Flatten())
model.add(layers.Dense(100, activation='softmax'))

model.compile(optimizer='Adam',
              loss=tf.compat.v1.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
print(model)
model.summary()