Code Example #1
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.conv_utils import convert_kernel

def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in a model from Theano to TensorFlow.

  Also works from TensorFlow to Theano.

  This is used for converting legacy Theano-saved model files.

  Arguments:
      model: target model for the conversion.
  """
  # Note: SeparableConvolution not included
  # since only supported by TF.
  conv_classes = {
      'Conv1D',
      'Conv2D',
      'Conv3D',
      'Conv2DTranspose',
  }
  to_assign = []
  for layer in model.layers:
    if layer.__class__.__name__ in conv_classes:
      original_kernel = K.get_value(layer.kernel)
      converted_kernel = convert_kernel(original_kernel)
      to_assign.append((layer.kernel, converted_kernel))
  K.batch_set_value(to_assign)
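
All of these examples ultimately rely on `convert_kernel`, which flips a convolution kernel along its spatial axes (the conversion is its own inverse). A minimal sketch of the equivalent NumPy operation, assuming a channels-last kernel whose last two axes are input and output channels (`flip_spatial_axes` is a hypothetical name for illustration; the real helper lives in tensorflow.python.keras.utils.conv_utils):

import numpy as np

def flip_spatial_axes(kernel):
  # Reverse every axis except the last two (input/output channels).
  # For a Conv2D kernel of shape (rows, cols, in_ch, out_ch) this flips
  # rows and cols, which is exactly the Theano <-> TensorFlow change.
  spatial = kernel.ndim - 2
  slices = [slice(None, None, -1)] * spatial + [slice(None)] * 2
  return np.copy(kernel[tuple(slices)])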
Code Example #2
def _flip_conv_weights(self):
    # Theano -> TensorFlow: flip the weight arrays along the first two
    # (spatial) dimensions; the kernel shape is unchanged.
    from tensorflow.keras import backend
    from tensorflow.python.keras.utils.conv_utils import convert_kernel
    for layer in self.net.layers:
        if layer.__class__.__name__ == 'Conv2D':
            original_w = backend.eval(layer.kernel)
            converted_w = convert_kernel(original_w)
            layer.kernel.assign(converted_w)
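
Because the flip is an involution, converting twice restores the original kernel; a quick sanity check using the same import as above:

import numpy as np
from tensorflow.python.keras.utils.conv_utils import convert_kernel

kernel = np.random.rand(3, 3, 16, 32)  # (rows, cols, in_ch, out_ch)
assert np.array_equal(kernel, convert_kernel(convert_kernel(kernel)))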
Code Example #3
File: layer_utils.py  Project: Wajih-O/tensorflow
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.conv_utils import convert_kernel

def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in a model from Theano to TensorFlow.

  Also works from TensorFlow to Theano.

  Arguments:
      model: target model for the conversion.
  """
  # Note: SeparableConvolution not included
  # since only supported by TF.
  conv_classes = {
      'Conv1D',
      'Conv2D',
      'Conv3D',
      'Conv2DTranspose',
  }
  to_assign = []
  for layer in model.layers:
    if layer.__class__.__name__ in conv_classes:
      original_kernel = K.get_value(layer.kernel)
      converted_kernel = convert_kernel(original_kernel)
      to_assign.append((layer.kernel, converted_kernel))
  K.batch_set_value(to_assign)
Code Example #4
File: hdf5_format.py  Project: rmlarsen/tensorflow
def preprocess_weights_for_loading(layer,
                                   weights,
                                   original_keras_version=None,
                                   original_backend=None):
  """Preprocess layer weights between different Keras formats.

  Converts layer weights from Keras 1 format to Keras 2, and also converts the
  weights of CuDNN layers in Keras 2.

  Arguments:
      layer: Layer instance.
      weights: List of weights values (Numpy arrays).
      original_keras_version: Keras version for the weights, as a string.
      original_backend: Keras backend the weights were trained with,
          as a string.

  Returns:
      A list of weights values (Numpy arrays).
  """
  def convert_nested_bidirectional(weights):
    """Converts layers nested in `Bidirectional` wrapper.

    This function uses `preprocess_weights_for_loading()` for converting
    layers.

    Arguments:
        weights: List of weights values (Numpy arrays).

    Returns:
        A list of weights values (Numpy arrays).
    """
    num_weights_per_layer = len(weights) // 2
    forward_weights = preprocess_weights_for_loading(
        layer.forward_layer, weights[:num_weights_per_layer],
        original_keras_version, original_backend)
    backward_weights = preprocess_weights_for_loading(
        layer.backward_layer, weights[num_weights_per_layer:],
        original_keras_version, original_backend)
    return forward_weights + backward_weights

  def convert_nested_time_distributed(weights):
    """Converts layers nested in `TimeDistributed` wrapper.

    This function uses `preprocess_weights_for_loading()` for converting nested
    layers.

    Arguments:
        weights: List of weights values (Numpy arrays).

    Returns:
        A list of weights values (Numpy arrays).
    """
    return preprocess_weights_for_loading(
        layer.layer, weights, original_keras_version, original_backend)

  def convert_nested_model(weights):
    """Converts layers nested in `Model` or `Sequential`.

    This function uses `preprocess_weights_for_loading()` for converting nested
    layers.

    Arguments:
        weights: List of weights values (Numpy arrays).

    Returns:
        A list of weights values (Numpy arrays).
    """
    new_weights = []
    # trainable weights
    for sublayer in layer.layers:
      num_weights = len(sublayer.trainable_weights)
      if num_weights > 0:
        new_weights.extend(preprocess_weights_for_loading(
            layer=sublayer,
            weights=weights[:num_weights],
            original_keras_version=original_keras_version,
            original_backend=original_backend))
        weights = weights[num_weights:]

    # non-trainable weights
    for sublayer in layer.layers:
      num_weights = len([l for l in sublayer.weights
                         if l not in sublayer.trainable_weights])
      if num_weights > 0:
        new_weights.extend(preprocess_weights_for_loading(
            layer=sublayer,
            weights=weights[:num_weights],
            original_keras_version=original_keras_version,
            original_backend=original_backend))
        weights = weights[num_weights:]
    return new_weights

  # Convert layers nested in Bidirectional/Model/Sequential.
  # Both transformations should be run for both the Keras 1->2 conversion
  # and the conversion of CuDNN layers.
  if layer.__class__.__name__ == 'Bidirectional':
    weights = convert_nested_bidirectional(weights)
  if layer.__class__.__name__ == 'TimeDistributed':
    weights = convert_nested_time_distributed(weights)
  elif layer.__class__.__name__ in ['Model', 'Sequential']:
    weights = convert_nested_model(weights)

  if original_keras_version == '1':
    if layer.__class__.__name__ == 'TimeDistributed':
      weights = preprocess_weights_for_loading(
          layer.layer, weights, original_keras_version, original_backend)

    if layer.__class__.__name__ == 'Conv1D':
      shape = weights[0].shape
      # Handle Keras 1.1 format
      if shape[:2] != (layer.kernel_size[0], 1) or shape[3] != layer.filters:
        # Legacy shape:
        # (filters, input_dim, filter_length, 1)
        assert shape[0] == layer.filters and shape[2:] == (layer.kernel_size[0],
                                                           1)
        weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
      weights[0] = weights[0][:, 0, :, :]

    if layer.__class__.__name__ == 'Conv2D':
      if layer.data_format == 'channels_first':
        # old: (filters, stack_size, kernel_rows, kernel_cols)
        # new: (kernel_rows, kernel_cols, stack_size, filters)
        weights[0] = np.transpose(weights[0], (2, 3, 1, 0))

    if layer.__class__.__name__ == 'Conv2DTranspose':
      if layer.data_format == 'channels_last':
        # old: (kernel_rows, kernel_cols, stack_size, filters)
        # new: (kernel_rows, kernel_cols, filters, stack_size)
        weights[0] = np.transpose(weights[0], (0, 1, 3, 2))
      if layer.data_format == 'channels_first':
        # old: (filters, stack_size, kernel_rows, kernel_cols)
        # new: (kernel_rows, kernel_cols, filters, stack_size)
        weights[0] = np.transpose(weights[0], (2, 3, 0, 1))

    if layer.__class__.__name__ == 'Conv3D':
      if layer.data_format == 'channels_first':
        # old: (filters, stack_size, ...)
        # new: (..., stack_size, filters)
        weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))

    if layer.__class__.__name__ == 'GRU':
      if len(weights) == 9:
        kernel = np.concatenate([weights[0], weights[3], weights[6]], axis=-1)
        recurrent_kernel = np.concatenate(
            [weights[1], weights[4], weights[7]], axis=-1)
        bias = np.concatenate([weights[2], weights[5], weights[8]], axis=-1)
        weights = [kernel, recurrent_kernel, bias]

    if layer.__class__.__name__ == 'LSTM':
      if len(weights) == 12:
        # old: i, c, f, o
        # new: i, f, c, o
        kernel = np.concatenate(
            [weights[0], weights[6], weights[3], weights[9]], axis=-1)
        recurrent_kernel = np.concatenate(
            [weights[1], weights[7], weights[4], weights[10]], axis=-1)
        bias = np.concatenate(
            [weights[2], weights[8], weights[5], weights[11]], axis=-1)
        weights = [kernel, recurrent_kernel, bias]

    if layer.__class__.__name__ == 'ConvLSTM2D':
      if len(weights) == 12:
        kernel = np.concatenate(
            [weights[0], weights[6], weights[3], weights[9]], axis=-1)
        recurrent_kernel = np.concatenate(
            [weights[1], weights[7], weights[4], weights[10]], axis=-1)
        bias = np.concatenate(
            [weights[2], weights[8], weights[5], weights[11]], axis=-1)
        if layer.data_format == 'channels_first':
          # old: (filters, stack_size, kernel_rows, kernel_cols)
          # new: (kernel_rows, kernel_cols, stack_size, filters)
          kernel = np.transpose(kernel, (2, 3, 1, 0))
          recurrent_kernel = np.transpose(recurrent_kernel, (2, 3, 1, 0))
        weights = [kernel, recurrent_kernel, bias]

  conv_layers = ['Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D']
  if layer.__class__.__name__ in conv_layers:
    if original_backend == 'theano':
      weights[0] = conv_utils.convert_kernel(weights[0])
      if layer.__class__.__name__ == 'ConvLSTM2D':
        weights[1] = conv_utils.convert_kernel(weights[1])
    if K.int_shape(layer.weights[0]) != weights[0].shape:
      weights[0] = np.transpose(weights[0], (3, 2, 0, 1))
      if layer.__class__.__name__ == 'ConvLSTM2D':
        weights[1] = np.transpose(weights[1], (3, 2, 0, 1))

  # convert CuDNN layers
  return _convert_rnn_weights(layer, weights)
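
In hdf5_format.py this preprocessing runs inside the weight-loading path; a hedged sketch of the call pattern (the layer object and raw weight arrays are placeholders, and the version/backend strings would normally come from the saved file's metadata):

# Hypothetical usage sketch, not the exact loading code:
weight_values = preprocess_weights_for_loading(
    layer, weight_values,
    original_keras_version='1',   # e.g. read from the HDF5 file's attributes
    original_backend='theano')    # likewise from the file's metadata
K.batch_set_value(list(zip(layer.weights, weight_values)))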
Code Example #5
def preprocess_weights_for_loading(layer,
                                   weights,
                                   original_keras_version=None,
                                   original_backend=None):
    """Preprocess layer weights between different Keras formats.

  Converts layer weights from Keras 1 format to Keras 2, and also converts the
  weights of CuDNN layers in Keras 2.

  Arguments:
      layer: Layer instance.
      weights: List of weights values (Numpy arrays).
      original_keras_version: Keras version for the weights, as a string.
      original_backend: Keras backend the weights were trained with,
          as a string.

  Returns:
      A list of weights values (Numpy arrays).
  """
    def convert_nested_bidirectional(weights):
        """Converts layers nested in `Bidirectional` wrapper.

    This function uses `preprocess_weights_for_loading()` for converting
    layers.

    Arguments:
        weights: List of weights values (Numpy arrays).

    Returns:
        A list of weights values (Numpy arrays).
    """
        num_weights_per_layer = len(weights) // 2
        forward_weights = preprocess_weights_for_loading(
            layer.forward_layer, weights[:num_weights_per_layer],
            original_keras_version, original_backend)
        backward_weights = preprocess_weights_for_loading(
            layer.backward_layer, weights[num_weights_per_layer:],
            original_keras_version, original_backend)
        return forward_weights + backward_weights

    def convert_nested_time_distributed(weights):
        """Converts layers nested in `TimeDistributed` wrapper.

    This function uses `preprocess_weights_for_loading()` for converting nested
    layers.

    Arguments:
        weights: List of weights values (Numpy arrays).

    Returns:
        A list of weights values (Numpy arrays).
    """
        return preprocess_weights_for_loading(layer.layer, weights,
                                              original_keras_version,
                                              original_backend)

    def convert_nested_model(weights):
        """Converts layers nested in `Model` or `Sequential`.

    This function uses `preprocess_weights_for_loading()` for converting nested
    layers.

    Arguments:
        weights: List of weights values (Numpy arrays).

    Returns:
        A list of weights values (Numpy arrays).
    """
        trainable_weights = weights[:len(layer.trainable_weights)]
        non_trainable_weights = weights[len(layer.trainable_weights):]

        new_trainable_weights = []
        new_non_trainable_weights = []

        for sublayer in layer.layers:
            num_trainable_weights = len(sublayer.trainable_weights)
            num_non_trainable_weights = len(sublayer.non_trainable_weights)
            if sublayer.weights:
                preprocessed = preprocess_weights_for_loading(
                    layer=sublayer,
                    weights=(
                        trainable_weights[:num_trainable_weights] +
                        non_trainable_weights[:num_non_trainable_weights]),
                    original_keras_version=original_keras_version,
                    original_backend=original_backend)
                new_trainable_weights.extend(
                    preprocessed[:num_trainable_weights])
                new_non_trainable_weights.extend(
                    preprocessed[num_trainable_weights:])

                trainable_weights = trainable_weights[num_trainable_weights:]
                non_trainable_weights = non_trainable_weights[
                    num_non_trainable_weights:]

        return new_trainable_weights + new_non_trainable_weights

    # Convert layers nested in Bidirectional/Model/Sequential.
    # Both transformations should be run for both the Keras 1->2 conversion
    # and the conversion of CuDNN layers.
    if layer.__class__.__name__ == 'Bidirectional':
        weights = convert_nested_bidirectional(weights)
    if layer.__class__.__name__ == 'TimeDistributed':
        weights = convert_nested_time_distributed(weights)
    elif layer.__class__.__name__ in ['Model', 'Sequential']:
        weights = convert_nested_model(weights)

    if original_keras_version == '1':
        if layer.__class__.__name__ == 'TimeDistributed':
            weights = preprocess_weights_for_loading(layer.layer, weights,
                                                     original_keras_version,
                                                     original_backend)

        if layer.__class__.__name__ == 'Conv1D':
            shape = weights[0].shape
            # Handle Keras 1.1 format
            if shape[:2] != (layer.kernel_size[0],
                             1) or shape[3] != layer.filters:
                # Legacy shape:
                # (filters, input_dim, filter_length, 1)
                assert shape[0] == layer.filters and shape[2:] == (
                    layer.kernel_size[0], 1)
                weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
            weights[0] = weights[0][:, 0, :, :]

        if layer.__class__.__name__ == 'Conv2D':
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, kernel_rows, kernel_cols)
                # new: (kernel_rows, kernel_cols, stack_size, filters)
                weights[0] = np.transpose(weights[0], (2, 3, 1, 0))

        if layer.__class__.__name__ == 'Conv2DTranspose':
            if layer.data_format == 'channels_last':
                # old: (kernel_rows, kernel_cols, stack_size, filters)
                # new: (kernel_rows, kernel_cols, filters, stack_size)
                weights[0] = np.transpose(weights[0], (0, 1, 3, 2))
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, kernel_rows, kernel_cols)
                # new: (kernel_rows, kernel_cols, filters, stack_size)
                weights[0] = np.transpose(weights[0], (2, 3, 0, 1))

        if layer.__class__.__name__ == 'Conv3D':
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, ...)
                # new: (..., stack_size, filters)
                weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))

        if layer.__class__.__name__ == 'GRU':
            if len(weights) == 9:
                kernel = np.concatenate([weights[0], weights[3], weights[6]],
                                        axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[4], weights[7]], axis=-1)
                bias = np.concatenate([weights[2], weights[5], weights[8]],
                                      axis=-1)
                weights = [kernel, recurrent_kernel, bias]

        if layer.__class__.__name__ == 'LSTM':
            if len(weights) == 12:
                # old: i, c, f, o
                # new: i, f, c, o
                kernel = np.concatenate(
                    [weights[0], weights[6], weights[3], weights[9]], axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[7], weights[4], weights[10]], axis=-1)
                bias = np.concatenate(
                    [weights[2], weights[8], weights[5], weights[11]], axis=-1)
                weights = [kernel, recurrent_kernel, bias]

        if layer.__class__.__name__ == 'ConvLSTM2D':
            if len(weights) == 12:
                kernel = np.concatenate(
                    [weights[0], weights[6], weights[3], weights[9]], axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[7], weights[4], weights[10]], axis=-1)
                bias = np.concatenate(
                    [weights[2], weights[8], weights[5], weights[11]], axis=-1)
                if layer.data_format == 'channels_first':
                    # old: (filters, stack_size, kernel_rows, kernel_cols)
                    # new: (kernel_rows, kernel_cols, stack_size, filters)
                    kernel = np.transpose(kernel, (2, 3, 1, 0))
                    recurrent_kernel = np.transpose(recurrent_kernel,
                                                    (2, 3, 1, 0))
                weights = [kernel, recurrent_kernel, bias]

    conv_layers = [
        'Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D'
    ]
    if layer.__class__.__name__ in conv_layers:
        if original_backend == 'theano':
            weights[0] = conv_utils.convert_kernel(weights[0])
            if layer.__class__.__name__ == 'ConvLSTM2D':
                weights[1] = conv_utils.convert_kernel(weights[1])
        if K.int_shape(layer.weights[0]) != weights[0].shape:
            weights[0] = np.transpose(weights[0], (3, 2, 0, 1))
            if layer.__class__.__name__ == 'ConvLSTM2D':
                weights[1] = np.transpose(weights[1], (3, 2, 0, 1))

    # convert CuDNN layers
    return _convert_rnn_weights(layer, weights)
Code Example #6
def preprocess_weights_for_loading(layer,
                                   weights,
                                   original_keras_version=None,
                                   original_backend=None):
    """Converts layers weights from Keras 1 format to Keras 2.

  Arguments:
      layer: Layer instance.
      weights: List of weights values (Numpy arrays).
      original_keras_version: Keras version for the weights, as a string.
      original_backend: Keras backend the weights were trained with,
          as a string.

  Returns:
      A list of weights values (Numpy arrays).
  """
    if layer.__class__.__name__ == 'Bidirectional':
        num_weights_per_layer = len(weights) // 2
        forward_weights = preprocess_weights_for_loading(
            layer.forward_layer, weights[:num_weights_per_layer],
            original_keras_version, original_backend)
        backward_weights = preprocess_weights_for_loading(
            layer.backward_layer, weights[num_weights_per_layer:],
            original_keras_version, original_backend)
        weights = forward_weights + backward_weights

    if original_keras_version == '1':
        if layer.__class__.__name__ == 'TimeDistributed':
            weights = preprocess_weights_for_loading(layer.layer, weights,
                                                     original_keras_version,
                                                     original_backend)

        if layer.__class__.__name__ == 'Conv1D':
            shape = weights[0].shape
            # Handle Keras 1.1 format
            if shape[:2] != (layer.kernel_size[0],
                             1) or shape[3] != layer.filters:
                # Legacy shape:
                # (filters, input_dim, filter_length, 1)
                assert shape[0] == layer.filters and shape[2:] == (
                    layer.kernel_size[0], 1)
                weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
            weights[0] = weights[0][:, 0, :, :]

        if layer.__class__.__name__ == 'Conv2D':
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, kernel_rows, kernel_cols)
                # new: (kernel_rows, kernel_cols, stack_size, filters)
                weights[0] = np.transpose(weights[0], (2, 3, 1, 0))

        if layer.__class__.__name__ == 'Conv2DTranspose':
            if layer.data_format == 'channels_last':
                # old: (kernel_rows, kernel_cols, stack_size, filters)
                # new: (kernel_rows, kernel_cols, filters, stack_size)
                weights[0] = np.transpose(weights[0], (0, 1, 3, 2))
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, kernel_rows, kernel_cols)
                # new: (kernel_rows, kernel_cols, filters, stack_size)
                weights[0] = np.transpose(weights[0], (2, 3, 0, 1))

        if layer.__class__.__name__ == 'Conv3D':
            if layer.data_format == 'channels_first':
                # old: (filters, stack_size, ...)
                # new: (..., stack_size, filters)
                weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))

        if layer.__class__.__name__ == 'GRU':
            if len(weights) == 9:
                kernel = np.concatenate([weights[0], weights[3], weights[6]],
                                        axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[4], weights[7]], axis=-1)
                bias = np.concatenate([weights[2], weights[5], weights[8]],
                                      axis=-1)
                weights = [kernel, recurrent_kernel, bias]

        if layer.__class__.__name__ == 'LSTM':
            if len(weights) == 12:
                # old: i, c, f, o
                # new: i, f, c, o
                kernel = np.concatenate(
                    [weights[0], weights[6], weights[3], weights[9]], axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[7], weights[4], weights[10]], axis=-1)
                bias = np.concatenate(
                    [weights[2], weights[8], weights[5], weights[11]], axis=-1)
                weights = [kernel, recurrent_kernel, bias]

        if layer.__class__.__name__ == 'ConvLSTM2D':
            if len(weights) == 12:
                kernel = np.concatenate(
                    [weights[0], weights[6], weights[3], weights[9]], axis=-1)
                recurrent_kernel = np.concatenate(
                    [weights[1], weights[7], weights[4], weights[10]], axis=-1)
                bias = np.concatenate(
                    [weights[2], weights[8], weights[5], weights[11]], axis=-1)
                if layer.data_format == 'channels_first':
                    # old: (filters, stack_size, kernel_rows, kernel_cols)
                    # new: (kernel_rows, kernel_cols, stack_size, filters)
                    kernel = np.transpose(kernel, (2, 3, 1, 0))
                    recurrent_kernel = np.transpose(recurrent_kernel,
                                                    (2, 3, 1, 0))
                weights = [kernel, recurrent_kernel, bias]

        if layer.__class__.__name__ in ['Model', 'Sequential']:
            new_weights = []
            # trainable weights
            for sublayer in layer.layers:
                num_weights = len(sublayer.trainable_weights)
                if num_weights > 0:
                    new_weights.extend(
                        preprocess_weights_for_loading(
                            layer=sublayer,
                            weights=weights[:num_weights],
                            original_keras_version=original_keras_version,
                            original_backend=original_backend))
                    weights = weights[num_weights:]

            # non-trainable weights
            for sublayer in layer.layers:
                num_weights = len([
                    l for l in sublayer.weights
                    if l not in sublayer.trainable_weights
                ])
                if num_weights > 0:
                    new_weights.extend(
                        preprocess_weights_for_loading(
                            layer=sublayer,
                            weights=weights[:num_weights],
                            original_keras_version=original_keras_version,
                            original_backend=original_backend))
                    weights = weights[num_weights:]
            weights = new_weights

    conv_layers = [
        'Conv1D', 'Conv2D', 'Conv3D', 'Conv2DTranspose', 'ConvLSTM2D'
    ]
    if layer.__class__.__name__ in conv_layers:
        if original_backend == 'theano':
            weights[0] = conv_utils.convert_kernel(weights[0])
            if layer.__class__.__name__ == 'ConvLSTM2D':
                weights[1] = conv_utils.convert_kernel(weights[1])
        if K.int_shape(layer.weights[0]) != weights[0].shape:
            weights[0] = np.transpose(weights[0], (3, 2, 0, 1))
            if layer.__class__.__name__ == 'ConvLSTM2D':
                weights[1] = np.transpose(weights[1], (3, 2, 0, 1))

    return _convert_rnn_weights(layer, weights)
Code Example #7
def create_cnn_model(weights_path=None):
    # Creates our CNN model.
    # An n*m convolution with l input feature maps and k output feature maps
    # has n*m*k*l kernel weights (e.g. l=32 feature maps in, k=64 out), plus
    # one bias per output feature map: (n*m*l + 1)*k parameters in total.
    '''
    PARAMETERS
    https://towardsdatascience.com/understanding-and-calculating-the-number-of-parameters-in-convolution-neural-networks-cnns-fc88790d530d
    https://medium.com/@shashikachamod4u/calculate-output-size-and-number-of-trainable-parameters-in-a-convolution-layer-1d64cae6c009
    https://medium.com/@iamvarman/how-to-calculate-the-number-of-parameters-in-the-cnn-5bd55364d7ca
    https://cs231n.github.io/convolutional-networks/
    '''
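
    # Worked example of the formula above: conv1_2/3x3_s1 below maps l=32
    # input feature maps to k=32 outputs with a 3x3 kernel, giving
    # (3*3*32 + 1) * 32 = 9248 trainable parameters.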

    input = Input(shape=(1, IMG_WIDTH, IMG_HEIGHT))
    input_pad = ZeroPadding2D(padding=(3, 3))(input)

    conv1_1_3x3_s1 = Conv2D(32, (3, 3),
                            strides=(1, 1),
                            padding='same',
                            activation='relu',
                            name='conv1_1/3x3_s1',
                            kernel_regularizer=l2(l2_regulizer))(input_pad)
    conv1_2_3x3_s1 = Conv2D(
        32, (3, 3),
        strides=(1, 1),
        padding='same',
        activation='relu',
        name='conv1_2/3x3_s1',
        kernel_regularizer=l2(l2_regulizer))(conv1_1_3x3_s1)
    conv1_zero_pad = ZeroPadding2D(padding=(1, 1))(conv1_2_3x3_s1)
    pool1_helper = PoolHelper()(conv1_zero_pad)
    pool1_2_2x2_s1 = MaxPooling2D(pool_size=(2, 2),
                                  strides=(1, 1),
                                  padding='same',
                                  name='pool1/2x2_s1')(pool1_helper)
    pool1_norm1 = LRN(name='pool1/norm1')(pool1_2_2x2_s1)

    conv2_1_3x3_reduce = Conv2D(
        64, (1, 1),
        padding='same',
        activation='relu',
        name='conv2_1/3x3_reduce',
        kernel_regularizer=l2(l2_regulizer))(pool1_norm1)
    conv2_2_3x3 = Conv2D(
        64, (3, 3),
        padding='same',
        activation='relu',
        name='conv2_2/3x3',
        kernel_regularizer=l2(l2_regulizer))(conv2_1_3x3_reduce)
    conv2_norm2 = LRN(name='conv2/norm2')(conv2_2_3x3)
    conv2_zero_pad = ZeroPadding2D(padding=(1, 1))(conv2_norm2)
    pool2_helper = PoolHelper()(conv2_zero_pad)
    pool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='same',
                                name='pool2/3x3_s2')(pool2_helper)

    conv3_1_3x3_s1 = Conv2D(128, (3, 3),
                            strides=(1, 1),
                            padding='same',
                            activation='relu',
                            name='conv3_1/3x3_s1',
                            kernel_regularizer=l2(l2_regulizer))(pool2_3x3_s2)
    conv3_2_3x3_s1 = Conv2D(
        128, (3, 3),
        strides=(1, 1),
        padding='same',
        activation='relu',
        name='conv3_2/3x3_s1',
        kernel_regularizer=l2(l2_regulizer))(conv3_1_3x3_s1)
    conv3_zero_pad = ZeroPadding2D(padding=(1, 1))(conv3_2_3x3_s1)
    pool3_helper = PoolHelper()(conv3_zero_pad)
    pool3_2_2x2_s1 = MaxPooling2D(pool_size=(2, 2),
                                  strides=(1, 1),
                                  padding='same',
                                  name='pool3/2x2_s1')(pool3_helper)
    pool3_norm1 = LRN(name='pool3/norm1')(pool3_2_2x2_s1)

    conv4_1_3x3_reduce = Conv2D(
        256, (1, 1),
        padding='same',
        activation='relu',
        name='conv4_1/3x3_reduce',
        kernel_regularizer=l2(l2_regulizer))(pool3_norm1)
    conv4_2_3x3 = Conv2D(
        256, (3, 3),
        padding='same',
        activation='relu',
        name='conv4_2/3x3',
        kernel_regularizer=l2(l2_regulizer))(conv4_1_3x3_reduce)
    conv4_norm2 = LRN(name='conv4/norm2')(conv4_2_3x3)
    conv4_zero_pad = ZeroPadding2D(padding=(1, 1))(conv4_norm2)
    pool4_helper = PoolHelper()(conv4_zero_pad)
    pool4_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='same',
                                name='pool4/3x3_s2')(pool4_helper)

    conv5_1_3x3_s1 = Conv2D(512, (3, 3),
                            strides=(1, 1),
                            padding='same',
                            activation='relu',
                            name='conv5_1/3x3_s1',
                            kernel_regularizer=l2(l2_regulizer))(pool4_3x3_s2)
    conv5_2_3x3_s1 = Conv2D(
        512, (3, 3),
        strides=(1, 1),
        padding='same',
        activation='relu',
        name='conv5_2/3x3_s1',
        kernel_regularizer=l2(l2_regulizer))(conv5_1_3x3_s1)
    conv5_zero_pad = ZeroPadding2D(padding=(1, 1))(conv5_2_3x3_s1)
    pool5_helper = PoolHelper()(conv5_zero_pad)
    pool5_2_2x2_s1 = MaxPooling2D(pool_size=(2, 2),
                                  strides=(1, 1),
                                  padding='same',
                                  name='pool5/2x2_s1')(pool5_helper)
    pool5_norm1 = LRN(name='pool5/norm1')(pool5_2_2x2_s1)

    conv6_1_3x3_reduce = Conv2D(
        1024, (1, 1),
        padding='same',
        activation='relu',
        name='conv6_1/3x3_reduce',
        kernel_regularizer=l2(l2_regulizer))(pool5_norm1)
    conv6_2_3x3 = Conv2D(
        1024, (3, 3),
        padding='same',
        activation='relu',
        name='conv6_2/3x3',
        kernel_regularizer=l2(l2_regulizer))(conv6_1_3x3_reduce)
    conv6_norm2 = LRN(name='conv6/norm2')(conv6_2_3x3)
    conv6_zero_pad = ZeroPadding2D(padding=(1, 1))(conv6_norm2)
    pool6_helper = PoolHelper()(conv6_zero_pad)
    pool6_3x3_s2 = MaxPooling2D(pool_size=(3, 3),
                                strides=(2, 2),
                                padding='same',
                                name='pool6/3x3_s2')(pool6_helper)

    pool7_2x2_s1 = AveragePooling2D(pool_size=(2, 2),
                                    strides=(1, 1),
                                    name='pool7/2x2_s1')(pool6_3x3_s2)

    loss_flat = Flatten()(pool7_2x2_s1)
    pool7_drop_2x2_s1 = Dropout(rate=0.5)(loss_flat)
    loss_classifier = Dense(
        num_classes,
        name='loss3/classifier',
        kernel_regularizer=l2(l2_regulizer))(pool7_drop_2x2_s1)
    loss_classifier_act = Activation('softmax', name='prob')(loss_classifier)

    mynet = Model(inputs=input, outputs=[loss_classifier_act])

    if weights_path:
        mynet.load_weights(weights_path)

    if keras.backend.backend() == 'tensorflow':
        # convert the convolutional kernels for tensorflow
        ops = []
        for layer in mynet.layers:
            if layer.__class__.__name__ == 'Conv2D':
                original_w = K.get_value(layer.kernel)
                converted_w = convert_kernel(original_w)
                ops.append(tf.assign(layer.kernel, converted_w).op)
        K.get_session().run(ops)

    return mynet
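
The kernel-conversion loop above is TensorFlow 1.x style (`tf.assign` plus an explicit session run). Under TensorFlow 2.x eager execution, a hedged equivalent would update the variables directly:

# Sketch of the same conversion under TF 2.x (eager mode assumed):
for layer in mynet.layers:
    if layer.__class__.__name__ == 'Conv2D':
        converted_w = convert_kernel(layer.kernel.numpy())
        layer.kernel.assign(converted_w)  # in-place variable update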