Example #1
def instantiate_lenet_fix_weights():
    import numpy as np
    from tensorflow.python import keras
    from tensorflow.python.keras import layers
    model = keras.Sequential()

    model.add(
        layers.Conv2D(filters=6,
                      kernel_size=(3, 3),
                      activation='relu',
                      input_shape=(32, 32, 1),
                      kernel_initializer=MyInitializer(10.0)))
    model.add(layers.AveragePooling2D())
    model.add(
        layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      activation='relu',
                      kernel_initializer=MyInitializer(20.0)))
    model.add(layers.AveragePooling2D())
    model.add(layers.Flatten())
    model.add(
        layers.Dense(units=120,
                     activation='relu',
                     kernel_initializer=MyInitializer(30.0)))
    model.add(
        layers.Dense(units=84,
                     activation='relu',
                     kernel_initializer=MyInitializer(0.4)))
    model.add(
        layers.Dense(units=10,
                     activation='softmax',
                     kernel_initializer=MyInitializer(5.5)))

    inp = keras.Input(shape=(32, 32, 1), dtype=np.float32)
    out = model(inp)
    return out, inp, model
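The snippet above assumes a `MyInitializer` class defined elsewhere in the source file. A minimal sketch of such a custom Keras initializer (an assumption for illustration, not the original implementation) could be:

import tensorflow as tf

class MyInitializer(tf.keras.initializers.Initializer):
    """Hypothetical constant-style initializer parameterized by a scalar."""

    def __init__(self, value):
        self.value = value

    def __call__(self, shape, dtype=None):
        # Fill the kernel with the given scalar value.
        return tf.fill(shape, tf.cast(self.value, dtype or tf.float32))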
Example #2
def LeNet5(input_shape, num_classes, pretrained_weights=None):
    input_image = layers.Input(shape=input_shape, name='input_1')
    x = layers.Conv2D(filters=6,
                      kernel_size=(3, 3),
                      activation=activations.relu)(input_image)
    x = layers.AveragePooling2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.AveragePooling2D(pool_size=(2, 2))(x)
    x = layers.Conv2D(filters=16,
                      kernel_size=(3, 3),
                      activation=activations.relu)(x)
    x = layers.AveragePooling2D(pool_size=(2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dense(units=120, activation=activations.relu)(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(units=84, activation=activations.relu)(x)
    x = layers.Dense(units=num_classes,
                     activation=activations.softmax,
                     name='predictions')(x)
    model = models.Model(input_image, x, name='lenet5')
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
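Assuming the surrounding module imports `layers`, `activations`, and `models` from `tensorflow.keras`, the function above can be exercised as follows (illustrative shapes only):

from tensorflow.keras import layers, activations, models

model = LeNet5(input_shape=(32, 32, 1), num_classes=10)
model.summary()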
Example #3
def create_models():
    input = layers.Input((32, 32, 3))
    x = input
    for i in range(3):
        x = conv_bn_relu(x, 64)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 128)
    x = layers.AveragePooling2D(2)(x)
    for i in range(3):
        x = conv_bn_relu(x, 256)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(10, activation="softmax")(x)

    return Model(input, x)
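`conv_bn_relu` and `Model` come from the surrounding module. A plausible sketch of the helper, inferred from its name and usage (an assumption, not the original code), would be:

from tensorflow.keras import layers
from tensorflow.keras.models import Model

def conv_bn_relu(x, filters, kernel_size=3):
    # Convolution followed by batch normalization and ReLU activation.
    x = layers.Conv2D(filters, kernel_size, padding="same", use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    return layers.ReLU()(x)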
Example #4
 def transition_block(self,
                      x,
                      reduction,
                      name,
                      weight_decay=1e-4,
                      kernel_initializer='he_normal'):
     """A transition block.
     # Arguments
         x: input tensor.
         reduction: float, compression rate at transition layers.
         name: string, block label.
     # Returns
         output tensor for the block.
     """
     bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
     x = layers.BatchNormalization(axis=bn_axis,
                                   epsilon=1.001e-5,
                                   name=name + '_bn')(x)
     x = layers.Activation('relu', name=name + '_relu')(x)
     x = layers.Conv2D(int(K.int_shape(x)[bn_axis] * reduction),
                       1,
                       use_bias=False,
                       kernel_initializer=kernel_initializer,
                       kernel_regularizer=l2(weight_decay),
                       name=name + '_conv')(x)
     x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
     return x
Example #5
def ResNet50(input_tensor=None,
             pooling=None,
             **kwargs):
    """Instantiates the ResNet50 architecture.
    # Arguments       
    # Returns
        A Keras model instance.
    """
    # Input arguments
    include_top = get_varargin(kwargs, 'include_top', True)
    nb_classes = get_varargin(kwargs, 'nb_classes', 1000)
    default_input_shape = _obtain_input_shape(None,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)
    input_shape = get_varargin(kwargs, 'input_shape', default_input_shape)
    if input_tensor is None:
        img_input = KL.Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = KL.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
            
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1        

    x = KL.ZeroPadding2D((3, 3))(img_input)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = KL.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = KL.Activation('relu')(x)
    x = KL.MaxPooling2D((3, 3), strides=(2, 2))(x)
    
    for stage, nb_block in zip([2, 3, 4, 5], [3, 4, 6, 3]):
        for blk in range(nb_block):
            conv_block = (blk == 0)
            strides = (2, 2) if stage > 2 and blk == 0 else (1, 1)
            x = identity_block(x, stage=stage, block_id=blk + 1,
                               conv_block=conv_block, strides=strides)
            
    x = KL.AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = KL.Flatten()(x)
        x = KL.Dense(nb_classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = KL.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = KL.GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
        
    # Create model.
    model = Model(inputs, x, name='resnet50')
    return model
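`get_varargin` and `identity_block` are helpers from the surrounding module. Judging from its usage, `get_varargin` is simply a keyword-argument getter with a default, roughly:

def get_varargin(kwargs, key, default):
    # Return the value supplied via **kwargs, falling back to the default.
    return kwargs.get(key, default)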
Example #6
    def small_densenet(self,
                       img_input_shape=(64, 64, 3),
                       blocks=[6, 12, 24, 16],
                       weight_decay=1e-4,
                       kernel_initializer='he_normal',
                       init_filters=None,
                       reduction=None,
                       growth_rate=None,
                       init_stride=None):
        img_input = layers.Input(shape=(img_input_shape))

        x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
        x = layers.Conv2D(init_filters,
                          3,
                          strides=init_stride,
                          use_bias=False,
                          kernel_initializer=kernel_initializer,
                          kernel_regularizer=l2(weight_decay),
                          name='conv1/conv')(x)
        x = layers.BatchNormalization(axis=3,
                                      epsilon=1.001e-5,
                                      name='conv1/bn')(x)
        x = layers.Activation('relu', name='conv1/relu')(x)
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
        x = layers.AveragePooling2D(3, strides=2, name='pool1')(x)

        for i, block in enumerate(blocks):
            scope_num_str = str(i + 2)
            x = self.dense_block(x,
                                 block,
                                 name='conv' + scope_num_str,
                                 growth_rate=growth_rate,
                                 weight_decay=weight_decay,
                                 kernel_initializer=kernel_initializer)
            if i != len(blocks) - 1:
                x = self.transition_block(
                    x,
                    reduction,
                    name='pool' + scope_num_str,
                    weight_decay=weight_decay,
                    kernel_initializer=kernel_initializer)
        x = layers.BatchNormalization(axis=3, epsilon=1.001e-5, name='bn')(x)
        x = layers.Activation('relu', name='relu')(x)

        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(self.cat_max,
                         activation='softmax',
                         kernel_initializer=kernel_initializer,
                         name='fc')(x)

        model = Model(img_input, x)
        model.compile(optimizer=Adam(lr=self.lr),
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])

        return model
Example #7
def transition_block(x, name, dropout_rate, reduction=0.5):
    x = layers.BatchNormalization(name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    x = layers.Conv2D(int(x.get_shape().as_list()[-1] * reduction),
                      1,
                      padding='same',
                      name=name + '_conv')(x)
    x = layers.Dropout(rate=dropout_rate)(x)
    x = layers.AveragePooling2D(strides=2, name=name + '_pool')(x)
    return x
Example #8
def transition_block(x, reduction, name):
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    x = layers.Conv2D(int(backend.int_shape(x)[bn_axis] * reduction),
                      1,
                      use_bias=False,
                      name=name + '_conv')(x)
    x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
    return x
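A quick shape check for the transition block above (assuming `from tensorflow.keras import layers, backend` and channels-last data format):

inp = layers.Input((32, 32, 64))
out = transition_block(inp, reduction=0.5, name='pool2')
# `out` has shape (None, 16, 16, 32): channels compressed by `reduction`,
# spatial dimensions halved by the stride-2 average pooling.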
Example #9
def average_pool1d(x, kernel_size=2, stride=2, padding='same'):
    """
    Args:
        x: input_tensor (N, L)
        kernel_size: int
        stride: int
        padding: same
    Returns: tensor (N, L//ks)
    """
    _x = tf.expand_dims(x, axis=2)
    _x = kl.AveragePooling2D((kernel_size, 1), (stride, 1), padding)(_x)
    _x = tf.squeeze(_x, axis=2)
    return _x
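A minimal sanity check for the helper above (assuming `import tensorflow as tf`, `from tensorflow.keras import layers as kl`, and TensorFlow 2.x eager execution):

x = tf.random.normal((4, 128, 8))               # (N, L, C)
y = average_pool1d(x, kernel_size=2, stride=2)
print(y.shape)                                  # (4, 64, 8)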
Example #10
    def _forward(self, level, inp, var_scope, is_train):
        # Upper branch
        up1 = inp
        up1 = self._modules['b1_' + str(level)](
            up1,
            256,
            256,
            var_scope=var_scope + "_ConvBlock_b1_level_%d" % level,
            is_train=is_train)

        # Lower branch
        low1 = layers.AveragePooling2D(pool_size=(2, 2),
                                       strides=2,
                                       name=var_scope +
                                       '_AvgPool2D_level_%d' % level)(inp)
        low1 = self._modules['b2_' + str(level)](
            low1,
            256,
            256,
            var_scope=var_scope + "_ConvBlock_b2_level_%d" % level,
            is_train=is_train)

        if level > 1:
            low2 = self._forward(level - 1,
                                 low1,
                                 var_scope=var_scope +
                                 '_forward_level_%d' % level,
                                 is_train=is_train)
        else:
            low2 = low1
            low2 = self._modules['b2_plus_' + str(level)](
                low2,
                256,
                256,
                var_scope=var_scope + "_ConvBlock_b2_plus_level_%d" % level,
                is_train=is_train)

        low3 = low2
        low3 = self._modules['b3_' + str(level)](
            low3,
            256,
            256,
            var_scope=var_scope + "_ConvBlock_b3_level_%d" % level,
            is_train=is_train)

        up2 = layers.UpSampling2D(size=(2, 2),
                                  name=var_scope + '_UpSample2D')(low3)

        return up1 + up2
Example #11
    def learn_inv_cov(self, var_scope='inv'):
        '''
        Unary precision matrix (inverse of the covariance matrix)
        '''
        self.learned_inv_covs = []
        self.learned_log_det_inv_covs = []

        for i in range(self.num_modules):
            h = tf.concat([self.logits[i], self.last_feature[i]], axis=-1)
            h = self.conv1_inv(h)
            h_32 = layers.AveragePooling2D(pool_size=(2, 2),
                                           strides=2,
                                           name=var_scope + '_AvgPool2D')(h)
            h_32 = self.conv2_inv(h_32)
            h_8 = layers.MaxPool2D(pool_size=(4, 4),
                                   strides=4,
                                   name=var_scope + '_MaxPool2D')(h_32)

            h_8 = tf.transpose(h_8, [0, 3, 1, 2])
            h_8 = tf.reshape(h_8,
                             shape=[tf.shape(h_8)[0], self.FLAGS.num_lmks, 64])
            h_8 = self.dense1(h_8)
            h_8 = self.dense2(h_8)
            h_8 = self.dense3(h_8)

            # N C 2 2 chol
            h_low = tf.contrib.distributions.fill_triangular(h_8)  # lower
            h_diag = tf.abs(tf.linalg.diag_part(h_low)) + 0.01
            log_h_diag = tf.math.log(h_diag)  # diagonal element >0
            self.h = tf.linalg.set_diag(h_low, h_diag)

            # N C 2 2
            learned_inv_cov = tf.matmul(self.h, self.h,
                                        transpose_b=True)  # lower*upper
            learned_log_det_inv_cov = 2 * tf.reduce_sum(log_h_diag, -1)

            self.learned_inv_covs.append(learned_inv_cov)
            self.learned_log_det_inv_covs.append(learned_log_det_inv_cov)

        return self.learned_inv_covs, self.learned_log_det_inv_covs
Example #12
 def new_transition_layer(self, input_tensor, name):
     '''This method builds the transition layer used in the forward pass with four operations:
     BN,
     ReLU,
     Conv2D,
     AveragePooling2D.
     '''
     filters = input_tensor.shape.as_list()[-1]
     x = layers.BatchNormalization(
         beta_regularizer=tf.keras.regularizers.l2(1e-4),
         gamma_regularizer=tf.keras.regularizers.l2(1e-4),
         name=name + '-bn')(input_tensor)
     x = layers.ReLU(name=name + '-relu')(x)
     x = layers.Conv2D(
         filters=int(0.5 * filters),
         kernel_size=1,
         padding='SAME',
         kernel_initializer=tf.keras.initializers.he_uniform(),
         kernel_regularizer=tf.keras.regularizers.l2(1e-4),
         name=name + '-conv')(x)
     x = layers.AveragePooling2D(pool_size=(2, 2),
                                 name=name + '-avg_pool')(x)
     return x
Example #13
def transition_block(x, reduction, name):
    """A transition block.

  Arguments:
    x: input tensor.
    reduction: float, compression rate at transition layers.
    name: string, block label.

  Returns:
    output tensor for the block.
  """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    x = layers.Conv2D(int(backend.int_shape(x)[bn_axis] * reduction),
                      1,
                      use_bias=False,
                      name=name + '_conv')(x)
    x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
    return x
Example #14
def transition_block(x, reduction, name):
    """A transition block.

    # Arguments
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.

    # Returns
        tuple of (tensor before the average pooling, output tensor for the block).
    """
    bn_axis = 3
    x = layers.BatchNormalization(axis=bn_axis,
                                  epsilon=1.001e-5,
                                  name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    x = layers.Conv2D(int(backend.int_shape(x)[bn_axis] * reduction),
                      1,
                      use_bias=False,
                      kernel_regularizer=regularizers.l2(l2_reg),
                      name=name + '_conv')(x)
    conv_before_pool = x
    x = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
    return conv_before_pool, x
Example #15
def _reduction_a_cell(ip, p, filters, block_id=None):
    """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).

  Arguments:
    ip: Input tensor `x`
    p: Input tensor `p`
    filters: Number of output filters
    block_id: String block_id

  Returns:
    A Keras tensor
  """
    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1

    with backend.name_scope('reduction_A_block_%s' % block_id):
        p = _adjust_block(p, ip, filters, block_id)

        h = layers.Activation('relu')(ip)
        h = layers.Conv2D(filters, (1, 1),
                          strides=(1, 1),
                          padding='same',
                          name='reduction_conv_1_%s' % block_id,
                          use_bias=False,
                          kernel_initializer='he_normal')(h)
        h = layers.BatchNormalization(axis=channel_dim,
                                      momentum=0.9997,
                                      epsilon=1e-3,
                                      name='reduction_bn_1_%s' % block_id)(h)
        h3 = layers.ZeroPadding2D(padding=imagenet_utils.correct_pad(h, 3),
                                  name='reduction_pad_1_%s' % block_id)(h)

        with backend.name_scope('block_1'):
            x1_1 = _separable_conv_block(h,
                                         filters, (5, 5),
                                         strides=(2, 2),
                                         block_id='reduction_left1_%s' %
                                         block_id)
            x1_2 = _separable_conv_block(p,
                                         filters, (7, 7),
                                         strides=(2, 2),
                                         block_id='reduction_right1_%s' %
                                         block_id)
            x1 = layers.add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)

        with backend.name_scope('block_2'):
            x2_1 = layers.MaxPooling2D(
                (3, 3),
                strides=(2, 2),
                padding='valid',
                name='reduction_left2_%s' % block_id)(h3)
            x2_2 = _separable_conv_block(p,
                                         filters, (7, 7),
                                         strides=(2, 2),
                                         block_id='reduction_right2_%s' %
                                         block_id)
            x2 = layers.add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)

        with backend.name_scope('block_3'):
            x3_1 = layers.AveragePooling2D(
                (3, 3),
                strides=(2, 2),
                padding='valid',
                name='reduction_left3_%s' % block_id)(h3)
            x3_2 = _separable_conv_block(p,
                                         filters, (5, 5),
                                         strides=(2, 2),
                                         block_id='reduction_right3_%s' %
                                         block_id)
            x3 = layers.add([x3_1, x3_2], name='reduction_add3_%s' % block_id)

        with backend.name_scope('block_4'):
            x4 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding='same',
                name='reduction_left4_%s' % block_id)(x1)
            x4 = layers.add([x2, x4])

        with backend.name_scope('block_5'):
            x5_1 = _separable_conv_block(x1,
                                         filters, (3, 3),
                                         block_id='reduction_left4_%s' %
                                         block_id)
            x5_2 = layers.MaxPooling2D(
                (3, 3),
                strides=(2, 2),
                padding='valid',
                name='reduction_right5_%s' % block_id)(h3)
            x5 = layers.add([x5_1, x5_2], name='reduction_add4_%s' % block_id)

        x = layers.concatenate([x2, x3, x4, x5],
                               axis=channel_dim,
                               name='reduction_concat_%s' % block_id)
        return x, ip
Example #16
def _normal_a_cell(ip, p, filters, block_id=None):
    """Adds a Normal cell for NASNet-A (Fig. 4 in the paper).

  Arguments:
      ip: Input tensor `x`
      p: Input tensor `p`
      filters: Number of output filters
      block_id: String block_id

  Returns:
      A Keras tensor
  """
    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1

    with backend.name_scope('normal_A_block_%s' % block_id):
        p = _adjust_block(p, ip, filters, block_id)

        h = layers.Activation('relu')(ip)
        h = layers.Conv2D(filters, (1, 1),
                          strides=(1, 1),
                          padding='same',
                          name='normal_conv_1_%s' % block_id,
                          use_bias=False,
                          kernel_initializer='he_normal')(h)
        h = layers.BatchNormalization(axis=channel_dim,
                                      momentum=0.9997,
                                      epsilon=1e-3,
                                      name='normal_bn_1_%s' % block_id)(h)

        with backend.name_scope('block_1'):
            x1_1 = _separable_conv_block(h,
                                         filters,
                                         kernel_size=(5, 5),
                                         block_id='normal_left1_%s' % block_id)
            x1_2 = _separable_conv_block(p,
                                         filters,
                                         block_id='normal_right1_%s' %
                                         block_id)
            x1 = layers.add([x1_1, x1_2], name='normal_add_1_%s' % block_id)

        with backend.name_scope('block_2'):
            x2_1 = _separable_conv_block(p,
                                         filters, (5, 5),
                                         block_id='normal_left2_%s' % block_id)
            x2_2 = _separable_conv_block(p,
                                         filters, (3, 3),
                                         block_id='normal_right2_%s' %
                                         block_id)
            x2 = layers.add([x2_1, x2_2], name='normal_add_2_%s' % block_id)

        with backend.name_scope('block_3'):
            x3 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding='same',
                name='normal_left3_%s' % (block_id))(h)
            x3 = layers.add([x3, p], name='normal_add_3_%s' % block_id)

        with backend.name_scope('block_4'):
            x4_1 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding='same',
                name='normal_left4_%s' % (block_id))(p)
            x4_2 = layers.AveragePooling2D(
                (3, 3),
                strides=(1, 1),
                padding='same',
                name='normal_right4_%s' % (block_id))(p)
            x4 = layers.add([x4_1, x4_2], name='normal_add_4_%s' % block_id)

        with backend.name_scope('block_5'):
            x5 = _separable_conv_block(h,
                                       filters,
                                       block_id='normal_left5_%s' % block_id)
            x5 = layers.add([x5, h], name='normal_add_5_%s' % block_id)

        x = layers.concatenate([p, x1, x2, x3, x4, x5],
                               axis=channel_dim,
                               name='normal_concat_%s' % block_id)
    return x, ip
Example #17
def _adjust_block(p, ip, filters, block_id=None):
    """Adjusts the input `previous path` to match the shape of the `input`.

  Used in situations where the output number of filters needs to be changed.

  Arguments:
      p: Input tensor which needs to be modified
      ip: Input tensor whose shape needs to be matched
      filters: Number of output filters to be matched
      block_id: String block_id

  Returns:
      Adjusted Keras tensor
  """
    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
    img_dim = 2 if backend.image_data_format() == 'channels_first' else -2

    ip_shape = backend.int_shape(ip)

    if p is not None:
        p_shape = backend.int_shape(p)

    with backend.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p_shape[img_dim] != ip_shape[img_dim]:
            with backend.name_scope('adjust_reduction_block_%s' % block_id):
                p = layers.Activation('relu',
                                      name='adjust_relu_1_%s' % block_id)(p)
                p1 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding='valid',
                    name='adjust_avg_pool_1_%s' % block_id)(p)
                p1 = layers.Conv2D(filters // 2, (1, 1),
                                   padding='same',
                                   use_bias=False,
                                   name='adjust_conv_1_%s' % block_id,
                                   kernel_initializer='he_normal')(p1)

                p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding='valid',
                    name='adjust_avg_pool_2_%s' % block_id)(p2)
                p2 = layers.Conv2D(filters // 2, (1, 1),
                                   padding='same',
                                   use_bias=False,
                                   name='adjust_conv_2_%s' % block_id,
                                   kernel_initializer='he_normal')(p2)

                p = layers.concatenate([p1, p2], axis=channel_dim)
                p = layers.BatchNormalization(axis=channel_dim,
                                              momentum=0.9997,
                                              epsilon=1e-3,
                                              name='adjust_bn_%s' %
                                              block_id)(p)

        elif p_shape[channel_dim] != filters:
            with backend.name_scope('adjust_projection_block_%s' % block_id):
                p = layers.Activation('relu')(p)
                p = layers.Conv2D(filters, (1, 1),
                                  strides=(1, 1),
                                  padding='same',
                                  name='adjust_conv_projection_%s' % block_id,
                                  use_bias=False,
                                  kernel_initializer='he_normal')(p)
                p = layers.BatchNormalization(axis=channel_dim,
                                              momentum=0.9997,
                                              epsilon=1e-3,
                                              name='adjust_bn_%s' %
                                              block_id)(p)
    return p
Example #18
def block2(x,
          filters,
          kernel_size=3,
          stride=1,
          groups=1,
          base_width=64,
          radix=1,
          is_first=True,
          conv_shortcut=True,
          name=None):
  """A residual block.

  Arguments:
    x: input tensor.
    filters: integer, filters of the bottleneck layer.
    kernel_size: default 3, kernel size of the bottleneck layer.
    stride: default 1, stride of the first layer.
    groups: default 1, group size for grouped convolution.
    base_width: default 64, filters of the intermediate layer.
    radix: default 1, splits for split attention layer.
    is_first: default True, when to use pooling layer.
    conv_shortcut: default True, use convolution shortcut if True,
        otherwise identity shortcut.
    name: string, block label.

  Returns:
    Output tensor for the residual block.
  """
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

  width = int(filters * (base_width / 64.)) * groups
  expansion = 4
  old_stride = stride

  if stride > 1 or is_first:
    stride = 1

  if conv_shortcut:
    shortcut = layers.AveragePooling2D(
      pool_size=stride, strides=old_stride, name=name + '_0_pool')(x)
    shortcut = layers.Conv2D(
        filters * expansion,
        1,
        strides=1,
        use_bias=False,
        name=name + '_0_conv')(shortcut)
    shortcut = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)
  else:
    shortcut = x

  x = layers.Conv2D(width, 1, use_bias=False, name=name + '_1_conv')(x)

  x = layers.BatchNormalization(
    axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
  x = layers.Activation('relu', name=name + '_1_relu')(x)

  x = SplitAttentionConv2D(
      x,
      width,
      kernel_size=kernel_size,
      stride=stride,
      padding=(1, 1),
      groups=groups,
      radix=radix,
      use_bias=False,
      name=name + '_2_conv')

  if stride > 1 or is_first:
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_0_pad')(x)
    x = layers.AveragePooling2D(
      pool_size=kernel_size, strides=old_stride, name=name + '_1_pool')(x)

  x = layers.Conv2D(
      filters * expansion, 1, use_bias=False, name=name + '_3_conv')(x)
  x = layers.BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)

  x = layers.Add(name=name + '_add')([shortcut, x])
  x = layers.Activation('relu', name=name + '_out')(x)
  return x
Example #19
    return (x_train, y_train), (x_test, y_test)


# Define model architecture
lenet5 = Sequential()

lenet5.add(
    layers.Conv2D(6,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  activation="tanh",
                  input_shape=(28, 28, 1),
                  padding="same"))
lenet5.add(
    layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"))
lenet5.add(
    layers.Conv2D(16,
                  kernel_size=(5, 5),
                  strides=(1, 1),
                  activation="tanh",
                  padding="valid"))
lenet5.add(
    layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"))

lenet5.add(layers.Flatten())
lenet5.add(layers.Dense(units=120, activation="tanh"))
lenet5.add(layers.Dense(84, activation="tanh"))
lenet5.add(layers.Dense(10, activation="softmax"))

# Compile model
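The original example is truncated after this comment. As an illustrative sketch only (not the original author's code), a typical compile step for this softmax LeNet-5 could be:

lenet5.compile(optimizer="adam",
               loss="categorical_crossentropy",
               metrics=["accuracy"])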
Example #20
def create_network(
    Zt,
    Ct,
    channels=1,
    upscaling_filters=[512, 256, 256, 128, 64],
    upscaling_ks=[5, 5, 5, 5, 5],
    upscaling_strides=[2, 2, 2, 2, 2, 1],
    encoder_filters=[64, 128, 256],
    encoder_ks=[3, 3, 3],
    resblock_filters=[256, 256, 256, 256, 256],
    resblock_ks=[3, 3, 3, 3, 3, 3],
    decoder_filters=[256, 256, 128, 64],
    decoder_ks=[5, 5, 5, 5, 5],
):
    with tf.name_scope("Gen"):

        Z = kl.Input((
            None,
            None,
            channels,
        ), tensor=Zt, name="Z")
        C = kl.Input((
            None,
            None,
            channels,
        ), tensor=Ct, name="C")
        layer = Z

        # Upscaling
        for l in range(len(upscaling_filters)):
            layer = kl.Conv2DTranspose(filters=upscaling_filters[l],
                                       kernel_size=upscaling_ks[l],
                                       padding="same",
                                       strides=upscaling_strides[l],
                                       kernel_regularizer=kr.l2(),
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)

        layer = kl.Conv2DTranspose(filters=channels,
                                   kernel_size=upscaling_ks[-1],
                                   padding="same",
                                   strides=1,
                                   activation="relu",
                                   kernel_regularizer=kr.l2())(layer)

        layer = kl.concatenate([layer, C])

        # Encoder
        skips = [C]
        for l in range(len(encoder_filters)):
            layer = kl.Conv2D(
                filters=encoder_filters[l],
                kernel_size=encoder_ks[l],
                padding="same",
                activation="relu",
                # strides=2,
                kernel_regularizer=kr.l2())(layer)
            layer = kl.AveragePooling2D()(layer)
            layer = kl.BatchNormalization()(layer)
            skips.append(layer)

        # Residual blocks
        for l in range(len(resblock_filters)):
            layer = ResidualBlock(resblock_filters[l],
                                  nb_layers=3,
                                  kernel_size=resblock_ks[l])(layer)

        # Decoder
        layer = kl.Conv2DTranspose(filters=decoder_filters[0],
                                   kernel_size=decoder_ks[0],
                                   padding="same",
                                   strides=2,
                                   kernel_regularizer=kr.l2(),
                                   activation="relu")(layer)
        layer = kl.BatchNormalization()(layer)

        skips = skips[::-1]
        for l in range(1, len(decoder_filters)):
            layer = kl.concatenate([layer, skips[l]])
            layer = kl.Conv2DTranspose(filters=decoder_filters[l],
                                       kernel_size=decoder_ks[l],
                                       padding="same",
                                       strides=2,
                                       kernel_regularizer=kr.l2(),
                                       activation="relu")(layer)
            layer = kl.BatchNormalization()(layer)

        G_out = kl.Conv2D(filters=channels,
                          kernel_size=5,
                          activation="tanh",
                          padding="same",
                          kernel_regularizer=kr.l2())(layer)

        model = k.Model(inputs=[Z, C], outputs=G_out, name="G")
    return model
Example #21
    image = x[i]
    label = y[i].argmax()
    print(classnames[label])
    plt.imshow(image)
    plt.show()

"""# Create the first model"""

from tensorflow.python.keras import layers
from tensorflow.python.keras import Sequential

model = Sequential()

#convolutional base
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=train_generator.image_shape))
model.add(layers.AveragePooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.AveragePooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))

#dense layers
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(train_generator.num_classes, activation='softmax'))
model.summary()

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

"""# Train the model"""
Example #22
def InceptionResNetV2(input_shape=None):
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=True)

    img_input = layers.Input(shape=input_shape)

    # Stem block: 35 x 35 x 192
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = layers.MaxPooling2D(3, strides=2)(x)
    x = conv2d_bn(x, 80, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid')
    x = layers.MaxPooling2D(3, strides=2)(x)

    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches)

    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)

    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
    branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)

    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)

    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
    branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)

    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)

    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')

    # Classification block
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation('softmax', None)
    x = layers.Dense(NUM_CLASSES, activation='softmax', name='predictions')(x)

    # Create model.
    model = training.Model(img_input, x, name='inception_resnet_v2')

    return model
Example #23
    def forward(self, x, is_train=True):
        '''
        Forward pass through FAN network
        '''
        x = activations.relu(
            self.bn1(self.conv1(x),
                     training=is_train,
                     name='FAN_begin_bn1',
                     trainable=True))
        with tf.compat.v1.variable_scope("FAN_start"):
            x = layers.AveragePooling2D(pool_size=(2, 2),
                                        strides=2,
                                        name='AvgPool2D_layer1')(ConvBlock(
                                            x,
                                            64,
                                            128,
                                            var_scope="ConvBlock_layer1",
                                            is_train=is_train))

            x = ConvBlock(x,
                          128,
                          128,
                          var_scope="ConvBlock_layer2",
                          is_train=is_train)
            x = ConvBlock(x,
                          128,
                          256,
                          var_scope="ConvBlock_layer3",
                          is_train=is_train)

        previous = x

        outputs = []
        self.last_feature = []
        for i in range(self.num_modules):
            hg = self._modules['m' + str(i)].forward(previous,
                                                     var_scope='HG_%d' % i,
                                                     is_train=is_train)

            ll = hg
            ll = self._modules['top_m_' + str(i)](
                ll,
                256,
                256,
                var_scope="ConvBlock_top_m_HG_%d" % i,
                is_train=is_train)

            ll = activations.relu(self._modules['bn_end' + str(i)](
                self._modules['conv_last' + str(i)](ll),
                training=is_train,
                name='bn_end_HG_%d' % i,
                trainable=True))

            # Predict heatmaps
            # tmp_out, output of each stage
            tmp_out = self._modules['l' + str(i)](ll)
            outputs.append(tmp_out)
            self.last_feature.append(ll)
            if i < self.num_modules - 1:
                # form previous, input to the next HG stage
                ll = self._modules['bl' + str(i)](ll)
                tmp_out_ = self._modules['al' + str(i)](tmp_out)
                previous = previous + ll + tmp_out_

            self.logits = outputs
        return outputs
Example #24
def InceptionResNetV2(include_top=True,
                      weights='imagenet',
                      input_tensor=None,
                      input_shape=None,
                      pooling=None,
                      classes=1000,
                      classifier_activation='softmax',
                      **kwargs):
  """Instantiates the Inception-ResNet v2 architecture.
  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in your Keras config at `~/.keras/keras.json`.
  Caution: Be sure to properly pre-process your inputs to the application.
  Please see `applications.inception_resnet_v2.preprocess_input` for an example.
  Arguments:
    include_top: whether to include the fully-connected
      layer at the top of the network.
    weights: one of `None` (random initialization),
      'imagenet' (pre-training on ImageNet),
      or the path to the weights file to be loaded.
    input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is `False` (otherwise the input shape
      has to be `(299, 299, 3)` (with `'channels_last'` data format)
      or `(3, 299, 299)` (with `'channels_first'` data format).
      It should have exactly 3 inputs channels,
      and width and height should be no smaller than 75.
      E.g. `(150, 150, 3)` would be one valid value.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model will be
          the 4D tensor output of the last convolutional block.
      - `'avg'` means that global average pooling
          will be applied to the output of the
          last convolutional block, and thus
          the output of the model will be a 2D tensor.
      - `'max'` means that global max pooling will be applied.
    classes: optional number of classes to classify images
      into, only to be specified if `include_top` is `True`, and
      if no `weights` argument is specified.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
    **kwargs: For backwards compatibility only.
  Returns:
    A `keras.Model` instance.
  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    ValueError: if `classifier_activation` is not `softmax` or `None` when
      using a pretrained top layer.
  """
  if 'layers' in kwargs:
    global layers
    layers = kwargs.pop('layers')
  if kwargs:
    raise ValueError('Unknown argument(s): %s' % (kwargs,))
  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
                     ' as true, `classes` should be 1000')

  # Determine proper input shape
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=299,
      min_size=75,
      data_format=backend.image_data_format(),
      require_flatten=include_top,
      weights=weights)

  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  # Stem block: 35 x 35 x 192
  x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
  x = conv2d_bn(x, 32, 3, padding='valid')
  x = conv2d_bn(x, 64, 3)
  x = layers.MaxPooling2D(3, strides=2)(x)
  x = conv2d_bn(x, 80, 1, padding='valid')
  x = conv2d_bn(x, 192, 3, padding='valid')
  x = layers.MaxPooling2D(3, strides=2)(x)

  # Mixed 5b (Inception-A block): 35 x 35 x 320
  branch_0 = conv2d_bn(x, 96, 1)
  branch_1 = conv2d_bn(x, 48, 1)
  branch_1 = conv2d_bn(branch_1, 64, 5)
  branch_2 = conv2d_bn(x, 64, 1)
  branch_2 = conv2d_bn(branch_2, 96, 3)
  branch_2 = conv2d_bn(branch_2, 96, 3)
  branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 64, 1)
  branches = [branch_0, branch_1, branch_2, branch_pool]
  channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
  x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches)

  # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
  for block_idx in range(1, 11):
    x = inception_resnet_block(
        x, scale=0.17, block_type='block35', block_idx=block_idx)

  # Mixed 6a (Reduction-A block): 17 x 17 x 1088
  branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
  branch_1 = conv2d_bn(x, 256, 1)
  branch_1 = conv2d_bn(branch_1, 256, 3)
  branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
  branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
  branches = [branch_0, branch_1, branch_pool]
  x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)

  # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
  for block_idx in range(1, 21):
    x = inception_resnet_block(
        x, scale=0.1, block_type='block17', block_idx=block_idx)

  # Mixed 7a (Reduction-B block): 8 x 8 x 2080
  branch_0 = conv2d_bn(x, 256, 1)
  branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
  branch_1 = conv2d_bn(x, 256, 1)
  branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
  branch_2 = conv2d_bn(x, 256, 1)
  branch_2 = conv2d_bn(branch_2, 288, 3)
  branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
  branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
  branches = [branch_0, branch_1, branch_2, branch_pool]
  x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)

  # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
  for block_idx in range(1, 10):
    x = inception_resnet_block(
        x, scale=0.2, block_type='block8', block_idx=block_idx)
  x = inception_resnet_block(
      x, scale=1., activation=None, block_type='block8', block_idx=10)

  # Final convolution block: 8 x 8 x 1536
  x = conv2d_bn(x, 1536, 1, name='conv_7b')

  if include_top:
    # Classification block
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)
  else:
    if pooling == 'avg':
      x = layers.GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = layers.GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model.
  model = training.Model(inputs, x, name='inception_resnet_v2')

  # Load weights.
  if weights == 'imagenet':
    if include_top:
      fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
      weights_path = data_utils.get_file(
          fname,
          BASE_WEIGHT_URL + fname,
          cache_subdir='models',
          file_hash='e693bd0210a403b3192acc6073ad2e96')
    else:
      fname = ('inception_resnet_v2_weights_'
               'tf_dim_ordering_tf_kernels_notop.h5')
      weights_path = data_utils.get_file(
          fname,
          BASE_WEIGHT_URL + fname,
          cache_subdir='models',
          file_hash='d19885ff4a710c122648d3b5c3b684e4')
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  return model
Example #25
    def assemble_layers(self):
        from tensorflow.python.keras import backend, layers
        from tensorflow.python.keras.applications import imagenet_utils

        include_top = True
        weights = None
        pooling = None
        classes = 1
        if nnstate.FLAGS.PRED_SIZE != 2:
            err('bad')
        classifier_activation = 'sigmoid'

        x = conv2d_bn(self.inputs, 32, 3, strides=2, padding='valid')
        x = conv2d_bn(x, 32, 3, padding='valid')
        x = conv2d_bn(x, 64, 3)
        x = layers.MaxPooling2D(3, strides=2)(x)
        x = conv2d_bn(x, 80, 1, padding='valid')
        x = conv2d_bn(x, 192, 3, padding='valid')
        x = layers.MaxPooling2D(3, strides=2)(x)

        # Mixed 5b (Inception-A block): 35 x 35 x 320
        branch_0 = conv2d_bn(x, 96, 1)
        branch_1 = conv2d_bn(x, 48, 1)
        branch_1 = conv2d_bn(branch_1, 64, 5)
        branch_2 = conv2d_bn(x, 64, 1)
        branch_2 = conv2d_bn(branch_2, 96, 3)
        branch_2 = conv2d_bn(branch_2, 96, 3)
        branch_pool = layers.AveragePooling2D(3, strides=1, padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 64, 1)
        branches = [branch_0, branch_1, branch_2, branch_pool]
        channel_axis = 1 if backend.image_data_format(
        ) == 'channels_first' else 3
        x = layers.Concatenate(axis=channel_axis, name='mixed_5b')(branches)

        # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
        for block_idx in range(1, 11):
            x = inception_resnet_block(x,
                                       scale=0.17,
                                       block_type='block35',
                                       block_idx=block_idx)

        # Mixed 6a (Reduction-A block): 17 x 17 x 1088
        branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
        branch_1 = conv2d_bn(x, 256, 1)
        branch_1 = conv2d_bn(branch_1, 256, 3)
        branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
        branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
        branches = [branch_0, branch_1, branch_pool]
        x = layers.Concatenate(axis=channel_axis, name='mixed_6a')(branches)

        # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
        for block_idx in range(1, 21):
            x = inception_resnet_block(x,
                                       scale=0.1,
                                       block_type='block17',
                                       block_idx=block_idx)

        # Mixed 7a (Reduction-B block): 8 x 8 x 2080
        branch_0 = conv2d_bn(x, 256, 1)
        branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
        branch_1 = conv2d_bn(x, 256, 1)
        branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
        branch_2 = conv2d_bn(x, 256, 1)
        branch_2 = conv2d_bn(branch_2, 288, 3)
        branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
        branch_pool = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
        branches = [branch_0, branch_1, branch_2, branch_pool]
        x = layers.Concatenate(axis=channel_axis, name='mixed_7a')(branches)

        # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
        for block_idx in range(1, 10):
            x = inception_resnet_block(x,
                                       scale=0.2,
                                       block_type='block8',
                                       block_idx=block_idx)
        x = inception_resnet_block(x,
                                   scale=1.,
                                   activation=None,
                                   block_type='block8',
                                   block_idx=10)

        # Final convolution block: 8 x 8 x 1536
        x = conv2d_bn(x, 1536, 1, name='conv_7b')

        if include_top:
            # Classification block
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
            imagenet_utils.validate_activation(classifier_activation, weights)
            x = layers.Dense(classes,
                             activation=classifier_activation,
                             name='predictions')(x)
        else:
            if pooling == 'avg':
                x = layers.GlobalAveragePooling2D()(x)
            elif pooling == 'max':
                x = layers.GlobalMaxPooling2D()(x)

        return x
Example #26
def InceptionV3(input_shape=None):
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=True)

    img_input = layers.Input(shape=input_shape)

    if backend.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = conv2d_bn(x, 32, 3, 3, padding='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, 3, padding='valid')
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed0')

    # mixed 1: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed1')

    # mixed 2: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed2')

    # mixed 3: 17 x 17 x 768
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl,
                             96,
                             3,
                             3,
                             strides=(2, 2),
                             padding='valid')

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed3')

    # mixed 4: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed4')

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(5 + i))

    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed7')

    # mixed 8: 8 x 8 x 1280
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3,
                          320,
                          3,
                          3,
                          strides=(2, 2),
                          padding='valid')

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3,
                            192,
                            3,
                            3,
                            strides=(2, 2),
                            padding='valid')

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool],
                           axis=channel_axis,
                           name='mixed8')

    # mixed 9: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)

        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2],
                                       axis=channel_axis,
                                       name='mixed9_' + str(i))

        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2],
                                          axis=channel_axis)

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(9 + i))
    # Classification block
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation('softmax', None)
    x = layers.Dense(NUM_CLASSES, activation='softmax', name='predictions')(x)

    # Create model.
    model = training.Model(img_input, x, name='inception_v3')

    return model
Exemple #27
0
def InceptionV3(
    include_top=True,
    weights='imagenet',
    input_tensor=None,
    input_shape=None,
    pooling=None,
    classes=1000,
    classifier_activation='softmax',
):
    """Instantiates the Inception v3 architecture.

  Reference paper:
  - [Rethinking the Inception Architecture for Computer Vision](
      http://arxiv.org/abs/1512.00567) (CVPR 2016)

  Optionally loads weights pre-trained on ImageNet.
  Note that the data format convention used by the model is
  the one specified in the `tf.keras.backend.image_data_format()`.

  Caution: Be sure to properly pre-process your inputs to the application.
  Please see `applications.inception_v3.preprocess_input` for an example.

  Arguments:
    include_top: Boolean, whether to include the fully-connected
      layer at the top, as the last layer of the network. Defaults to `True`.
    weights: One of `None` (random initialization),
      `imagenet` (pre-training on ImageNet),
      or the path to the weights file to be loaded. Defaults to `imagenet`.
    input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
      to use as image input for the model. `input_tensor` is useful for sharing
      inputs between multiple different networks. Defaults to `None`.
    input_shape: Optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(299, 299, 3)` (with `channels_last` data format)
      or `(3, 299, 299)` (with `channels_first` data format)).
      It should have exactly 3 input channels,
      and width and height should be no smaller than 75.
      E.g. `(150, 150, 3)` would be one valid value.
      `input_shape` will be ignored if the `input_tensor` is provided.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` (default) means that the output of the model will be
          the 4D tensor output of the last convolutional block.
      - `avg` means that global average pooling
          will be applied to the output of the
          last convolutional block, and thus
          the output of the model will be a 2D tensor.
      - `max` means that global max pooling will be applied.
    classes: Optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified. Defaults to 1000.
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.

  Returns:
    A `keras.Model` instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    ValueError: if `classifier_activation` is not `softmax` or `None` when
      using a pretrained top layer.
  """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if backend.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = conv2d_bn(x, 32, 3, 3, padding='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, 3, padding='valid')
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed0')

    # mixed 1: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed1')

    # mixed 2: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed2')

    # mixed 3: 17 x 17 x 768
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl,
                             96,
                             3,
                             3,
                             strides=(2, 2),
                             padding='valid')

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed3')

    # mixed 4: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed4')

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(5 + i))

    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed7')

    # mixed 8: 8 x 8 x 1280
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3,
                          320,
                          3,
                          3,
                          strides=(2, 2),
                          padding='valid')

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3,
                            192,
                            3,
                            3,
                            strides=(2, 2),
                            padding='valid')

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool],
                           axis=channel_axis,
                           name='mixed8')

    # mixed 9: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)

        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2],
                                       axis=channel_axis,
                                       name='mixed9_' + str(i))

        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2],
                                          axis=channel_axis)

        branch_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(9 + i))
    if include_top:
        # Classification block
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        imagenet_utils.validate_activation(classifier_activation, weights)
        x = layers.Dense(classes,
                         activation=classifier_activation,
                         name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = training.Model(inputs, x, name='inception_v3')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            weights_path = data_utils.get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
        else:
            weights_path = data_utils.get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='bcbd6486424b2319ff4ef7d526e38f63')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
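The docstring above spells out `include_top`, `weights`, `pooling`, and `classes`; the following is a minimal usage sketch of this constructor through the public `tf.keras.applications` entry point, assuming TensorFlow is installed (the 224 x 224 input size is an illustrative assumption; any size of at least 75 x 75 with 3 channels works when `include_top=False`):

import tensorflow as tf

# Headless InceptionV3 as a feature extractor: the classifier is dropped and
# `pooling='avg'` applies global average pooling, so the output is 2D.
backbone = tf.keras.applications.InceptionV3(include_top=False,
                                             weights='imagenet',
                                             input_shape=(224, 224, 3),
                                             pooling='avg')

features = backbone(tf.zeros((1, 224, 224, 3)))
print(features.shape)  # (1, 2048)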
Exemple #28
0
def DashNet(input_tensor=None,
            input_shape=None,
            pooling='avg',
            classes=2,
            classifier_activation='softmax'):
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if backend.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    # customize model structure
    x = conv2d_bn(img_input, 32, 15, 3, strides=(3, 1), padding='valid')
    x = conv2d_bn(x, 32, 15, 3, padding='same')
    x = conv2d_bn(x, 64, 15, 3, padding='same')
    x = layers.MaxPooling2D(pool_size=(15, 3), strides=(5, 1))(x)

    x = conv2d_bn(x, 80, 1, 1, padding='same')
    x = conv2d_bn(x, 192, 15, 3, padding='same')
    x = layers.MaxPooling2D(pool_size=(15, 3), strides=(5, 2))(x)

    # mixed 0:
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_avg_pool = layers.AveragePooling2D((3, 3),
                                              strides=(1, 1),
                                              padding='same')(x)
    branch_avg_pool = conv2d_bn(branch_avg_pool, 32, 1, 1)

    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_avg_pool],
        axis=channel_axis,
        name='mixed0')

    if pooling == 'avg':
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
        x = layers.GlobalMaxPooling2D(name='max_pool')(x)
    x = layers.Dense(classes,
                     activation=classifier_activation,
                     name='predictions')(x)

    if input_tensor is not None:
        inputs = layer_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = training.Model(inputs, x, name='dashnet')

    return model
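DashNet above reuses the InceptionV3 stem and a single mixed block on tall, narrow 15 x 3 feature maps. A minimal instantiation sketch follows, assuming the snippet's module-level helpers (`conv2d_bn`, `layers`, `backend`, `layer_utils`, `training`) are in scope; the `(500, 20, 1)` input shape is a hypothetical choice that keeps every 'valid' pooling stage at a positive spatial size:

# Hypothetical 2-class model over (500, 20, 1) inputs; global average pooling
# feeds the softmax head defined in DashNet.
model = DashNet(input_shape=(500, 20, 1), pooling='avg', classes=2)
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()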
Exemple #29
0
from tensorflow.python.keras import layers, Sequential
from tensorflow.python.keras.utils import plot_model

model = Sequential()
model.add(
    layers.Conv2D(filters=6,
                  kernel_size=(5, 5),
                  activation='relu',
                  input_shape=(32, 32, 1),
                  strides=(1, 1)))
model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))

model.add(
    layers.Conv2D(filters=16,
                  kernel_size=(5, 5),
                  activation='relu',
                  strides=(1, 1)))
model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))

model.add(layers.Flatten())

model.add(layers.Dense(units=120, activation='relu'))

model.add(layers.Dense(units=84, activation='relu'))

model.add(layers.Dense(units=10, activation='softmax'))

plot_model(model,
           to_file=f'{__file__.split(".py")[0]}.png',
           show_layer_names=True,
           show_shapes=True)
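The Sequential LeNet-5 above expects 32 x 32 x 1 inputs; a minimal training sketch, assuming MNIST digits padded from 28 x 28 to 32 x 32 (optimizer, batch size, and epoch count are illustrative choices):

import numpy as np
from tensorflow.python.keras.datasets import mnist

(x_train, y_train), _ = mnist.load_data()
# Pad 28x28 digits to the 32x32x1 input expected by the first Conv2D layer.
x_train = np.pad(x_train, ((0, 0), (2, 2), (2, 2)))[..., np.newaxis] / 255.0

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=1)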
Exemple #30
0
# `kl` is assumed to be an alias for the Keras layers module, e.g.:
from tensorflow.python.keras import layers as kl


def average_pool2d(x,
                   kernel_size=(2, 2),
                   strides=(2, 2),
                   padding='same'):
    return kl.AveragePooling2D(kernel_size, strides, padding)(x)
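A quick usage sketch for the wrapper above, assuming `kl` aliases the Keras layers module as noted in the import: a 2 x 2 average pool with stride 2 halves each spatial dimension.

import tensorflow as tf

# (1, 8, 8, 3) -> (1, 4, 4, 3): non-overlapping 2x2 windows are averaged.
x = tf.zeros((1, 8, 8, 3))
y = average_pool2d(x)
print(y.shape)  # (1, 4, 4, 3)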