Example #1
from keras.layers import Input, Conv2D, BatchNormalization, Activation, Concatenate
from keras.models import Model
from keras import layers


def compile(input_shape):  # note: shadows the built-in compile()

  X_input = Input(input_shape)

  X = Conv2D(64,(7,7),strides=(1,1),padding='valid')(X_input)
  X = BatchNormalization()(X)
  X1 = Conv2D(64,(7,7),strides=(1,1),padding='valid')(X_input)
  X1 = BatchNormalization()(X1)
  X = layers.Maximum()([X,X1])
  X = Conv2D(64,(4,4),strides=(1,1),padding='valid',activation='relu')(X)

  X2 = Conv2D(160,(13,13),strides=(1,1),padding='valid')(X_input)
  X2 = BatchNormalization()(X2)
  X21 = Conv2D(160,(13,13),strides=(1,1),padding='valid')(X_input)
  X21 = BatchNormalization()(X21)
  X2 = layers.Maximum()([X2,X21])

  X3 = Conv2D(64,(3,3),strides=(1,1),padding='valid')(X)
  X3 = BatchNormalization()(X3)
  X31 = Conv2D(64,(3,3),strides=(1,1),padding='valid')(X)
  X31 = BatchNormalization()(X31)
  X = layers.Maximum()([X3,X31])
  X = Conv2D(64,(2,2),strides=(1,1),padding='valid',activation='relu')(X)

  X = Concatenate()([X2,X])
  X = Conv2D(5,(21,21),strides=(1,1),padding='valid')(X)
  X = Activation('softmax')(X)

  model = Model(inputs=X_input, outputs=X)
  model.compile(optimizer='adam', loss='categorical_crossentropy')
  return model
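Each Maximum over a pair of parallel Conv2D + BatchNormalization branches acts as a maxout unit. Because every convolution uses 'valid' padding, both pathways shrink the spatial extent by 12 pixels, so they line up for concatenation, and the final 21x21 convolution needs inputs of at least 33x33. A minimal usage sketch; the 33x33x4 patch shape is an assumption (e.g. four-modality MRI patches), not something the example states:

# Hypothetical usage: 33x33 patches with 4 channels (assumed shape).
# Each path shrinks H and W by 12, so the 21x21 conv yields a 1x1x5 softmax map.
model = compile((33, 33, 4))
model.summary()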
Example #2
from keras import layers, optimizers
from keras.models import Model


def create_maxout_model(input_shape):
    """
    Create a maxout model with three convolutional maxout layers and one dense
    maxout layer.

    Args:
        input_shape (tuple):
            shape of the images to run on; i.e. (rows, cols, channels)
    Returns:
        the compiled keras model, ready to be trained.
    """

    inputs = layers.Input(shape=input_shape, name='input')

    x = layers.Dropout(0.2, name='dropout_1')(inputs)

    # First maxout layer
    x = layers.Maximum(name='maxout_1')([
        layers.Conv2D(96, (8,8), activation='relu', padding='same', name='conv_1_{}'.format(i))(x)
        for i in range(2)
    ])
    x = layers.MaxPool2D(name='maxpool_1')(x)

    x = layers.Dropout(0.2, name='dropout_2')(x)

    # Second maxout layer
    x = layers.Maximum(name='maxout_2')([
        layers.Conv2D(192, (8,8), activation='relu', padding='same', name='conv_2_{}'.format(i))(x)
        for i in range(2)
    ])
    x = layers.MaxPool2D(name='maxpool_2')(x)

    x = layers.Dropout(0.2, name='dropout_3')(x)

    # Third maxout layer
    x = layers.Maximum(name='maxout_3')([
        layers.Conv2D(192, (5,5), activation='relu', padding='same', name='conv_3_{}'.format(i))(x)
        for i in range(2)
    ])
    x = layers.MaxPool2D(name='maxpool_3')(x)

    x = layers.Flatten(name='flatten')(x)

    x = layers.Dropout(0.2, name='dropout_4')(x)

    # Dense maxout layer
    x = layers.Maximum(name='maxout_5')([
        layers.Dense(500, activation='relu', name='dense_1_{}'.format(i))(x)
        for i in range(5)
    ])

    x = layers.Dropout(0.2, name='dropout_5')(x)
    predictions = layers.Dense(10, activation='softmax', name='dense_2')(x)

    model = Model(inputs=inputs, outputs=predictions)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adadelta(),
                  metrics=['accuracy'])
    return model
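A quick way to exercise the builder; the 32x32x3 input shape is an assumption (CIFAR-10-style images), not part of the example:

# Hypothetical usage: CIFAR-10-sized inputs (assumed shape).
model = create_maxout_model((32, 32, 3))
model.summary()  # three conv maxout stages, one dense maxout stage, 10-way softmax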
Example #3
from keras.layers import Conv2D, BatchNormalization, Concatenate
from keras import layers


def two_path(x_input):
    """Two-pathway block: a local path of cascaded maxout convolutions and a
    global 13x13 maxout convolution, concatenated along the channel axis."""
  
    x = Conv2D(64,(7,7),strides=(1,1),padding='valid')(x_input)
    x = BatchNormalization()(x)
    x1 = Conv2D(64,(7,7),strides=(1,1),padding='valid')(x_input)
    x1 = BatchNormalization()(x1)
    x = layers.Maximum()([x,x1])
    x = Conv2D(64,(4,4),strides=(1,1),padding='valid',activation='relu')(x)

    x2 = Conv2D(160,(13,13),strides=(1,1),padding='valid')(x_input)
    x2 = BatchNormalization()(x2)
    x21 = Conv2D(160,(13,13),strides=(1,1),padding='valid')(x_input)
    x21 = BatchNormalization()(x21)
    x2 = layers.Maximum()([x2,x21])

    x3 = Conv2D(64,(3,3),strides=(1,1),padding='valid')(x)
    x3 = BatchNormalization()(x3)
    x31 = Conv2D(64,(3,3),strides=(1,1),padding='valid')(x)
    x31 = BatchNormalization()(x31)
    x = layers.Maximum()([x3,x31])
    x = Conv2D(64,(2,2),strides=(1,1),padding='valid',activation='relu')(x)

    x = Concatenate()([x2,x])
    return x
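Since two_path only returns a tensor, it has to be wrapped in a Model before training. A minimal sketch, reusing the 5-class 21x21 head from Example #1; the 33x33x4 input shape is an assumption:

from keras.layers import Input, Conv2D, Activation
from keras.models import Model

# Hypothetical wrapper: the input shape and the 5-class head are assumptions.
inputs = Input((33, 33, 4))
features = two_path(inputs)  # both paths shrink H and W by 12
outputs = Activation('softmax')(Conv2D(5, (21, 21), padding='valid')(features))
model = Model(inputs=inputs, outputs=outputs)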
Example #4
import numpy as np
from numpy.testing import assert_allclose
from keras import layers, models


def test_merge_maximum():
    i1 = layers.Input(shape=(4, 5))
    i2 = layers.Input(shape=(4, 5))
    o = layers.maximum([i1, i2])
    assert o._keras_shape == (None, 4, 5)
    model = models.Model([i1, i2], o)

    max_layer = layers.Maximum()
    o2 = max_layer([i1, i2])
    assert max_layer.output_shape == (None, 4, 5)

    x1 = np.random.random((2, 4, 5))
    x2 = np.random.random((2, 4, 5))
    out = model.predict([x1, x2])
    assert out.shape == (2, 4, 5)
    assert_allclose(out, np.maximum(x1, x2), atol=1e-4)
Example #5
from keras import layers


def squeeze_excite_block(input_tensor, ratio=0.25):
    # Combine the channel ("cSE") and spatial ("sSE") squeeze-excite branches
    # with an element-wise maximum; the two helper blocks are defined elsewhere.
    cse = channel_squeeze_excite_block(input_tensor, ratio)
    sse = spatial_squeeze_excite_block(input_tensor)
    output = layers.Maximum()([cse, sse])
    return output
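The two helper blocks are not shown in this example. A minimal sketch of what they might look like, following the usual channel (cSE) and spatial (sSE) squeeze-excite recipes; the exact layer choices here are assumptions, not the source's definitions:

from keras import layers
from keras import backend as K

def channel_squeeze_excite_block(input_tensor, ratio=0.25):
    # Assumed cSE: global average pool -> bottleneck MLP -> per-channel sigmoid gate.
    channels = K.int_shape(input_tensor)[-1]
    x = layers.GlobalAveragePooling2D()(input_tensor)
    x = layers.Dense(max(1, int(channels * ratio)), activation='relu')(x)
    x = layers.Dense(channels, activation='sigmoid')(x)
    x = layers.Reshape((1, 1, channels))(x)
    return layers.Multiply()([input_tensor, x])

def spatial_squeeze_excite_block(input_tensor):
    # Assumed sSE: a 1x1 convolution producing a per-pixel sigmoid gate.
    x = layers.Conv2D(1, (1, 1), activation='sigmoid')(input_tensor)
    return layers.Multiply()([input_tensor, x])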
Example #6
from keras.layers import Conv2D, BatchNormalization, Concatenate, LeakyReLU
from keras.regularizers import l1_l2
from keras import layers


def three_path(X_input):
    """
    Three pathway block.
    :param X_input: Input to the block
    :return: Output of the block
    """
    # Small path
    X0 = Conv2D(64, (5, 5),
                strides=(1, 1),
                padding='valid',
                kernel_regularizer=l1_l2(0.01, 0.01))(X_input)
    X0 = LeakyReLU(alpha=0.3)(X0)
    X0 = BatchNormalization()(X0)
    X0 = Conv2D(64, (5, 5),
                strides=(1, 1),
                padding='valid',
                kernel_regularizer=l1_l2(0.01, 0.01))(X0)
    X0 = LeakyReLU(alpha=0.3)(X0)
    X0 = BatchNormalization()(X0)
    X0 = Conv2D(64, (5, 5),
                strides=(1, 1),
                padding='valid',
                kernel_regularizer=l1_l2(0.01, 0.01))(X0)
    X0 = LeakyReLU(alpha=0.3)(X0)
    X0 = BatchNormalization()(X0)

    # Local path
    X = Conv2D(64, (7, 7),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l1_l2(0.01, 0.01))(X_input)
    X = LeakyReLU(alpha=0.3)(X)
    X = BatchNormalization()(X)
    X1 = Conv2D(64, (7, 7),
                strides=(1, 1),
                padding='valid',
                kernel_regularizer=l1_l2(0.01, 0.01))(X_input)
    X1 = LeakyReLU(alpha=0.3)(X1)
    X1 = BatchNormalization()(X1)
    X = layers.Maximum()([X, X1])
    X = Conv2D(64, (4, 4),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l1_l2(0.01, 0.01))(X)
    X = LeakyReLU(alpha=0.3)(X)
    X = BatchNormalization()(X)
    X3 = Conv2D(64, (3, 3),
                strides=(1, 1),
                padding='valid',
                kernel_regularizer=l1_l2(0.01, 0.01))(X)
    X3 = LeakyReLU(alpha=0.3)(X3)
    X3 = BatchNormalization()(X3)
    X31 = Conv2D(64, (3, 3),
                 strides=(1, 1),
                 padding='valid',
                 kernel_regularizer=l1_l2(0.01, 0.01))(X)
    X31 = LeakyReLU(alpha=0.3)(X31)
    X31 = BatchNormalization()(X31)
    X = layers.Maximum()([X3, X31])
    X = Conv2D(64, (2, 2),
               strides=(1, 1),
               padding='valid',
               kernel_regularizer=l1_l2(0.01, 0.01))(X)
    X = LeakyReLU(alpha=0.3)(X)
    X = BatchNormalization()(X)

    # Global path
    X2 = Conv2D(160, (13, 13),
                strides=(1, 1),
                padding='valid',
                kernel_regularizer=l1_l2(0.01, 0.01))(X_input)
    X2 = LeakyReLU(alpha=0.3)(X2)
    X2 = BatchNormalization()(X2)
    X21 = Conv2D(160, (13, 13),
                 strides=(1, 1),
                 padding='valid',
                 kernel_regularizer=l1_l2(0.01, 0.01))(X_input)
    X21 = LeakyReLU(alpha=0.3)(X21)
    X21 = BatchNormalization()(X21)
    X2 = layers.Maximum()([X2, X21])

    # Merging the three paths
    X = Concatenate()([X2, X, X0])
    return X
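As with the two-path block, every path here reduces the spatial dimensions by 12 ('valid' convolutions throughout), so the three outputs concatenate cleanly for any sufficiently large input. A minimal usage sketch; the 33x33x4 input shape is an assumption, not part of the example:

from keras.layers import Input
from keras.models import Model

# Hypothetical usage: 33x33 four-channel patches (assumed shape).
inputs = Input((33, 33, 4))
features = three_path(inputs)  # 21x21x(160 + 64 + 64) for a 33x33 input
model = Model(inputs=inputs, outputs=features)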