Example #1
0
    def f(x):
        """Pre-activation wide-ResNet block: [BN] -> act -> conv -> [BN] -> act
        -> (dropout) -> conv, with an optional residual shortcut added at the end.
        """
        # NOTE(review): assumes filters/strides/batch_norm/etc. come from the
        # enclosing factory's closure — not visible in this chunk.
        same_width = K.int_shape(x)[channel_index] == filters

        if same_width:
            # Pre-activate on a branch; x itself is kept pristine for the shortcut.
            y = BatchNormalization()(x) if batch_norm else x
            y = make_activation(activation, **kwargs)(y)
        else:
            # Channel count changes: pre-activate x in place, so the 1x1 shortcut
            # conv below operates on the activated tensor as well.
            if batch_norm:
                x = BatchNormalization()(x)
            x = make_activation(activation, **kwargs)(x)
            y = x

        y = make_conv(filters, activation=activation, strides=strides, **kwargs)(y)
        if batch_norm:
            y = BatchNormalization()(y)
        y = make_activation(activation, **kwargs)(y)
        if dropout != 0:
            y = make_dropout(dropout, activation=activation)(y)

        y = make_conv(filters, activation=activation, **kwargs)(y)

        if not residual:
            return y

        if same_width:
            shortcut = x
        else:
            # Width mismatch: project the shortcut with a strided 1x1 conv.
            shortcut = make_conv(filters, shape=(1, 1), strides=strides, **kwargs)(x)
        return Add()([y, shortcut])
Example #2
0
 def f(x):
     """Two-conv transform: act -> conv(strided) -> [BN] -> act -> conv -> [BN]."""
     h = make_activation(activation, **kwargs)(x)
     h = make_conv(filters, strides=strides)(h)
     if batch_norm:
         h = BatchNormalization()(h)
     h = make_activation(activation, **kwargs)(h)
     h = make_conv(filters)(h)
     # Final normalization is optional, mirroring the first conv's treatment.
     return BatchNormalization()(h) if batch_norm else h
Example #3
0
def make_wrn(activation, input_shape, output_size, depth=4, k=2, dropout=True, batch_norm=True, residual=True, **kwargs):
    """Build a Wide ResNet (WRN-depth-k) image classifier.

    Args:
        activation: activation spec forwarded to make_activation / make_conv.
        input_shape: shape of one input sample, without the batch axis.
        output_size: number of output classes (softmax width).
        depth: total network depth; must satisfy (depth - 4) % 6 == 0.
        k: widening factor applied to the base widths 16/32/64.
        dropout: if True, use a 0.3 dropout rate inside the blocks.
        batch_norm: whether blocks (and the head) apply BatchNormalization.
        residual: whether blocks use residual shortcuts.
        **kwargs: forwarded to the layer factory helpers.

    Returns:
        A keras Model mapping inputs to softmax class probabilities.
    """
    assert((depth - 4) % 6 == 0)
    # BUG FIX: `/` yields a float on Python 3, which makes range(n) raise
    # TypeError. Floor division keeps n an int (the assert guarantees exactness).
    n = (depth - 4) // 6
    dropout = 0.3 if dropout else 0

    x = Input(shape=input_shape)
    y = x

    # Stem conv, then three stages of n blocks each; stages 2 and 3 downsample
    # with a strided first block while doubling the width.
    y = make_conv(16, activation=activation, **kwargs)(y)
    for _ in range(n):
        y = _make_block(16*k, activation=activation, batch_norm=batch_norm, dropout=dropout, residual=residual, **kwargs)(y)

    y = _make_block(32*k, activation=activation, batch_norm=batch_norm, dropout=dropout, strides=(2, 2), residual=residual, **kwargs)(y)
    for _ in range(n-1):
        y = _make_block(32*k, activation=activation, batch_norm=batch_norm, dropout=dropout, residual=residual, **kwargs)(y)

    y = _make_block(64*k, activation=activation, batch_norm=batch_norm, dropout=dropout, strides=(2, 2), residual=residual, **kwargs)(y)
    for _ in range(n-1):
        y = _make_block(64*k, activation=activation, batch_norm=batch_norm, dropout=dropout, residual=residual, **kwargs)(y)

    if batch_norm:
        y = BatchNormalization()(y)
    y = make_activation(activation, **kwargs)(y)
    y = GlobalMaxPooling2D()(y)
    y = make_dense(output_size, activation='softmax', **kwargs)(y)

    return Model(inputs=x, outputs=y)
Example #4
0
    def f(x):
        """Shortcut/projection branch: identity when the channel count already
        matches `filters`; otherwise two offset average-pooling paths are each
        projected to filters//2 channels and concatenated.
        """
        in_channels = K.int_shape(x)[channel_index]
        if in_channels == filters:
            y = x
        else:
            y = make_activation(activation, **kwargs)(x)

            # Path 1: strided pooling, then 1x1 projection to half the width.
            y1 = AveragePooling2D((1, 1), strides=strides, padding='same')(y)
            # BUG FIX: `/` produces a float filter count on Python 3; conv layer
            # widths must be ints, so use floor division.
            y1 = make_conv(filters // 2, shape=(1, 1))(y1)

            # Path 2: shift the feature map by one pixel (pad top-left, crop
            # bottom-right) so the strided pooling samples different positions.
            y2 = ZeroPadding2D(padding=((1, 0), (1, 0)))(y)
            y2 = Cropping2D(cropping=((0, 1), (0, 1)))(y2)
            y2 = AveragePooling2D((1, 1), strides=strides, padding='same')(y2)
            y2 = make_conv(filters // 2, shape=(1, 1))(y2)

            y = Concatenate(axis=channel_index)([y1, y2])
            if batch_norm:
                y = BatchNormalization()(y)
        return y
Example #5
0
def make_shakeshake(activation,
                    input_shape,
                    output_size,
                    depth=26,
                    k=32,
                    batch_norm=True,
                    residual=True,
                    **kwargs):
    """Build a shake-shake style classifier (depth-2 blocks split over 3 stages).

    Args:
        activation: activation spec forwarded to the layer factory helpers.
        input_shape: shape of one input sample, without the batch axis.
        output_size: number of output classes (softmax width).
        depth: total depth; must satisfy (depth - 2) % 6 == 0.
        k: base block width; stages use k, 2k, 4k channels.
        batch_norm: whether blocks apply BatchNormalization.
        residual: whether blocks use residual shortcuts.
        **kwargs: forwarded to helpers; must contain 'batch_size' since the
            input is built with a fixed batch shape.

    Returns:
        A keras Model mapping inputs to softmax class probabilities.
    """
    assert ((depth - 2) % 6 == 0)
    # BUG FIX: `/` yields a float on Python 3, making range(n) raise TypeError.
    # Floor division keeps n integral (exact because of the assert above).
    n = (depth - 2) // 6

    x = Input(batch_shape=(kwargs['batch_size'], ) + input_shape)
    y = make_conv(16)(x)
    y = BatchNormalization()(y)

    for _ in range(n):
        y = _make_block(k,
                        activation=activation,
                        batch_norm=batch_norm,
                        residual=residual,
                        **kwargs)(y)

    # Stage 2: first block downsamples and doubles the width.
    y = _make_block(2 * k,
                    activation=activation,
                    batch_norm=batch_norm,
                    residual=residual,
                    strides=(2, 2),
                    **kwargs)(y)
    for _ in range(n - 1):
        y = _make_block(2 * k,
                        activation=activation,
                        batch_norm=batch_norm,
                        residual=residual,
                        **kwargs)(y)

    # Stage 3: same pattern at 4k channels.
    y = _make_block(4 * k,
                    activation=activation,
                    batch_norm=batch_norm,
                    residual=residual,
                    strides=(2, 2),
                    **kwargs)(y)
    for _ in range(n - 1):
        y = _make_block(4 * k,
                        activation=activation,
                        batch_norm=batch_norm,
                        residual=residual,
                        **kwargs)(y)

    y = make_activation(activation, **kwargs)(y)
    y = GlobalAveragePooling2D()(y)
    y = make_dense(output_size, activation='softmax', **kwargs)(y)

    return Model(inputs=x, outputs=y)
Example #6
0
def make_fitnet(activation,
                input_shape,
                output_size,
                dropout=True,
                batch_norm=False,
                **kwargs):
    """Build a FitNet-style classifier: three conv stages (64/96, 160, 256
    channels) separated by pooling and increasing dropout, then a dense head.
    """
    def conv(width, tensor):
        # One conv layer with the shared activation / batch-norm settings.
        return make_conv(width, activation=activation, batch_norm=batch_norm,
                         **kwargs)(tensor)

    x = Input(shape=input_shape)
    y = x

    if batch_norm:
        y = BatchNormalization()(y)
    if dropout:
        y = make_dropout(0.1, activation=activation)(y)

    # Stage 1: three 64-wide convs, two 96-wide, then spatial downsampling.
    for width in (64, 64, 64, 96, 96):
        y = conv(width, y)
    y = MaxPooling2D()(y)
    if dropout:
        y = make_dropout(0.2, activation=activation)(y)

    # Stage 2: five 160-wide convs.
    for width in (160,) * 5:
        y = conv(width, y)
    y = MaxPooling2D()(y)
    if dropout:
        y = make_dropout(0.3, activation=activation)(y)

    # Stage 3: five 256-wide convs, collapsed to a vector by global pooling.
    for width in (256,) * 5:
        y = conv(width, y)
    y = GlobalMaxPooling2D()(y)
    if dropout:
        y = make_dropout(0.4, activation=activation)(y)

    y = make_dense(2500,
                   activation=activation,
                   batch_norm=batch_norm,
                   **kwargs)(y)
    if dropout:
        y = make_dropout(0.5, activation=activation)(y)
    y = make_dense(output_size, activation='softmax', **kwargs)(y)

    return Model(inputs=x, outputs=y)