def AttentionResNet18(shape=(224, 224, 3), n_channels=64, n_classes=5, dropout=0, regularization=0.01):
    """
    Attention ResNet-18 with a single attention layer and variable input size.
    """

    regularizer = l2(regularization)

    input_ = Input(shape=shape)
    x = Conv2D(n_channels, (7, 7), strides=(2, 2),
               padding='same')(input_)  # 112x112
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)  # 56x56

    x = residual_block(x, output_channels=n_channels * 16, stride=4)  # 14x14

    # ATTENTION LAYER
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7

    x = residual_block(x, output_channels=n_channels * 32, stride=2)  # 7x7

    pool_size = (x.get_shape()[1], x.get_shape()[2])
    x = AveragePooling2D(pool_size=pool_size, strides=(1, 1))(x)
    x = Flatten()(x)
    if dropout:
        x = Dropout(dropout)(x)
    output = Dense(n_classes, kernel_regularizer=regularizer,
                   activation='softmax')(x)

    model = Model(input_, output)
    return model
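The snippet leaves its imports implicit and relies on the repository's own residual_block and attention_block helpers. A minimal usage sketch, assuming tf.keras import paths (the original may import from standalone Keras instead):

from tensorflow.keras.layers import (Input, Conv2D, BatchNormalization,
                                     Activation, MaxPooling2D, AveragePooling2D,
                                     Flatten, Dropout, Dense)
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
# residual_block and attention_block are assumed to come from the repo's modules.

model = AttentionResNet18(shape=(224, 224, 3), n_classes=5, dropout=0.2)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()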
Example #2
    def __init__(self, signal, params):
        self.input = signal
        self.params = params

        # Stack of eight residual blocks; only the first applies batch norm.
        model = blocks.residual_block(signal, bn=True)
        for _ in range(7):
            model = blocks.residual_block(model)

        # Per-timestep logits over five output classes.
        self.logits = tf.keras.layers.Dense(5)(model)
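blocks.residual_block is external and not shown. A plausible minimal sketch for 1-D signal data, assuming Conv1D layers with an identity shortcut and a bn flag that toggles batch norm (the filter count and kernel size are illustrative):

import tensorflow as tf

def residual_block(x, bn=False, filters=256, kernel_size=3):
    # Hypothetical block: two 1-D convs plus an identity shortcut.
    shortcut = x
    y = tf.keras.layers.Conv1D(filters, kernel_size, padding='same')(x)
    if bn:
        y = tf.keras.layers.BatchNormalization()(y)
    y = tf.keras.layers.Activation('relu')(y)
    y = tf.keras.layers.Conv1D(filters, kernel_size, padding='same')(y)
    if x.shape[-1] != filters:
        # Project the shortcut when the channel widths differ.
        shortcut = tf.keras.layers.Conv1D(filters, 1, padding='same')(x)
    return tf.keras.layers.Activation('relu')(
        tf.keras.layers.Add()([shortcut, y]))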
Example #3
    def __init__(self, signal, params):
        self.input = signal
        self.params = params

        # Five residual blocks in total; only the first applies batch norm.
        model = blocks.residual_block(signal, bn=True)
        for _ in range(4):
            model = blocks.residual_block(model)

        # TCN stack with dilation rates doubling from 1 up to 64.
        max_dilation = 64
        dilation = 1
        while dilation <= max_dilation:
            model = blocks.tcn_block(model, dilation)
            dilation *= 2
        self.logits = tf.keras.layers.Dense(5)(model)
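The while loop doubles the dilation rate (1, 2, 4, ..., 64), so the receptive field grows exponentially with depth. tcn_block itself is not shown; a minimal sketch, assuming a dilated causal convolution wrapped in a residual connection:

import tensorflow as tf

def tcn_block(x, dilation, filters=256, kernel_size=3):
    # Hypothetical TCN block: dilated causal conv with a residual connection.
    y = tf.keras.layers.Conv1D(filters, kernel_size, padding='causal',
                               dilation_rate=dilation)(x)
    y = tf.keras.layers.Activation('relu')(y)
    if x.shape[-1] != filters:
        x = tf.keras.layers.Conv1D(filters, 1, padding='same')(x)
    return tf.keras.layers.Add()([x, y])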
Example #4
    def __init__(self, signal, params):
        self.input = signal
        self.params = params

        # Five residual blocks in total; only the first applies batch norm.
        model = blocks.residual_block(signal, bn=True)
        for _ in range(4):
            model = blocks.residual_block(model)

        # Bidirectional TCN stack with dilations doubling from 1 up to 64;
        # each block also emits a skip tensor that bypasses the later blocks.
        skip_connections = []
        max_dilation = 64
        dilation = 1
        while dilation <= max_dilation:
            jump, model = blocks.tcn_block_both_directions(model, dilation)
            skip_connections.append(jump)
            dilation *= 2
        # Sum the skip outputs so every dilation level feeds the classifier directly.
        model = tf.keras.layers.Add()(skip_connections)
        self.logits = tf.keras.layers.Dense(5)(model)
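tcn_block_both_directions returns a (skip, trunk) pair. A sketch of one plausible bidirectional variant, assuming the backward branch is the same causal convolution applied to the time-reversed signal:

import tensorflow as tf

def tcn_block_both_directions(x, dilation, filters=256, kernel_size=3):
    # Hypothetical bidirectional TCN block returning (skip, residual_out).
    reverse = tf.keras.layers.Lambda(lambda t: tf.reverse(t, axis=[1]))
    fwd = tf.keras.layers.Conv1D(filters, kernel_size, padding='causal',
                                 dilation_rate=dilation)(x)
    bwd = tf.keras.layers.Conv1D(filters, kernel_size, padding='causal',
                                 dilation_rate=dilation)(reverse(x))
    y = tf.keras.layers.Activation('relu')(
        tf.keras.layers.Add()([fwd, reverse(bwd)]))
    skip = tf.keras.layers.Conv1D(filters, 1, padding='same')(y)
    if x.shape[-1] != filters:
        x = tf.keras.layers.Conv1D(filters, 1, padding='same')(x)
    return skip, tf.keras.layers.Add()([x, y])

Example #5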
def AttentionResNetCifar10(shape=(32, 32, 3), n_channels=32, n_classes=10):
    """
    Attention-56 ResNet for the CIFAR-10 dataset
    https://arxiv.org/abs/1704.06904
    """
    input_ = Input(shape=shape)
    x = Conv2D(n_channels, (5, 5), padding='same')(input_)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)  # 16x16

    x = residual_block(x, input_channels=32, output_channels=128)
    x = attention_block(x, encoder_depth=2)

    x = residual_block(x, input_channels=128, output_channels=256, stride=2)  # 8x8
    x = attention_block(x, encoder_depth=1)

    x = residual_block(x, input_channels=256, output_channels=512, stride=2)  # 4x4
    x = attention_block(x, encoder_depth=1)

    x = residual_block(x, input_channels=512, output_channels=1024)
    x = residual_block(x, input_channels=1024, output_channels=1024)
    x = residual_block(x, input_channels=1024, output_channels=1024)

    x = AveragePooling2D(pool_size=(4, 4), strides=(1, 1))(x)  # 1x1
    x = Flatten()(x)
    output = Dense(n_classes, activation='softmax')(x)

    model = Model(input_, output)
    return model
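A minimal training sketch for this constructor, assuming the standard tf.keras CIFAR-10 loader (batch size and epoch count are illustrative):

import tensorflow as tf

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # scale pixels to [0, 1]
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)

model = AttentionResNetCifar10()
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=64, epochs=10,
          validation_data=(x_test, y_test))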
Example #6
def AttentionResNetCifar10(shape=(32, 32, 3), n_channels=32, n_classes=10):
    input_ = Input(shape=shape)
    x = Conv2D(n_channels, (5, 5), padding='same')(input_)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=(2, 2))(x)  # 16x16

    x = residual_block(x, input_channels=32, output_channels=128)
    x = attention_block(x, encoder_depth=2)
    x = Conv2D(x.get_shape()[-1].value, (1, 1), name='residual_attention_stage1')(x)

    x = residual_block(x, input_channels=128, output_channels=256, stride=2)  # 8x8
    x = attention_block(x, encoder_depth=1)
    x = Conv2D(x.get_shape()[-1].value, (1, 1), name='residual_attention_stage2')(x)

    x = residual_block(x, input_channels=256, output_channels=512, stride=2)  # 4x4
    x = attention_block(x, encoder_depth=1)  # per the paper, the third attention module needs no skip connections, so encoder_depth should be 0 here; it is kept at 1 for implementation convenience
    x = Conv2D(x.get_shape()[-1].value, (1, 1), name='residual_attention_stage3')(x)

    x = residual_block(x, input_channels=512, output_channels=1024)
    x = residual_block(x, input_channels=1024, output_channels=1024)
    x = residual_block(x, input_channels=1024, output_channels=1024)

    x = AveragePooling2D(pool_size=(4, 4), strides=(1, 1))(x)  # 1x1
    x = Flatten()(x)
    output = Dense(n_classes, activation='softmax')(x)

    model = Model(input_, output)
    return model
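Because the 1x1 convolutions are named, the intermediate attention features can be read back with the standard Keras get_layer API; a small sketch (the images batch is hypothetical):

from tensorflow.keras.models import Model

model = AttentionResNetCifar10()
# Expose the stage-1 attention features as a standalone feature extractor.
stage1 = Model(inputs=model.input,
               outputs=model.get_layer('residual_attention_stage1').output)
features = stage1.predict(images)  # images: hypothetical batch of 32x32x3 inputs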
Example #7
    def __init__(self, signal, params):
        self.input = signal
        self.params = params
        max_dilation = 128

        # Three residual blocks; batch norm only on the first (i == 0).
        model = self.input
        for i in range(3):
            model = blocks.residual_block(model, i == 0)

        # Bidirectional WaveNet stack with dilations doubling from 1 to 128;
        # the per-level skip outputs are summed before the classifier.
        dilation = 1
        skip_connections = []
        while dilation <= max_dilation:
            model, skip = blocks.wavenet_bidirectional_block(model, dilation)
            skip_connections.append(skip)
            dilation *= 2
        skip_sum = tf.keras.layers.Add()(skip_connections)
        self.logits = tf.keras.layers.Dense(5)(skip_sum)
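blocks.wavenet_bidirectional_block is again external; a plausible sketch, assuming the WaveNet gated activation (tanh times sigmoid) over a dilated convolution with 'same' padding so the filter sees both directions, returning the residual trunk and a skip tensor:

import tensorflow as tf

def wavenet_bidirectional_block(x, dilation, filters=256, kernel_size=3):
    # Hypothetical WaveNet-style block returning (residual_out, skip).
    filt = tf.keras.layers.Conv1D(filters, kernel_size, padding='same',
                                  dilation_rate=dilation)(x)
    gate = tf.keras.layers.Conv1D(filters, kernel_size, padding='same',
                                  dilation_rate=dilation)(x)
    z = tf.keras.layers.Multiply()([
        tf.keras.layers.Activation('tanh')(filt),
        tf.keras.layers.Activation('sigmoid')(gate),
    ])
    skip = tf.keras.layers.Conv1D(filters, 1, padding='same')(z)
    res = tf.keras.layers.Conv1D(filters, 1, padding='same')(z)
    if x.shape[-1] != filters:
        x = tf.keras.layers.Conv1D(filters, 1, padding='same')(x)
    return tf.keras.layers.Add()([x, res]), skip

Example #8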
def AttentionResNet92(shape=(224, 224, 3),
                      n_channels=64,
                      n_classes=100,
                      dropout=0,
                      regularization=0.01):
    """
    Attention-92 ResNet
    https://arxiv.org/abs/1704.06904
    """
    regularizer = l2(regularization)

    input_ = Input(shape=shape)
    x = Conv2D(n_channels, (7, 7), strides=(2, 2),
               padding='same')(input_)  # 112x112
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2),
                     padding='same')(x)  # 56x56

    x = residual_block(x, output_channels=n_channels * 4)  # 56x56
    x = attention_block(x, encoder_depth=3)  # bottleneck 7x7

    x = residual_block(x, output_channels=n_channels * 8, stride=2)  # 28x28
    x = attention_block(x, encoder_depth=2)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=2)  # bottleneck 7x7

    x = residual_block(x, output_channels=n_channels * 16, stride=2)  # 14x14
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7

    x = residual_block(x, output_channels=n_channels * 32, stride=2)  # 7x7
    x = residual_block(x, output_channels=n_channels * 32)
    x = residual_block(x, output_channels=n_channels * 32)

    pool_size = (x.get_shape()[1].value, x.get_shape()[2].value)
    x = AveragePooling2D(pool_size=pool_size, strides=(1, 1))(x)
    x = Flatten()(x)
    if dropout:
        x = Dropout(dropout)(x)
    output = Dense(n_classes,
                   kernel_regularizer=regularizer,
                   activation='softmax')(x)

    model = Model(input_, output)
    return model
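The pooling window is read from the tensor's static shape (TF1-style .value access), so the model only builds when shape is fully specified. GlobalAveragePooling2D is a shape-agnostic alternative for the same tail; a sketch of the swap:

from tensorflow.keras.layers import GlobalAveragePooling2D

# Replaces the fixed-size AveragePooling2D + Flatten pair: it averages over
# whatever spatial extent the final feature map has, so the model also
# builds when the input height and width are left unspecified.
x = GlobalAveragePooling2D()(x)
if dropout:
    x = Dropout(dropout)(x)
output = Dense(n_classes, kernel_regularizer=regularizer,
               activation='softmax')(x)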
Example #9
    def __call__(self, x, reuse=False, is_training=True):
        # Number of 2x upsampling stages needed to reach the target resolution
        # from the initial 16x16 feature map.
        nb_upsampling = int(np.log2(self.target_size[0] // 16))
        with tf.variable_scope(self.name) as vs:
            if reuse:
                vs.reuse_variables()

            # Project the noise vector to an initial 16x16 feature map.
            _x = first_block(x, (16, 16), self.noise_dim, 'deconv',
                             self.normalization, is_training)

            residual_inputs = conv_block(_x,
                                         is_training=is_training,
                                         filters=64,
                                         activation_='relu',
                                         sampling='same',
                                         normalization=self.normalization,
                                         dropout_rate=0.,
                                         mode='conv_first')

            with tf.variable_scope('residual_blocks'):
                for i in range(16):
                    _x = residual_block(_x,
                                        is_training=is_training,
                                        filters=64,
                                        activation_='relu',
                                        sampling='same',
                                        normalization=self.normalization,
                                        dropout_rate=0.,
                                        mode='conv_first')

            _x = conv_block(_x,
                            is_training=is_training,
                            filters=64,
                            activation_='relu',
                            sampling='same',
                            normalization=self.normalization,
                            dropout_rate=0.,
                            mode='conv_first')
            _x += residual_inputs  # long skip connection around the residual stack

            with tf.variable_scope('upsampling_blocks'):
                for i in range(nb_upsampling):
                    _x = conv_block(_x,
                                    is_training=is_training,
                                    filters=64,
                                    activation_='relu',
                                    sampling=self.upsampling,
                                    normalization=self.normalization,
                                    dropout_rate=0.,
                                    mode='conv_first')

            # Three extra conv blocks with a short residual connection back to _x.
            __x = _x
            for _ in range(3):
                __x = conv_block(__x,
                                 is_training=is_training,
                                 filters=64,
                                 activation_='relu',
                                 sampling='same',
                                 normalization=self.normalization,
                                 dropout_rate=0.,
                                 mode='conv_first')
            _x += __x
            _x = conv_block(_x,
                            is_training=is_training,
                            kernel_size=(9, 9),
                            filters=self.channel,
                            activation_=self.last_activation,
                            sampling='same',
                            normalization=None,
                            dropout_rate=0.,
                            mode='conv_first')
            return _x
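The generator above targets the TF1 graph API (tf.variable_scope with manual reuse). A usage sketch, assuming a constructor, not shown here, that stores name, noise_dim, target_size, channel, normalization, upsampling, and last_activation:

import numpy as np
import tensorflow as tf  # TF 1.x graph-mode API assumed

generator = Generator()  # hypothetical constructor; its arguments are not shown
z = tf.placeholder(tf.float32, [None, generator.noise_dim], name='z')
fake_train = generator(z, is_training=True)               # builds the graph once
fake_eval = generator(z, reuse=True, is_training=False)   # shares the weights

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    noise = np.random.normal(size=(8, generator.noise_dim)).astype(np.float32)
    samples = sess.run(fake_eval, feed_dict={z: noise})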