Example #1
File: ResNet.py Project: iiharu/NN
    def residual(self, inputs, filters, bottleneck=False, sub_sampling=False):
        # Pre-activation residual block (BN -> ReLU -> Conv), optionally with a
        # bottleneck (1x1 -> 3x3 -> 1x1) and strided sub-sampling.
        strides = 2 if sub_sampling else 1
        kernel_size = (1, 1) if bottleneck else (3, 3)

        outputs = batch_normalization()(inputs)
        outputs = relu()(outputs)
        outputs = conv2d(filters=filters,
                         kernel_size=kernel_size,
                         strides=strides)(outputs)
        if sub_sampling or bottleneck:
            # Projection shortcut so the skip branch matches the residual
            # branch in channel count (and spatial size when sub-sampling).
            # inputs = batch_normalization()(inputs)
            # inputs = relu()(inputs)
            inputs = conv2d(filters=4 * filters if bottleneck else filters,
                            kernel_size=(1, 1),
                            strides=strides)(inputs)
        outputs = batch_normalization()(outputs)
        outputs = relu()(outputs)
        outputs = conv2d(filters=filters, kernel_size=(3, 3),
                         strides=1)(outputs)
        if bottleneck:
            outputs = batch_normalization()(outputs)
            outputs = relu()(outputs)
            outputs = conv2d(filters=4 * filters,
                             kernel_size=kernel_size,
                             strides=1)(outputs)
        outputs = add()([inputs, outputs])

        return outputs
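None of these Keras snippets define the helpers they call (batch_normalization(), relu(), conv2d(), add(), and so on); they are evidently thin factories that return fresh Keras layer instances. A minimal sketch of what such wrappers might look like, assuming tf.keras (the names match the snippets; the implementations are an assumption, not code from the project):

# Hypothetical layer factories assumed by the Keras snippets in this section;
# each call returns a new Keras layer instance.
from tensorflow import keras

def batch_normalization(**kwargs):
    return keras.layers.BatchNormalization(**kwargs)

def relu():
    return keras.layers.ReLU()

def conv2d(filters, kernel_size, strides=1, padding='same', **kwargs):
    return keras.layers.Conv2D(filters, kernel_size, strides=strides,
                               padding=padding, **kwargs)

def add():
    return keras.layers.Add()

The remaining helpers (dense, softmax, flatten, dropout, concat, max_pooling2d, average_pooling2d, separable_conv2d) presumably follow the same pattern.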
Example #2
File: ResNet.py Project: iiharu/NN
    def build(self, input_shape, classes=10):

        filters = 16

        inputs = keras.Input(shape=input_shape)

        outputs = batch_normalization()(inputs)
        outputs = relu()(outputs)
        outputs = conv2d(filters=filters, kernel_size=(3, 3))(outputs)

        filters = filters * self.widening

        for i in range(3):
            for j in range(self.blocks):
                sub_sampling = (i > 0 and j == 0)
                outputs = self.residual(outputs,
                                        filters=filters,
                                        sub_sampling=sub_sampling)

            filters = 2 * filters

        outputs = batch_normalization()(outputs)
        outputs = relu()(outputs)
        outputs = average_pooling2d(pool_size=(8, 8))(outputs)
        outputs = flatten()(outputs)
        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        model = keras.Model(inputs, outputs)

        model.summary()

        return model
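A minimal usage sketch (the instance name and constructor arguments below are assumptions; only build() appears in the source). The returned Keras model is trained with the standard API:

# Hypothetical usage of build(); wide_resnet is an instance of the class above.
model = wide_resnet.build(input_shape=(32, 32, 3), classes=10)
model.compile(optimizer='sgd',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=128, epochs=200, validation_split=0.1)

The (8, 8) average pooling before the classifier implies 32x32 inputs (e.g. CIFAR-10): the two stride-2 stages reduce 32 -> 16 -> 8.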
Example #3
    def conv_block(self, inputs):
        # DenseNet composite function (BN -> ReLU -> Conv) with an optional 1x1
        # bottleneck; the block input is concatenated back onto the new features.
        outputs = batch_normalization()(inputs)
        if self.bottleneck:
            outputs = relu()(outputs)
            outputs = conv2d(filters=4 * self.growth_rate,
                             kernel_size=(1, 1))(outputs)
            outputs = batch_normalization()(outputs)
        outputs = relu()(outputs)
        outputs = conv2d(filters=self.growth_rate, kernel_size=(3, 3))(outputs)
        outputs = concat()([inputs, outputs])

        return outputs
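Because conv_block already concatenates its input with the newly computed features, a dense block can simply chain it; each call grows the channel count by growth_rate. A minimal sketch (dense_block is an assumed helper name, not shown in the source):

    # Hypothetical dense block built from conv_block above: after `blocks`
    # iterations the feature map has input_channels + blocks * growth_rate channels.
    def dense_block(self, inputs, blocks):
        outputs = inputs
        for _ in range(blocks):
            outputs = self.conv_block(outputs)
        return outputs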
Example #4
    def Dense_net(self, input_x):
        x = layers.conv2d(input_x, filters=2*self.filters, kernel_size=[7, 7], strides=[2, 2],
                          kernel_regularizer=layers.l2_regularizer(0.0005),
                          padding='valid', activation=None, name='conv0')

        x = self.dense_block(input_x=x, nb_layers=6, layer_name='dense_1')
        x = self.transition_layer(x, scope='trans_1')

        x = self.dense_block(input_x=x, nb_layers=12, layer_name='dense_2')
        x = self.transition_layer(x, scope='trans_2')

        x = self.dense_block(input_x=x, nb_layers=48, layer_name='dense_3')
        x = self.transition_layer(x, scope='trans_3')

        x = self.dense_block(input_x=x, nb_layers=32, layer_name='dense_final')

        # 100 Layer
        x = layers.batch_normalization(x, training=self.training, name='linear_batch')
        x = layers.selu(x)
        x = layers.global_ave_pool2d(x)
        # x = flatten(x)
        x = layers.fully_connected(x, self.class_num, use_bias=False, activation_fn=None, trainable=self.training,
                                   name='full_connecting')

        # x = tf.reshape(x, [-1, 10])
        return x
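The block configuration (6, 12, 48, 32) matches DenseNet-201. Assuming the transition layers halve the channel count (the paper's theta = 0.5) and the growth rate k equals self.filters, the channel count entering the final pooling can be checked with a few lines (the numbers are illustrative, not taken from the source):

# Illustrative channel bookkeeping for a (6, 12, 48, 32) DenseNet with k = 32.
k = 32
channels = 2 * k                 # after conv0
for nb_layers in (6, 12, 48):    # dense_1, dense_2, dense_3
    channels += nb_layers * k    # each dense layer adds k channels
    channels //= 2               # transition layer compresses by half
channels += 32 * k               # dense_final
print(channels)                  # 1920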
Example #5
File: ResNet.py Project: iiharu/NN
    def conv(self, inputs, filters, kernel_size, strides=1):
        inputs = batch_normalization()(inputs)
        inputs = relu()(inputs)
        inputs = conv2d(filters=filters,
                        kernel_size=kernel_size,
                        strides=strides)(inputs)
        return inputs
Example #6
    def bottleneck_layer(self, x, scope):
        with tf.variable_scope(scope):
            x = layers.batch_normalization(x, training=self.training, name=scope + '_batch1')
            x = layers.selu(x)
            x = layers.conv2d(x, filters=4 * self.filters, kernel_size=[1, 1], strides=[1, 1],
                              kernel_regularizer=layers.l2_regularizer(0.0005),
                              padding='same', activation=None, name=scope + '_conv1')
            x = layers.drop_out(x, rate=self.dropout, training=self.training)

            x = layers.batch_normalization(x, training=self.training, name=scope + '_batch2')
            x = layers.selu(x)
            x = layers.conv2d(x, filters=self.filters, kernel_size=[3, 3], strides=[1, 1],
                              kernel_regularizer=layers.l2_regularizer(0.0005),
                              padding='same', activation=None, name=scope + '_conv2')
            x = layers.drop_out(x, rate=self.dropout, training=self.training)

            return x
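bottleneck_layer returns only the newly produced feature maps, so the enclosing dense block is presumably responsible for concatenating them with everything computed before. A minimal sketch under that assumption (the method name matches the calls in the Dense_net example; the body is not from the source):

    def dense_block(self, input_x, nb_layers, layer_name):
        # Sketch: each bottleneck sees the concatenation of all features so far.
        with tf.variable_scope(layer_name):
            features = [input_x]
            x = input_x
            for i in range(nb_layers):
                x = self.bottleneck_layer(x, scope=layer_name + '_bottleneck' + str(i))
                features.append(x)
                x = tf.concat(features, axis=3)  # NHWC channel axis
            return x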
Example #7
    def transition_block(self, inputs):
        # DenseNet transition: optionally compress the channel count with a 1x1
        # convolution, then halve the spatial resolution with average pooling.
        filters = keras.backend.int_shape(inputs)[
            self.batch_normalization_axis]
        if self.compression:
            filters = int(filters * (1 - self.reduction_rate))

        outputs = batch_normalization()(inputs)
        outputs = relu()(outputs)
        outputs = conv2d(filters=filters, kernel_size=(1, 1))(outputs)
        outputs = average_pooling2d(pool_size=(2, 2), strides=2)(outputs)

        return outputs
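With compression enabled, the 1x1 convolution shrinks the channel count before the pooling halves the spatial resolution. A quick check of the arithmetic (the values are illustrative):

# Illustrative only: the compression arithmetic used by transition_block.
filters = 256
reduction_rate = 0.5
compressed = int(filters * (1 - reduction_rate))  # 1x1 conv output channels
print(compressed)  # 128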
Example #8
    def __init__(self,
                 input_layers=None,
                 outputs_layers=None,
                 inception_layers=None):
        self.inception_version = 1
        if input_layers is None:
            self.input_layers = [
                conv2d(filters=64,
                       kernel_size=(7, 7),
                       strides=2,
                       padding='same'),
                max_pooling2d(pool_size=(3, 3), strides=2, padding='same'),
                batch_normalization(),
                conv2d(filters=192,
                       kernel_size=(1, 1),
                       strides=1,
                       padding='valid'),
                conv2d(filters=192,
                       kernel_size=(3, 3),
                       strides=1,
                       padding='same'),
                batch_normalization(),
                max_pooling2d(pool_size=(3, 3), strides=2, padding='same')
            ]
        else:
            self.input_layers = input_layers

        if outputs_layers is None:
            self.output_layers = [
                average_pooling2d(pool_size=(7, 7), strides=1,
                                  padding='valid'),
                flatten(),
                dropout(0.4),
                dense(1000),
                softmax()
            ]
        else:
            self.output_layers = outputs_layers
Example #9
File: ResNet.py Project: iiharu/NN
    def __init__(
            self,
            blocks,
            filters,
            bottleneck=False,
            input_layers=[
                batch_normalization(),
                relu(),
                conv2d(filters=64, kernel_size=(7, 7), strides=2),
            ],
            output_layers=[average_pooling2d(pool_size=(2, 2)),
                           flatten()]):
        self.blocks = blocks
        self.filters = filters
        self.bottleneck = bottleneck
        self.bn_axis = (-1 if keras.backend.image_data_format() == 'channels_last'
                        else 1)
        self.input_layers = input_layers
        self.output_layers = output_layers
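Python evaluates default arguments once, so the Keras layers in these defaults are shared by every instance constructed without explicit arguments, which in Keras means shared weights across models. A common alternative (a sketch, not the project's code) is to default to None and build fresh layers inside the constructor:

    # Sketch: create fresh default layers per instance instead of sharing them.
    def __init__(self, blocks, filters, bottleneck=False,
                 input_layers=None, output_layers=None):
        if input_layers is None:
            input_layers = [batch_normalization(),
                            relu(),
                            conv2d(filters=64, kernel_size=(7, 7), strides=2)]
        if output_layers is None:
            output_layers = [average_pooling2d(pool_size=(2, 2)), flatten()]
        self.input_layers = input_layers
        self.output_layers = output_layers
        # remaining attributes (blocks, filters, bottleneck, bn_axis) as above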
Example #10
    def build(self, input_shape, classes):
        # input
        inputs = keras.Input(shape=input_shape)

        filters = 64
        outputs = conv2d(filters=filters,
                         kernel_size=(7, 7),
                         strides=2,
                         padding='same')(inputs)
        outputs = max_pooling2d(pool_size=(3, 3), strides=2,
                                padding='same')(outputs)
        filters = 192
        # using batch norm instead of local response norm
        outputs = batch_normalization()(outputs)
        outputs = conv2d(filters=filters,
                         kernel_size=(1, 1),
                         strides=1,
                         padding='valid')(outputs)
        outputs = conv2d(filters=filters,
                         kernel_size=(3, 3),
                         strides=1,
                         padding='same')(outputs)
        # using batch norm instead of local response norm
        outputs = batch_normalization()(outputs)
        outputs = max_pooling2d(pool_size=(3, 3), strides=2,
                                padding='same')(outputs)

        # inception (3a)
        filters = 256
        outputs = self.inception(inputs=outputs, filters=filters)
        # inception (3b)
        filters = 480
        outputs = self.inception(inputs=outputs, filters=filters)

        outputs = max_pooling2d(pool_size=(3, 3), strides=2,
                                padding='same')(outputs)

        # inception (4a)
        filters = 512
        outputs = self.inception(inputs=outputs, filters=filters)

        # if K.learning_phase() == 1:
        outputs2 = self.classifier_aux(outputs, classes=classes)

        # inception (4b)
        outputs = self.inception(inputs=outputs, filters=filters)
        # inception (4c)
        outputs = self.inception(inputs=outputs, filters=filters)
        # inception (4d)
        filters = 528
        outputs = self.inception(inputs=outputs, filters=filters)

        # if K.learning_phase() == 1:
        outputs1 = self.classifier_aux(outputs, classes=classes)

        # inception (4e)
        filters = 832
        outputs = self.inception(inputs=outputs, filters=filters)

        outputs = max_pooling2d(pool_size=(2, 2), strides=2,
                                padding='same')(outputs)

        # inception (5a)
        outputs = self.inception(inputs=outputs, filters=filters)
        # inception (5b)
        filters = 1024
        outputs = self.inception(inputs=outputs, filters=filters)

        # classifier
        outputs0 = self.classifier_main(inputs=outputs, classes=classes)

        model = keras.Model(inputs=inputs,
                            outputs=[outputs0, outputs1, outputs2])

        model.summary()

        return model
Example #11
def separable_conv_block(inputs, filters, kernel_size=(3, 3), strides=1):
    inputs = separable_conv2d(filters=filters, kernel_size=kernel_size,
                              strides=strides)(inputs)
    inputs = batch_normalization()(inputs)
    return inputs
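Depthwise-separable convolutions are the point of this block: they factor a standard convolution into a per-channel spatial filter plus a 1x1 projection, cutting the weight count sharply. A quick comparison, ignoring biases (the channel sizes are examples, not from the source):

# Illustrative parameter counts: standard vs. depthwise-separable 3x3 conv.
c_in, c_out, k = 64, 128, 3
standard = k * k * c_in * c_out          # 73728 weights
separable = k * k * c_in + c_in * c_out  # 576 + 8192 = 8768 weights
print(standard, separable)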
Example #12
def build_encoder(x):
    # The encoder uses a deep residual network.
    outputs = []
    pooling = [1, 2, 2, 1]

    shape = x.get_shape().as_list()
    bs = shape[0]
    seq = shape[1]
    temp_shape = [bs * seq] + shape[2:]
    x = tf.reshape(x, temp_shape)
    # print x.get_shape().as_list()

    # layer 0
    with tf.variable_scope("encoder_layer0", reuse=tf.AUTO_REUSE):
        conv0_0 = layers.conv_layer(name="conv0_0",
                                    x=x,
                                    filter_shape=layers.create_variable(
                                        "filter0_0", shape=[7, 7, 3, 96]))
        conv0_0 = layers.batch_normalization(conv0_0, "conv0_0_bn")
        conv0_0 = layers.relu_layer(conv0_0)
        conv0_1 = layers.conv_layer(name="conv0_1",
                                    x=conv0_0,
                                    filter_shape=layers.create_variable(
                                        "filter0_1", shape=[3, 3, 96, 96]))
        conv0_1 = layers.batch_normalization(conv0_1, "conv0_1_bn")
        conv0_1 = layers.relu_layer(conv0_1)
        shortcut0 = layers.conv_layer(name="shortcut",
                                      x=x,
                                      filter_shape=layers.create_variable(
                                          "filter0_2", shape=[1, 1, 3, 96]))
        shortcut0 = layers.batch_normalization(shortcut0, "shortcut0_bn")
        shortcut0 = layers.relu_layer(shortcut0)
        layer0 = layers.pooling_layer("pooling", conv0_1 + shortcut0, pooling)
        outputs.append(layer0)  # [bs * size, 64, 64, 96]

    # layer 1
    with tf.variable_scope("encoder_layer1", reuse=tf.AUTO_REUSE):
        conv1_0 = layers.conv_layer(name="conv1_0",
                                    x=layer0,
                                    filter_shape=layers.create_variable(
                                        "filter1_0", shape=[3, 3, 96, 128]))
        conv1_0 = layers.batch_normalization(conv1_0, "conv1_0_bn")
        conv1_0 = layers.relu_layer(conv1_0)
        conv1_1 = layers.conv_layer(name="conv1_1",
                                    x=conv1_0,
                                    filter_shape=layers.create_variable(
                                        "filter1_1", shape=[3, 3, 128, 128]))
        conv1_1 = layers.batch_normalization(conv1_1, "conv1_1_bn")
        conv1_1 = layers.relu_layer(conv1_1)
        shortcut1 = layers.conv_layer(name="shortcut",
                                      x=layer0,
                                      filter_shape=layers.create_variable(
                                          "filter1_2", shape=[1, 1, 96, 128]))
        shortcut1 = layers.batch_normalization(shortcut1, "shortcut1_bn")
        shortcut1 = layers.relu_layer(shortcut1)
        layer1 = layers.pooling_layer("pooling", conv1_1 + shortcut1, pooling)
        outputs.append(layer1)  # [bs * size, 32, 32, 128]

    # layer 2
    with tf.variable_scope("encoder_layer2", reuse=tf.AUTO_REUSE):
        conv2_0 = layers.conv_layer(name="conv2_0",
                                    x=layer1,
                                    filter_shape=layers.create_variable(
                                        "filter2_0", shape=[3, 3, 128, 256]))
        conv2_0 = layers.batch_normalization(conv2_0, "conv2_0_bn")
        conv2_0 = layers.relu_layer(conv2_0)
        conv2_1 = layers.conv_layer(name="conv2_1",
                                    x=conv2_0,
                                    filter_shape=layers.create_variable(
                                        "filter2_1", shape=[3, 3, 256, 256]))
        conv2_1 = layers.batch_normalization(conv2_1, "conv2_1_bn")
        conv2_1 = layers.relu_layer(conv2_1)
        shortcut2 = layers.conv_layer(name="shortcut",
                                      x=layer1,
                                      filter_shape=layers.create_variable(
                                          "filter2_2", shape=[1, 1, 128, 256]))
        shortcut2 = layers.batch_normalization(shortcut2, "shortcut2_bn")
        shortcut2 = layers.relu_layer(shortcut2)
        layer2 = layers.pooling_layer("pooling", conv2_1 + shortcut2, pooling)
        outputs.append(layer2)  # [bs * size, 16, 16, 256]

    # layer 3
    with tf.variable_scope("encoder_layer3", reuse=tf.AUTO_REUSE):
        conv3_0 = layers.conv_layer(name="conv3_0",
                                    x=layer2,
                                    filter_shape=layers.create_variable(
                                        "filter3_0", shape=[3, 3, 256, 256]))
        conv3_0 = layers.batch_normalization(conv3_0, "conv3_0_bn")
        conv3_0 = layers.relu_layer(conv3_0)
        conv3_1 = layers.conv_layer(name="conv3_1",
                                    x=conv3_0,
                                    filter_shape=layers.create_variable(
                                        "filter3_1", shape=[3, 3, 256, 256]))
        conv3_1 = layers.batch_normalization(conv3_1, "conv3_1_bn")
        conv3_1 = layers.relu_layer(conv3_1)
        layer3 = layers.pooling_layer("pooling", conv3_1, pooling)  # no shortcut branch in this layer
        outputs.append(layer3)  # [bs * size, 8, 8, 256]

    # layer 4
    with tf.variable_scope("encoder_layer4", reuse=tf.AUTO_REUSE):
        conv4_0 = layers.conv_layer(name="conv4_0",
                                    x=layer3,
                                    filter_shape=layers.create_variable(
                                        "filter4_0", shape=[3, 3, 256, 256]))
        conv4_0 = layers.batch_normalization(conv4_0, "conv4_0_bn")
        conv4_0 = layers.relu_layer(conv4_0)
        conv4_1 = layers.conv_layer(name="conv4_1",
                                    x=conv4_0,
                                    filter_shape=layers.create_variable(
                                        "filter4_1", shape=[3, 3, 256, 256]))
        conv4_1 = layers.batch_normalization(conv4_1, "conv4_1_bn")
        conv4_1 = layers.relu_layer(conv4_1)
        shortcut4 = layers.conv_layer(name="shortcut",
                                      x=layer3,
                                      filter_shape=layers.create_variable(
                                          "filter4_2", shape=[1, 1, 256, 256]))
        shortcut4 = layers.batch_normalization(shortcut4, "shortcut4_bn")
        shortcut4 = layers.relu_layer(shortcut4)
        layer4 = layers.pooling_layer("pooling", conv4_1 + shortcut4, pooling)
        outputs.append(layer4)  # [bs * size, 4, 4, 256]

    # layer 5
    with tf.variable_scope("encoder_layer5", reuse=tf.AUTO_REUSE):
        conv5_0 = layers.conv_layer(name="conv5_0",
                                    x=layer4,
                                    filter_shape=layers.create_variable(
                                        "filter5_0", shape=[3, 3, 256, 256]))
        conv5_0 = layers.batch_normalization(conv5_0, "conv5_0_bn")
        conv5_0 = layers.relu_layer(conv5_0)
        conv5_1 = layers.conv_layer(name="conv5_1",
                                    x=conv5_0,
                                    filter_shape=layers.create_variable(
                                        "filter5_1", shape=[3, 3, 256, 256]))
        conv5_1 = layers.batch_normalization(conv5_1, "conv5_1_bn")
        conv5_1 = layers.relu_layer(conv5_1)
        shortcut5 = layers.conv_layer(name="shortcut",
                                      x=layer4,
                                      filter_shape=layers.create_variable(
                                          "filter5_2", shape=[1, 1, 256, 256]))
        shortcut5 = layers.batch_normalization(shortcut5, "shortcut5_bn")
        shortcut5 = layers.relu_layer(shortcut5)
        layer5 = layers.pooling_layer("pooling", conv5_1 + shortcut5, pooling)
        outputs.append(layer5)  # [bs * size, 2, 2, 256]

    final_shape = [bs, seq, fc_layer_size[0]]
    # Flatten layer and fully connected layer
    flatten = layers.flatten_layer(layer5)
    outputs.append(flatten)

    with tf.variable_scope("fc_layer", reuse=tf.AUTO_REUSE):
        layer_fc = layers.fully_connected_layer(flatten, fc_layer_size[0],
                                                "fclayer_w", "fclayer_b")
        # layer_fc = layers.batch_normalization(layer_fc, "fc_bn")
        layer_fc = layers.relu_layer(layer_fc)
        outputs.append(layer_fc)  # [bs * size, 1024]

    # [bs, size, 1024]
    return tf.reshape(outputs[-1], final_shape)
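A hypothetical invocation (a sketch; fc_layer_size is referenced but not defined in the snippet and is assumed to be a module-level constant such as [1024], which matches the flattened 2 x 2 x 256 feature map):

# Sketch: encode a batch of 8 sequences of 16 RGB frames at 128x128.
fc_layer_size = [1024]  # assumed module-level constant
frames = tf.placeholder(tf.float32, shape=[8, 16, 128, 128, 3])  # [bs, seq, H, W, C]
codes = build_encoder(frames)  # -> [8, 16, 1024]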