예제 #1
0
    def model(self, X, is_training, keep_prob):
        """Inception-style embedding network.

        Builds a stem conv followed by three residual Inception stages,
        collapses the spatial map with a depthwise conv, and returns an
        L2-normalized feature vector (embedding) per input image.
        """
        # Stem: 3x3 conv, then a strided max-pool halves the resolution.
        net = conv2d(X, scope="CONV_1", filter=32, kernel_size=3, strides=1,
                     padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding="same")

        # Stage 2: two Inception blocks with a strided 1x1 projection shortcut.
        shortcut = conv2d(net, scope="CONV_res_2", filter=64,
                          kernel_size=1, strides=2, padding="same")
        net = inception_v2(input=net, scope="INCEPTION_2a", filters=64,
                           batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = inception_v2(input=net, scope="INCEPTION_2b", filters=64,
                           batch_norm=True, activation=None, is_training=is_training)
        net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding="same")
        net = net + shortcut

        # Stage 3: pre-activation ReLU, two Inception blocks, strided shortcut.
        shortcut = conv2d(net, scope="CONV_res_3", filter=128,
                          kernel_size=1, strides=2, padding="same")
        net = tf.nn.relu(net)
        net = inception_v2(input=net, scope="INCEPTION_3a", filters=128,
                           batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = inception_v2(input=net, scope="INCEPTION_3b", filters=128,
                           batch_norm=True, activation=None, is_training=is_training)
        net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding="same")
        net = net + shortcut

        # Stage 4: three Inception blocks, then maxout reduces channels to 128.
        shortcut = conv2d(net, scope="CONV_res_4", filter=256,
                          kernel_size=1, strides=2, padding="same")
        net = tf.nn.relu(net)
        net = inception_v2(input=net, scope="INCEPTION_4a", filters=256,
                           batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = inception_v2(input=net, scope="INCEPTION_4b", filters=256,
                           batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = inception_v2(input=net, scope="INCEPTION_4c", filters=256,
                           batch_norm=True, activation=None, is_training=is_training)
        net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding="same")
        net = net + shortcut
        net = maxout(net, num_units=128)

        # Depthwise conv with a kernel covering the full spatial extent
        # collapses the feature map to 1x1.
        net = deepwise_conv2d(net, scope="dw_conv", kernel_size=(
            net.shape[1], net.shape[2]), strides=1, padding="valid")

        # Flatten to a vector and L2-normalize so embeddings live on the
        # unit hypersphere (cosine-distance friendly).
        net = tf.layers.flatten(net)
        net = tf.nn.l2_normalize(net, axis=-1)
        return net
예제 #2
0
    def __build_ssd_layers(self):
        """Append the extra SSD feature-extraction convolutions on top of the
        backbone's conv6_2 pointwise output.

        Produces progressively smaller feature maps (conv7_1/7_2, conv8_1/8_2)
        stored as attributes for the detection heads to consume.
        """
        with tf.variable_scope('ssd_layer'):
            # Block gradients here so training the SSD head leaves the
            # backbone weights untouched.
            conv6_2_pw_stop = tf.stop_gradient(self.base.conv6_2_pw)

            # NOTE(review): the scope strings below read 'sdd_*' — likely a
            # typo for 'ssd_*'. Left as-is because renaming the variable
            # scopes would invalidate existing checkpoints; confirm before
            # changing.
            # 1x1 bottleneck conv, stride 1.
            self.ssd_conv7_1 = conv2d('sdd_conv7_1', conv6_2_pw_stop,
                                    num_filters=256, kernel_size=(1, 1),
                                    padding='SAME', stride=(1, 1), activation=tf.nn.relu,
                                    batchnorm_enabled=self.args.batchnorm_enabled,
                                    l2_strength=self.args.l2_strength,
                                    is_training=self.is_training, bias=self.args.bias)

            # 3x3 conv, stride 2: halves the spatial resolution.
            self.ssd_conv7_2 = conv2d('sdd_conv7_2', self.ssd_conv7_1,
                                    num_filters=512, kernel_size=(3, 3),
                                    padding='SAME', stride=(2, 2), activation=tf.nn.relu,
                                    batchnorm_enabled=self.args.batchnorm_enabled,
                                    l2_strength=self.args.l2_strength,
                                    is_training=self.is_training, bias=self.args.bias)

            # Second bottleneck.
            self.ssd_conv8_1 = conv2d('sdd_conv8_1', self.ssd_conv7_2,
                                    num_filters=128, kernel_size=(1, 1),
                                    padding='SAME', stride=(1, 1), activation=tf.nn.relu,
                                    batchnorm_enabled=self.args.batchnorm_enabled,
                                    l2_strength=self.args.l2_strength,
                                    is_training=self.is_training, bias=self.args.bias)

            # Final strided conv with VALID padding for the smallest map.
            self.ssd_conv8_2 = conv2d('sdd_conv8_2', self.ssd_conv8_1,
                                    num_filters=256, kernel_size=(3, 3),
                                    padding='VALID', stride=(2, 2), activation=tf.nn.relu,
                                    batchnorm_enabled=self.args.batchnorm_enabled,
                                    l2_strength=self.args.l2_strength,
                                    is_training=self.is_training, bias=self.args.bias)
def bottleneck_unet_block(
    inputs, filters, data_format='NCHW', is_training=True, conv2d_hparams=None, block_name='bottleneck_block'
):
    """U-Net bottleneck block: two 3x3 convolutions (each followed by an
    activation) and a 2x2 transposed-convolution upsample that halves the
    channel count.

    Args:
        inputs: input feature tensor.
        filters: channel count of both 3x3 convolutions; the deconv
            emits ``filters // 2`` channels.
        data_format: 'NCHW' or 'NHWC'.
        is_training: whether the created variables are trainable.
        conv2d_hparams: hyper-parameter object providing
            ``kernel_initializer``, ``bias_initializer`` and
            ``activation_fn``.
        block_name: variable scope wrapping the block.

    Returns:
        The upsampled feature tensor.
    """

    with tf.variable_scope(block_name):

        net = layers.conv2d(
            inputs,
            n_channels=filters,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            data_format=data_format,
            use_bias=True,
            trainable=is_training,
            kernel_initializer=conv2d_hparams.kernel_initializer,
            bias_initializer=conv2d_hparams.bias_initializer,
        )

        net = blocks.activation_block(
            inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act1'
        )

        net = layers.conv2d(
            net,
            n_channels=filters,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            data_format=data_format,
            use_bias=True,
            trainable=is_training,
            kernel_initializer=conv2d_hparams.kernel_initializer,
            bias_initializer=conv2d_hparams.bias_initializer,
        )

        net = blocks.activation_block(
            inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act2'
        )

        net = layers.deconv2d(
            net,
            # BUG FIX: was `filters / 2`, which is float division under
            # Python 3 — channel counts must be integers.
            n_channels=filters // 2,
            kernel_size=(2, 2),
            padding='same',
            data_format=data_format,
            use_bias=True,
            trainable=is_training,
            kernel_initializer=conv2d_hparams.kernel_initializer,
            bias_initializer=conv2d_hparams.bias_initializer,
        )

        net = blocks.activation_block(
            inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act3'
        )

        return net
예제 #4
0
    def __create_detector(self, x, depth, mapsize, name, l2_strength):
        """Create one SSD detection head.

        Applies a 3x3 conv with `depth` output channels on feature map `x`
        and reshapes the result to (batch, mapsize.w * mapsize.h, depth) so
        each spatial cell becomes one prediction row.
        """
        # BUG FIX: honor the `l2_strength` argument — the parameter was
        # previously accepted but ignored in favor of self.args.l2_strength.
        x = conv2d(name, x, num_filters=depth, kernel_size=(3, 3),
                   padding='SAME', stride=(1, 1), activation=tf.nn.relu,
                   batchnorm_enabled=self.args.batchnorm_enabled,
                   l2_strength=l2_strength,
                   is_training=self.is_training, bias=self.args.bias)

        # One row per spatial position of the feature map.
        x = tf.reshape(x, [-1, mapsize.w * mapsize.h, depth])

        return x
예제 #5
0
def conv2d_block(inputs,
                 n_channels,
                 kernel_size=(3, 3),
                 strides=(2, 2),
                 mode='SAME',
                 use_batch_norm=True,
                 activation='relu',
                 is_training=True,
                 data_format='NHWC',
                 conv2d_hparams=None,
                 batch_norm_hparams=None,
                 name='conv2d'):
    """Conv2D -> (optional) batch-norm -> activation block.

    Args:
        inputs: input feature tensor.
        n_channels: number of output channels.
        kernel_size: convolution kernel size.
        strides: convolution strides.
        mode: padding mode — 'SAME', 'VALID', or 'SAME_RESNET' (explicit
            symmetric padding followed by a VALID convolution, as used by
            ResNet for strided convs).
        use_batch_norm: insert a batch-norm layer after the convolution
            (and drop the conv bias, since BN provides the shift).
        activation: 'relu', 'tanh', 'linear', or None.
        is_training: batch-norm mode / variable trainability.
        data_format: 'NHWC' or 'NCHW'.
        conv2d_hparams: HParams providing kernel/bias initializers.
        batch_norm_hparams: HParams with batch-norm settings; required when
            `use_batch_norm` is True.
        name: variable scope for the block.

    Returns:
        The activated output tensor.

    Raises:
        ValueError: if a required HParams argument has the wrong type.
        KeyError: if `activation` is not a recognized name.
    """

    if not isinstance(conv2d_hparams, tf.contrib.training.HParams):
        raise ValueError(
            "The parameter `conv2d_hparams` is not of type `HParams`")

    if not isinstance(batch_norm_hparams,
                      tf.contrib.training.HParams) and use_batch_norm:
        # BUG FIX: this message previously (incorrectly) named
        # `conv2d_hparams`; the failed check is on `batch_norm_hparams`.
        raise ValueError(
            "The parameter `batch_norm_hparams` is not of type `HParams`")

    with tf.variable_scope(name):

        if mode != 'SAME_RESNET':
            net = layers.conv2d(
                inputs,
                n_channels=n_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=mode,
                data_format=data_format,
                use_bias=not use_batch_norm,
                trainable=is_training,
                kernel_initializer=conv2d_hparams.kernel_initializer,
                bias_initializer=conv2d_hparams.bias_initializer,
            )

        else:  # Special padding mode for ResNet models
            if strides == (1, 1):
                # Unstrided: plain SAME padding is already symmetric.
                net = layers.conv2d(
                    inputs,
                    n_channels=n_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding='SAME',
                    data_format=data_format,
                    use_bias=not use_batch_norm,
                    trainable=is_training,
                    kernel_initializer=conv2d_hparams.kernel_initializer,
                    bias_initializer=conv2d_hparams.bias_initializer,
                )

            else:
                # Strided: pad explicitly (symmetrically, favoring the end)
                # and convolve with VALID padding so the padding does not
                # depend on the input size, matching the ResNet paper.
                rate = 1  # Unused (for 'a trous' convolutions)

                kernel_height_effective = kernel_size[0] + (kernel_size[0] -
                                                            1) * (rate - 1)

                pad_h_beg = (kernel_height_effective - 1) // 2
                pad_h_end = kernel_height_effective - 1 - pad_h_beg

                kernel_width_effective = kernel_size[1] + (kernel_size[1] -
                                                           1) * (rate - 1)

                pad_w_beg = (kernel_width_effective - 1) // 2
                pad_w_end = kernel_width_effective - 1 - pad_w_beg

                # Built in NHWC order; reordered below for NCHW.
                padding = [[0, 0], [pad_h_beg, pad_h_end],
                           [pad_w_beg, pad_w_end], [0, 0]]

                if data_format == 'NCHW':
                    padding = [padding[0], padding[3], padding[1], padding[2]]

                padded_inputs = tf.pad(inputs, padding)

                net = layers.conv2d(
                    padded_inputs,  # inputs,
                    n_channels=n_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding='VALID',
                    data_format=data_format,
                    use_bias=not use_batch_norm,
                    trainable=is_training,
                    kernel_initializer=conv2d_hparams.kernel_initializer,
                    bias_initializer=conv2d_hparams.bias_initializer,
                )

        if use_batch_norm:
            net = layers.batch_norm(
                net,
                decay=batch_norm_hparams.decay,
                epsilon=batch_norm_hparams.epsilon,
                scale=batch_norm_hparams.scale,
                center=batch_norm_hparams.center,
                is_training=is_training,
                data_format=data_format,
                param_initializers=batch_norm_hparams.param_initializers)

        if activation == 'relu':
            net = layers.relu(net, name='relu')

        elif activation == 'tanh':
            net = layers.tanh(net, name='tanh')

        elif activation != 'linear' and activation is not None:
            raise KeyError('Invalid activation type: `%s`' % activation)

        return net
예제 #6
0
    def build_model(self,
                    inputs,
                    training=True,
                    reuse=False,
                    use_final_conv=False):
        """Build the classification graph: stem conv/pool, stacked bottleneck
        residual blocks, global spatial mean, and a dense (or 1x1-conv) head.

        Args:
            inputs: image batch laid out per self.model_hparams.input_format.
            training: batch-norm mode / variable trainability.
            reuse: reuse variables of an already-built copy of the model.
            use_final_conv: classify with a 1x1 conv instead of a dense layer.

        Returns:
            Tuple (probs, logits), both cast to float32.
        """

        with var_storage.model_variable_scope(self.model_hparams.model_name,
                                              reuse=reuse,
                                              dtype=self.model_hparams.dtype):

            # Transpose only when the input layout differs from the layout
            # the compute graph was configured for.
            with tf.variable_scope("input_reshape"):
                if self.model_hparams.input_format == 'NHWC' and self.model_hparams.compute_format == 'NCHW':
                    # Reshape inputs: NHWC => NCHW
                    inputs = tf.transpose(inputs, [0, 3, 1, 2])

                elif self.model_hparams.input_format == 'NCHW' and self.model_hparams.compute_format == 'NHWC':
                    # Reshape inputs: NCHW => NHWC
                    inputs = tf.transpose(inputs, [0, 2, 3, 1])

            # Cast to the model's compute dtype (e.g. fp16) if necessary.
            if self.model_hparams.dtype != inputs.dtype:
                inputs = tf.cast(inputs, self.model_hparams.dtype)

            # Stem: 7x7/2 conv + BN + ReLU, then 3x3/2 max-pool.
            net = blocks.conv2d_block(
                inputs,
                n_channels=64,
                kernel_size=(7, 7),
                strides=(2, 2),
                mode='SAME',
                use_batch_norm=True,
                activation='relu',
                is_training=training,
                data_format=self.model_hparams.compute_format,
                conv2d_hparams=self.conv2d_hparams,
                batch_norm_hparams=self.batch_norm_hparams,
                name='conv2d')

            net = layers.max_pooling2d(
                net,
                pool_size=(3, 3),
                strides=(2, 2),
                padding='SAME',
                data_format=self.model_hparams.compute_format,
                name="max_pooling2d",
            )

            # Residual stages: the first layer of every stage except the
            # first downsamples with stride 2.
            model_bottlenecks = self.model_hparams.layers_depth
            for block_id, block_bottleneck in enumerate(model_bottlenecks):
                for layer_id in range(
                        self.model_hparams.layers_count[block_id]):
                    stride = 2 if (layer_id == 0 and block_id != 0) else 1

                    net = blocks.bottleneck_block(
                        inputs=net,
                        depth=block_bottleneck * self.model_hparams.expansions,
                        depth_bottleneck=block_bottleneck,
                        cardinality=self.model_hparams.cardinality,
                        stride=stride,
                        training=training,
                        data_format=self.model_hparams.compute_format,
                        conv2d_hparams=self.conv2d_hparams,
                        batch_norm_hparams=self.batch_norm_hparams,
                        block_name="btlnck_block_%d_%d" % (block_id, layer_id),
                        use_se=self.model_hparams.use_se,
                        ratio=self.model_hparams.se_ratio)

            with tf.variable_scope("output"):
                # Global average over the spatial dimensions.
                net = layers.reduce_mean(
                    net,
                    keepdims=False,
                    data_format=self.model_hparams.compute_format,
                    name='spatial_mean')

                if use_final_conv:
                    logits = layers.conv2d(
                        net,
                        n_channels=self.model_hparams.n_classes,
                        kernel_size=(1, 1),
                        strides=(1, 1),
                        padding='SAME',
                        data_format=self.model_hparams.compute_format,
                        dilation_rate=(1, 1),
                        use_bias=True,
                        kernel_initializer=self.dense_hparams.
                        kernel_initializer,
                        bias_initializer=self.dense_hparams.bias_initializer,
                        trainable=training,
                        name='dense')
                else:
                    logits = layers.dense(
                        inputs=net,
                        units=self.model_hparams.n_classes,
                        use_bias=True,
                        trainable=training,
                        kernel_initializer=self.dense_hparams.
                        kernel_initializer,
                        bias_initializer=self.dense_hparams.bias_initializer)

                # Losses/metrics expect float32 even under mixed precision.
                if logits.dtype != tf.float32:
                    logits = tf.cast(logits, tf.float32)

                # Class axis: channel dim of the conv head (3 for NHWC,
                # 1 for NCHW), or axis 1 of the dense head's 2-D output.
                axis = 3 if self.model_hparams.compute_format == "NHWC" and use_final_conv else 1
                probs = layers.softmax(logits, name="softmax", axis=axis)

            return probs, logits
def deconv2d(inputs,
             n_channels=8,
             kernel_size=(3, 3),
             padding='VALID',
             data_format='NHWC',
             use_bias=True,
             kernel_initializer=tf.variance_scaling_initializer(),
             bias_initializer=tf.zeros_initializer(),
             trainable=True,
             use_upscale_conv=True):
    """2x spatial upsampling layer.

    With `use_upscale_conv=True` (default) the input is nearest-neighbor
    upscaled and then convolved at stride 1; otherwise a single strided
    transposed convolution is used.
    """
    # Normalize and validate the string arguments before building any ops.
    padding = padding.upper()

    if data_format not in ['NHWC', 'NCHW']:
        raise ValueError(
            "Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" %
            data_format)

    if padding not in ['SAME', 'VALID']:
        raise ValueError(
            "Unknown padding: `%s` (accepted: ['SAME', 'VALID'])" % padding)

    with tf.variable_scope("deconv2d"):

        if not use_upscale_conv:
            in_shape = inputs.get_shape()

            # tf.layers uses 'channels_first'/'channels_last' naming.
            tf_data_format = ('channels_first'
                              if data_format == "NCHW" else "channels_last")

            out = tf.layers.conv2d_transpose(
                inputs=inputs,
                filters=n_channels,
                kernel_size=kernel_size,
                strides=(2, 2),
                padding=padding,
                data_format=tf_data_format,
                use_bias=use_bias,
                trainable=trainable,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer)

            _log_hparams(classname='Conv2DTranspose',
                         layername=out.name,
                         n_channels=n_channels,
                         kernel_size=kernel_size,
                         strides=(2, 2),
                         padding=padding,
                         data_format=data_format,
                         use_bias=use_bias,
                         trainable=trainable,
                         input_shape=str(in_shape),
                         out_shape=str(out.get_shape()),
                         out_dtype=out.dtype)

        else:
            # Upscale 2x, then convolve at stride 1.
            # Resize options: BILINEAR, NEAREST_NEIGHBOR, BICUBIC, AREA.
            out = layers.upscale_2d(
                inputs,
                size=(2, 2),
                method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
                align_corners=True,
                is_scale=True,
                data_format=data_format)

            out = layers.conv2d(out,
                                n_channels=n_channels,
                                kernel_size=kernel_size,
                                strides=(1, 1),
                                padding=padding,
                                data_format=data_format,
                                use_bias=use_bias,
                                trainable=trainable,
                                kernel_initializer=kernel_initializer,
                                bias_initializer=bias_initializer)

    return out
예제 #8
0
    def model(self, X, is_training, keep_prob):
        """Xception-style embedding network.

        Stacks separable-convolution residual stages, collapses the feature
        map with a full-extent depthwise conv, and returns an L2-normalized
        embedding vector per input image.
        """
        # Stem: 3x3 conv + strided max-pool.
        net = conv2d(X, scope="CONV_1", filter=32, kernel_size=3, strides=1,
                     padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding="same")

        # Stage 2: separable convs with a strided 1x1 projection shortcut.
        shortcut = conv2d(net, scope="CONV_res_2", filter=128,
                          kernel_size=1, strides=2, padding="same")
        net = separable_conv2d(net, scope="XCEPTION_2a", filter=64, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = separable_conv2d(net, scope="XCEPTION_2b", filter=128, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=None, is_training=is_training)
        net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding="same")
        net = net + shortcut

        # Stage 3: identity shortcut with pre-activation ReLU.
        shortcut = net
        net = tf.nn.relu(net)
        net = separable_conv2d(net, scope="XCEPTION_3a", filter=128, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = separable_conv2d(net, scope="XCEPTION_3b", filter=128, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = separable_conv2d(net, scope="XCEPTION_3c", filter=128, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=None, is_training=is_training)
        net = net + shortcut

        # Stage 4: strided projection shortcut, two separable convs, pool.
        shortcut = conv2d(net, scope="CONV_res_4", filter=128,
                          kernel_size=1, strides=2, padding="same")
        net = tf.nn.relu(net)
        net = separable_conv2d(net, scope="XCEPTION_4a", filter=128, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = separable_conv2d(net, scope="XCEPTION_4b", filter=128, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=None, is_training=is_training)
        net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding="same")
        net = net + shortcut

        # Stage 5: widen to 256 channels, then maxout back down to 128.
        shortcut = conv2d(net, scope="CONV_res_5", filter=256,
                          kernel_size=1, strides=2, padding="same")
        net = tf.nn.relu(net)
        net = separable_conv2d(net, scope="XCEPTION_5a", filter=256, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        net = separable_conv2d(net, scope="XCEPTION_5b", filter=256, kernel_size=3, strides=1,
                               padding="same", batch_norm=True, activation=None, is_training=is_training)
        net = tf.layers.max_pooling2d(net, pool_size=3, strides=2, padding="same")
        net = net + shortcut
        net = maxout(net, num_units=128)

        # Depthwise conv over the full spatial extent -> 1x1 feature map.
        net = deepwise_conv2d(net, scope="dw_conv_to_1_1", kernel_size=(
            net.shape[1], net.shape[2]), strides=1, padding="valid")

        # Flatten and L2-normalize the embedding.
        net = tf.layers.flatten(net)
        net = tf.nn.l2_normalize(net, axis=-1)
        return net
예제 #9
0
    def model(self, X, is_training, keep_prob):
        """Deep Xception-style embedding network (~22 conv stages).

        Alternates strided transition stages (1x1 projection shortcut +
        separable convs + max-pool) with runs of identity residual stages,
        then collapses the feature map with a full-extent depthwise conv and
        returns an L2-normalized embedding vector.
        """

        # CONV L1
        X = conv2d(X, scope="CONV_1", filter=16, kernel_size=3, strides=1,
                   padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        X = tf.layers.max_pooling2d(X, pool_size=3, strides=2, padding="same")

        # XCEPTION L2
        # Strided 1x1 conv matches the shortcut to the pooled main path.
        Res = conv2d(X, scope="CONV_res_2", filter=128,
                     kernel_size=1, strides=2, padding="same")
        X = separable_conv2d(X, scope="XCEPTION_2a", filter=64, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        X = separable_conv2d(X, scope="XCEPTION_2b", filter=128, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=None, is_training=is_training)
        X = tf.layers.max_pooling2d(X, pool_size=3, strides=2, padding="same")
        X = X + Res

        # XCEPTION L3-L6
        # Identity residual stages: pre-activation ReLU, three separable convs.
        for i in range(3, 7):
            Res = X
            X = tf.nn.relu(X)
            X = separable_conv2d(X, scope="XCEPTION_{}a".format(i), filter=128, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
            X = separable_conv2d(X, scope="XCEPTION_{}b".format(i), filter=128, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
            X = separable_conv2d(X, scope="XCEPTION_{}c".format(i), filter=128, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=None, is_training=is_training)
            X = X + Res

        # XCEPTION L7
        # Transition: downsample and widen to 256 channels.
        Res = conv2d(X, scope="CONV_res_7", filter=256,
                     kernel_size=1, strides=2, padding="same")
        X = tf.nn.relu(X)
        X = separable_conv2d(X, scope="XCEPTION_7a", filter=128, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        X = separable_conv2d(X, scope="XCEPTION_7b", filter=256, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=None, is_training=is_training)
        X = tf.layers.max_pooling2d(X, pool_size=3, strides=2, padding="same")
        X = X + Res

        # XCEPTION L8-L11
        for i in range(8, 12):
            Res = X
            X = tf.nn.relu(X)
            X = separable_conv2d(X, scope="XCEPTION_{}a".format(i), filter=256, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
            X = separable_conv2d(X, scope="XCEPTION_{}b".format(i), filter=256, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
            X = separable_conv2d(X, scope="XCEPTION_{}c".format(i), filter=256, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=None, is_training=is_training)
            X = X + Res

        # XCEPTION L12
        Res = conv2d(X, scope="CONV_res_12", filter=256,
                     kernel_size=1, strides=2, padding="same")
        X = tf.nn.relu(X)
        X = separable_conv2d(X, scope="XCEPTION_12a", filter=256, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        X = separable_conv2d(X, scope="XCEPTION_12b", filter=256, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=None, is_training=is_training)
        X = tf.layers.max_pooling2d(X, pool_size=3, strides=2, padding="same")
        X = X + Res

        # XCEPTION L13-L20
        for i in range(13, 21):
            Res = X
            X = tf.nn.relu(X)
            X = separable_conv2d(X, scope="XCEPTION_{}a".format(i), filter=256, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
            X = separable_conv2d(X, scope="XCEPTION_{}b".format(i), filter=256, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
            X = separable_conv2d(X, scope="XCEPTION_{}c".format(i), filter=256, kernel_size=3,
                                 strides=1, padding="same", batch_norm=True, activation=None, is_training=is_training)
            X = X + Res

        # XCEPTION L21
        # Final transition: widen to 512 channels.
        Res = conv2d(X, scope="CONV_res_21", filter=512,
                     kernel_size=1, strides=2, padding="same")
        X = tf.nn.relu(X)
        X = separable_conv2d(X, scope="XCEPTION_21a", filter=256, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)
        X = separable_conv2d(X, scope="XCEPTION_21b", filter=512, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=None, is_training=is_training)
        X = tf.layers.max_pooling2d(X, pool_size=3, strides=2, padding="same")
        X = X + Res

        # XCEPTION L22
        # Reduce to the 128-channel embedding width.
        X = separable_conv2d(X, scope="XCEPTION_22", filter=128, kernel_size=3, strides=1,
                             padding="same", batch_norm=True, activation=tf.nn.relu, is_training=is_training)

        # dw conv to 1x1
        # Depthwise conv over the full spatial extent collapses the map to 1x1.
        X = deepwise_conv2d(X, scope="dw_conv_to_1_1", kernel_size=(
            X.shape[1], X.shape[2]), strides=1, padding="valid")

        # flatten
        X = tf.layers.flatten(X)

        # Output
        # L2-normalize so embeddings lie on the unit hypersphere.
        X = tf.nn.l2_normalize(X, axis=-1)
        return X
예제 #10
0
def generator(img_shape, spectr_norm=False, gen_out=None):
    """Build the generator: a 12-layer densely-concatenated feature
    extractor, a two-branch reconstruction stage, and a 2x upsampling head
    producing a single-channel image.
    """
    input_layer = Input(shape=img_shape)

    # Output channel counts for every conv in the network, in order.
    channels = [
        64, 54, 48, 43, 39, 35, 31, 28, 25, 22, 18, 16, 24, 8, 8, 32, 16
    ]

    def _conv_unit(x, n_filters, k, s):
        # conv -> LeakyReLU(0.1) -> Dropout(0.2), the repeated motif below.
        x = conv2d(x,
                   filters=n_filters,
                   kernel_size=k,
                   stride=s,
                   padding='same',
                   sn=spectr_norm)
        x = LeakyReLU(0.1)(x)
        return Dropout(0.2)(x)

    # Part 1. Feature Extraction Network
    # The first layer downsamples (stride 2); the rest keep resolution.
    fe = _conv_unit(input_layer, channels[0], 3, 2)
    fe_layers = [fe]
    for idx in range(1, 12):
        fe = _conv_unit(fe, channels[idx], 3, 1)
        fe_layers.append(fe)

    # Dense concatenation of all twelve extraction layers.
    fe_final_layer = Concatenate()(fe_layers)

    # Part 2.1 Reconstruction Network
    # Branch A: single 1x1 conv.
    a1 = _conv_unit(fe_final_layer, channels[12], 1, 1)

    # Branch B: 1x1 bottleneck then a 3x3 conv.
    b1 = _conv_unit(fe_final_layer, channels[13], 1, 1)
    b2 = _conv_unit(b1, channels[14], 3, 1)

    reconstructed = Concatenate()([a1, b2])

    # Part 2.2 Upsampling (no dropout in the head).
    c1 = conv2d(reconstructed,
                filters=channels[15],
                kernel_size=3,
                stride=1,
                padding='same',
                sn=spectr_norm)
    c1 = LeakyReLU(0.1)(c1)

    c2 = upsample(c1,
                  filters=channels[16],
                  kernel_size=4,
                  stride=2,
                  padding='same')
    c2 = LeakyReLU(0.1)(c2)

    # Final single-channel projection with the configurable output activation.
    output = conv2d(c2,
                    filters=1,
                    kernel_size=3,
                    stride=1,
                    padding='same',
                    bias=False,
                    activation=gen_out)

    return Model(input_layer, output)
예제 #11
0
def sft_generator(img_shape, hu_min, hu_max, spectr_norm=False, gen_out=None):
    """Build the SFT-conditioned generator.

    Same topology as the plain generator, but each feature-extraction and
    reconstruction stage is modulated by a Spatial Feature Transform (SFT)
    layer conditioned on a 10-bin segmentation map derived from the input's
    HU window.

    Args:
        img_shape: shape of the input image tensor, e.g. (H, W, 1).
        hu_min: lower bound of the HU window used to bin the input.
        hu_max: upper bound of the HU window used to bin the input.
        spectr_norm: if True, apply spectral normalization in conv layers.
        gen_out: activation applied by the final single-channel conv.

    Returns:
        A Keras ``Model`` mapping the input image to a 1-channel output.
    """
    input_layer = Input(shape=img_shape)

    # Part 1. Feature Extraction Network
    filter_outputs = [
        64, 54, 48, 43, 39, 35, 31, 28, 25, 22, 18, 16, 24, 8, 8, 32, 16
    ]
    fe_layers = []
    for i in range(12):
        if i == 0:
            # Segmentation map: discretize each pixel into 10 HU bins and
            # one-hot encode; this is the SFT conditioning signal `sm`.
            indices = tf.histogram_fixed_width_bins(
                input_layer, [float(hu_min), float(hu_max)], 10)
            seg_map = tf.one_hot(indices, 10)
            seg_map = tf.squeeze(seg_map, axis=3)
            sm = condition()(seg_map)

            # First feature map; stride 2 downsamples once up front.
            fe = conv2d(input_layer,
                        filters=filter_outputs[i],
                        kernel_size=3,
                        stride=2,
                        padding='same',
                        sn=spectr_norm)
            fe = LeakyReLU(0.1)(fe)
            fe = Dropout(0.2)(fe)
        else:
            # Modulate the running feature map with the segmentation map
            # before each conv stage.
            fe = sft(units=[32, fe.shape[-1]])([fe, sm])
            fe = conv2d(fe,
                        filters=filter_outputs[i],
                        kernel_size=3,
                        stride=1,
                        padding='same',
                        sn=spectr_norm)
            fe = LeakyReLU(0.1)(fe)
            fe = Dropout(0.2)(fe)

        fe_layers.append(fe)

    fe_final_layer = Concatenate()(fe_layers)

    # Part 2.1 Reconstruction Network
    a1 = sft(units=[32, fe_final_layer.shape[-1]])([fe_final_layer, sm])
    a1 = conv2d(a1,
                filters=filter_outputs[12],
                kernel_size=1,
                stride=1,
                padding='same',
                sn=spectr_norm)
    a1 = LeakyReLU(0.1)(a1)
    a1 = Dropout(0.2)(a1)

    b1 = sft(units=[32, fe_final_layer.shape[-1]])([fe_final_layer, sm])
    # BUG FIX: was conv2d(fe_final_layer, ...), which discarded the SFT
    # output computed on the line above; feed the modulated tensor instead
    # (matches the `a1` path).
    b1 = conv2d(b1,
                filters=filter_outputs[13],
                kernel_size=1,
                stride=1,
                padding='same',
                sn=spectr_norm)
    b1 = LeakyReLU(0.1)(b1)
    b1 = Dropout(0.2)(b1)

    b2 = sft(units=[32, b1.shape[-1]])([b1, sm])
    # BUG FIX: was conv2d(b1, ...), again discarding the SFT output.
    b2 = conv2d(b2,
                filters=filter_outputs[14],
                kernel_size=3,
                stride=1,
                padding='same',
                sn=spectr_norm)
    b2 = LeakyReLU(0.1)(b2)
    b2 = Dropout(0.2)(b2)

    reconstructed = Concatenate()([a1, b2])

    # Part 2.2 Upsampling
    c1 = conv2d(reconstructed,
                filters=filter_outputs[15],
                kernel_size=3,
                stride=1,
                padding='same',
                sn=spectr_norm)
    c1 = LeakyReLU(0.1)(c1)

    # Recover the resolution lost by the stride-2 conv in stage 0.
    c2 = upsample(c1,
                  filters=filter_outputs[16],
                  kernel_size=4,
                  stride=2,
                  padding='same')
    c2 = LeakyReLU(0.1)(c2)

    output = conv2d(c2,
                    filters=1,
                    kernel_size=3,
                    stride=1,
                    padding='same',
                    bias=False,
                    activation=gen_out)

    return Model(input_layer, output)
# 예제 #12 (Example 12)
def discriminator(img_shape, spectr_norm=False):
    """Discriminator: six conv blocks (each conv -> instance norm ->
    LeakyReLU) followed by spatial pyramid pooling and two dense layers
    producing a single logit.

    Args:
        img_shape: shape of the input image tensor.
        spectr_norm: if True, apply spectral normalization in conv layers.

    Returns:
        A Keras ``Model`` mapping the input image to one unbounded score.
    """
    base_filters = 64

    input_layer = Input(shape=img_shape)
    y = input_layer

    # Channel width doubles every two blocks: 64, 64, 128, 128, 256, 256.
    for multiplier in (1, 1, 2, 2, 4, 4):
        y = conv2d(y,
                   filters=base_filters * multiplier,
                   kernel_size=4,
                   stride=1,
                   padding='same',
                   sn=spectr_norm)
        y = InstanceNormalization(axis=-1, center=True, scale=True)(y)
        y = LeakyReLU(0.1)(y)

    # Fixed-size descriptor regardless of input resolution.
    y = SpatialPyramidPooling2D(bins=[1, 2, 3], data_format='channels_last')(y)
    y = Dense(1024)(y)
    y = LeakyReLU(0.1)(y)
    output = Dense(1)(y)

    return Model(input_layer, output)
def output_unet_block(inputs,
                      residual_input,
                      filters,
                      n_output_channels,
                      data_format='NCHW',
                      is_training=True,
                      conv2d_hparams=None,
                      block_name='output_block'):
    """Final U-Net block: concatenate decoder features with the skip
    connection, refine with two activated 3x3 convs, then project to
    ``n_output_channels`` with a 1x1 conv.

    Args:
        inputs: decoder feature tensor.
        residual_input: skip-connection tensor from the encoder.
        filters: channel count of the two refinement convs.
        n_output_channels: channel count of the final projection.
        data_format: 'NCHW' or 'NHWC'; selects the concat axis.
        is_training: controls variable trainability.
        conv2d_hparams: HParams providing initializers and activation_fn.
        block_name: variable scope name.

    Returns:
        The output tensor of the 1x1 projection conv.
    """

    def _conv(x, n_channels, kernel_size):
        # Shared conv settings; initializers come from conv2d_hparams.
        return layers.conv2d(
            x,
            n_channels=n_channels,
            kernel_size=kernel_size,
            strides=(1, 1),
            padding='same',
            data_format=data_format,
            use_bias=True,
            trainable=is_training,
            kernel_initializer=conv2d_hparams.kernel_initializer,
            bias_initializer=conv2d_hparams.bias_initializer,
        )

    with tf.variable_scope(block_name):
        channel_axis = 1 if data_format == 'NCHW' else 3
        net = layers.concat([inputs, residual_input], axis=channel_axis)

        net = _conv(net, filters, (3, 3))
        net = blocks.activation_block(inputs=net,
                                      act_fn=conv2d_hparams.activation_fn,
                                      trainable=is_training,
                                      block_name='act1')

        net = _conv(net, filters, (3, 3))
        net = blocks.activation_block(inputs=net,
                                      act_fn=conv2d_hparams.activation_fn,
                                      trainable=is_training,
                                      block_name='act2')

        return _conv(net, n_output_channels, (1, 1))
def conv2d_block(
    inputs,
    n_channels,
    kernel_size=(3, 3),
    strides=(2, 2),
    mode='SAME',
    use_batch_norm=True,
    activation='relu',
    is_training=True,
    data_format='NHWC',
    conv2d_hparams=None,
    batch_norm_hparams=None,
    name='conv2d',
    cardinality=1,
):
    """Conv2D -> optional batch norm -> activation.

    Args:
        inputs: input tensor.
        n_channels: number of output channels.
        kernel_size: conv kernel size. Only honored when ``cardinality == 1``;
            the grouped path hard-codes a 3x3 filter (see NOTE below).
        strides: conv strides.
        mode: padding mode for the ``cardinality == 1`` path.
        use_batch_norm: insert a batch-norm layer after the conv.
        activation: one of 'relu', 'tanh', 'linear', or None.
        is_training: controls variable trainability and batch-norm mode.
        data_format: 'NHWC' or 'NCHW'.
        conv2d_hparams: HParams with kernel/bias initializers (required).
        batch_norm_hparams: HParams for batch norm (required when
            ``use_batch_norm`` is True).
        name: variable scope name.
        cardinality: number of groups; values > 1 use a grouped conv.

    Returns:
        The activated output tensor.

    Raises:
        ValueError: if a required hparams argument is not an ``HParams``.
        KeyError: if ``activation`` is not a recognized type.
    """

    if not isinstance(conv2d_hparams, tf.contrib.training.HParams):
        raise ValueError(
            "The parameter `conv2d_hparams` is not of type `HParams`")

    # BUG FIX: this message previously named `conv2d_hparams` even though
    # the check is on `batch_norm_hparams`.
    if not isinstance(batch_norm_hparams,
                      tf.contrib.training.HParams) and use_batch_norm:
        raise ValueError(
            "The parameter `batch_norm_hparams` is not of type `HParams`")

    with tf.variable_scope(name):
        if cardinality == 1:
            net = layers.conv2d(
                inputs,
                n_channels=n_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=mode,
                data_format=data_format,
                use_bias=not use_batch_norm,
                trainable=is_training,
                kernel_initializer=conv2d_hparams.kernel_initializer,
                bias_initializer=conv2d_hparams.bias_initializer)
        else:
            # NOTE(review): the grouped path ignores `kernel_size` and
            # `mode`, hard-coding a 3x3 filter with SAME padding —
            # presumably intentional for ResNeXt-style blocks; confirm
            # before relying on other kernel sizes here.
            group_filter = tf.get_variable(
                name=name + 'group_filter',
                shape=[3, 3, n_channels // cardinality, n_channels],
                trainable=is_training,
                dtype=tf.float32)
            net = tf.nn.conv2d(inputs,
                               group_filter,
                               strides=strides,
                               padding='SAME',
                               data_format=data_format)
        if use_batch_norm:
            net = layers.batch_norm(
                net,
                decay=batch_norm_hparams.decay,
                epsilon=batch_norm_hparams.epsilon,
                scale=batch_norm_hparams.scale,
                center=batch_norm_hparams.center,
                is_training=is_training,
                data_format=data_format,
                param_initializers=batch_norm_hparams.param_initializers)

        if activation == 'relu':
            net = layers.relu(net, name='relu')

        elif activation == 'tanh':
            net = layers.tanh(net, name='tanh')

        elif activation != 'linear' and activation is not None:
            raise KeyError('Invalid activation type: `%s`' % activation)

        return net
# 예제 #15 (Example 15)
    def __init_network(self):
        with tf.variable_scope('mobilenet_base'):
            # Preprocessing as done in the paper
            with tf.name_scope('pre_processing'):
                preprocessed_input = (self.X - self.mean_img) / 255.0

            # Model is here!
            conv1_1 = conv2d('conv_1', zero_pad(preprocessed_input), num_filters=int(round(32 * self.args.width_multiplier)),
                             kernel_size=(3, 3),
                             padding='VALID', stride=(2, 2), activation=tf.nn.relu6,
                             batchnorm_enabled=self.args.batchnorm_enabled,
                             is_training=self.is_training, l2_strength=self.args.l2_strength, bias=self.args.bias)
            self.__add_to_nodes([conv1_1])
            ############################################################################################
            conv2_1_dw, conv2_1_pw = depthwise_separable_conv2d('conv_ds_2', zero_pad(conv1_1),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=64, kernel_size=(3, 3), padding='VALID',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv2_1_dw, conv2_1_pw])

            conv2_2_dw, conv2_2_pw = depthwise_separable_conv2d('conv_ds_3', zero_pad(conv2_1_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=128, kernel_size=(3, 3), padding='VALID',
                                                                stride=(2, 2),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv2_2_dw, conv2_2_pw])
            ############################################################################################
            conv3_1_dw, self.conv3_1_pw = depthwise_separable_conv2d('conv_ds_4', zero_pad(conv2_2_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=128, kernel_size=(3, 3), padding='VALID',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv3_1_dw, self.conv3_1_pw])

            conv3_2_dw, conv3_2_pw = depthwise_separable_conv2d('conv_ds_5', zero_pad(self.conv3_1_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=256, kernel_size=(3, 3), padding='VALID',
                                                                stride=(2, 2),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv3_2_dw, conv3_2_pw])
            ############################################################################################
            conv4_1_dw, self.conv4_1_pw = depthwise_separable_conv2d('conv_ds_6', zero_pad(conv3_2_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=256, kernel_size=(3, 3), padding='VALID',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv4_1_dw, self.conv4_1_pw])

            conv4_2_dw, conv4_2_pw = depthwise_separable_conv2d('conv_ds_7', zero_pad(self.conv4_1_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='VALID',
                                                                stride=(2, 2),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv4_2_dw, conv4_2_pw])
            ############################################################################################
            conv5_1_dw, conv5_1_pw = depthwise_separable_conv2d('conv_ds_8', zero_pad(conv4_2_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='VALID',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv5_1_dw, conv5_1_pw])

            conv5_2_dw, self.conv5_2_pw = depthwise_separable_conv2d('conv_ds_9', zero_pad(conv5_1_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='VALID',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv5_2_dw, self.conv5_2_pw])
            ############################################################################################
            conv6_1_dw, conv6_1_pw = depthwise_separable_conv2d('conv_ds_10', zero_pad(self.conv5_2_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=1024, kernel_size=(3, 3), padding='VALID',
                                                                stride=(2, 2),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))

            self.__add_to_nodes([conv6_1_dw, conv6_1_pw])
            conv6_2_dw, self.conv6_2_pw = depthwise_separable_conv2d('conv_ds_11', zero_pad(conv6_1_pw),
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=1024, kernel_size=(3, 3), padding='VALID',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))

            self.__add_to_nodes([conv6_2_dw, self.conv6_2_pw])