Example #1
    def __call__(self, x, is_training=False):

        inter_channel = x.shape[-1] * 4

        wc1 = tf.compat.v1.get_variable(
            "{}_wc1".format(self.name), [1, 1, x.shape[-1], inter_channel],
            initializer=tf.random_normal_initializer(stddev=0.02),
            trainable=is_training)

        wc2 = tf.compat.v1.get_variable(
            "{}_wc2".format(self.name), [3, 3, inter_channel, self.filters],
            initializer=tf.random_normal_initializer(stddev=0.02),
            trainable=is_training)

        # 1x1 Convolution (Bottleneck layer)
        x = bn(x, is_training)
        if self.act:
            x = self.act(x)
        x = tf.nn.conv2d(x, wc1, strides=(1, 1, 1, 1), padding=self.padding)
        if self.dp > 0:
            x = dropout(x, self.dp)

        # 3x3 Convolution
        x = bn(x, is_training)
        if self.act:
            x = self.act(x)
        x = tf.nn.conv2d(x, wc2, strides=(1, 1, 1, 1), padding=self.padding)
        if self.dp > 0:
            x = dropout(x, self.dp)

        print(x)
        return x
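The helpers bn and dropout, and the attributes name, filters, act, dp, and padding, are assumed to come from the enclosing class. A minimal sketch of a matching constructor, assuming TF 1.x graph mode; only the attribute names are taken from the code above, while the class name and defaults are illustrative:

import tensorflow as tf

# Hypothetical constructor for the bottleneck layer above.
class BottleneckLayer:
    def __init__(self, name, filters, act=tf.nn.relu, dp=0.0, padding="SAME"):
        self.name = name        # prefix for the get_variable names
        self.filters = filters  # output channels of the 3x3 convolution
        self.act = act          # pre-activation applied after each bn
        self.dp = dp            # dropout rate; 0 disables dropout
        self.padding = padding  # padding mode for both convolutions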
Example #2
    def __call__(self, x, is_training=False):

        # setup layer
        x = tf.layers.separable_conv2d(
            x,
            filters=self.output_size,
            bias_initializer=tf.zeros_initializer(),
            kernel_size=[self.kernel, self.kernel]
            if isinstance(self.kernel, int) else self.kernel,
            strides=[self.stride, self.stride],
            padding=self.padding,
            trainable=is_training,
            name=self.name)

        # batch normalization
        if self.bn:
            x = bn(x, is_training=is_training, name="{}_bn_".format(self.name))

        if self.ln:
            # x = norm( x, self.name )
            x = ln(x, is_training=is_training)

        # activation
        if self.act is not None:
            x = self.act(x, name="{}_act".format(self.name))

        # setup dropout
        if self.dropout > 0 and is_training:
            x = dropout(x, self.dropout, name="{}_dp".format(self.name))

        print(x)
        return x
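The kernel argument is normalized so that an int k becomes a square [k, k] kernel while an explicit [h, w] pair is passed through unchanged. A small, TensorFlow-free check of that normalization (the helper name is made up for illustration):

# Square-kernel normalization as used in the separable convolution above.
def normalize_kernel(kernel):
    return [kernel, kernel] if isinstance(kernel, int) else list(kernel)

assert normalize_kernel(3) == [3, 3]
assert normalize_kernel((3, 5)) == [3, 5]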
Example #3
    def __call__(self, x, is_training=False, filters=None):

        if filters is not None:
            self.output_size = filters

        with tf.variable_scope(self.name):

            # kernel shape
            k = [ self.kernel, self.kernel, self.output_size, x.shape[-1] ] if isinstance(self.kernel, int) \
                else [ self.kernel[0], self.kernel[1], self.output_size, x.shape[-1] ]

            out_shape = [
                tf.shape(x)[0], x.shape[1] * self.stride,
                x.shape[2] * self.stride, self.output_size
            ]

            # kernel
            w = tf.compat.v1.get_variable(
                "_w_2dt",
                k,
                # initializer = tf.contrib.layers.xavier_initializer(),
                initializer=tf.random_normal_initializer(stddev=0.02),
                # initializer = tf.compat.v1.initializers.identity(),
                trainable=is_training)
            strides = ( 1, self.stride, self.stride, 1 ) if isinstance(self.stride, int) \
                    else ( 1, self.stride[0], self.stride[1], 1 )

            # setup layer
            x = tf.nn.conv2d_transpose(x,
                                       w,
                                       out_shape,
                                       strides,
                                       padding=self.padding)

            if self.bias:
                b = tf.compat.v1.get_variable(
                    '_b_', [self.output_size],
                    initializer=tf.constant_initializer(0),
                    trainable=is_training)
                x = tf.nn.bias_add(x, b, name="_bias")

            # batch normalization
            if self.bn:
                x = bn(x, is_training=is_training, name="_bn")

            # activation
            if self.act is not None:
                x = self.act(x, name="_act")

            # setup dropout
            if self.dropout > 0 and is_training:
                x = dropout(x, self.dropout, name="_dp")

        print(x)
        return x
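out_shape encodes the usual transposed-convolution rule for SAME padding: each spatial dimension grows by the stride factor, and the batch size is read dynamically via tf.shape. A worked check of that rule, with illustrative shapes, assuming TF 2.x eager execution:

import tensorflow as tf

# With SAME padding and stride 2, conv2d_transpose doubles height and width.
x = tf.zeros([4, 8, 8, 16])                    # NHWC input
w = tf.zeros([3, 3, 32, 16])                   # [kh, kw, out_channels, in_channels]
y = tf.nn.conv2d_transpose(x, w, output_shape=[4, 16, 16, 32],
                           strides=[1, 2, 2, 1], padding="SAME")
print(y.shape)                                 # (4, 16, 16, 32)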
Example #4
    def __call__(self, x, is_training=False): 

        # Initial convolution
        x = self.c1( x, is_training )
        x = bn( x, is_training )
        x = self.act( x )
        x = maxpool2d( x, 3, 2 )

        # Add dense blocks
        for block_idx in range( self.nb_dense_block - 1 ):            
            x = self.d_blocks[block_idx]( x, is_training )
            # Add transition_block
            x = self.transition_block[block_idx]( x, is_training )
            x = self.sne[block_idx]( x, is_training )
        x = self.d_blocks[-1]( x, is_training )
        x = self.sne[-1]( x, is_training )

        x = bn( x, is_training )
        x = self.act( x )

        return x
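This is a DenseNet-style forward pass: an initial convolution and max pooling, then dense blocks alternating with transition blocks (plus what appear to be squeeze-and-excitation modules, sne), with the final dense block skipping the transition. A worked channel count for this wiring, under assumed hyperparameters (growth rate 32, 6 layers per block, compression 0.5; none of these values appear in the source):

# Channel arithmetic for a 3-block DenseNet wired as above.
c = 64               # channels after the initial convolution
for _ in range(2):   # dense block followed by a transition block
    c = c + 6 * 32   # each dense block adds n_layers * growth_rate channels
    c = int(c * 0.5) # the transition block compresses the channel count
c = c + 6 * 32       # the final dense block has no transition
print(c)             # 352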
Example #5
    def __call__(self, x, is_training=False):

        inter_channel = x.shape.as_list()[-1]

        wc1 = tf.compat.v1.get_variable(
            "{}_wc1".format(self.name),
            [1, 1, x.shape[-1],
             int(inter_channel * self.compression)],
            initializer=tf.random_normal_initializer(stddev=0.02),
            trainable=is_training)

        # 1x1 Convolution (Bottleneck layer)
        x = bn(x, is_training)
        if self.act:
            x = self.act(x)
        x = tf.nn.conv2d(x, wc1, strides=(1, 1, 1, 1), padding=self.padding)
        if self.dp > 0:
            x = dropout(x, self.dp)

        x = avgpool2d(x, 2, 2)

        print(x)
        return x
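This is the DenseNet transition block: pre-activation batch norm, a 1x1 convolution that shrinks the channel count by self.compression, optional dropout, then 2x2 average pooling that halves the spatial dimensions. A worked shape check with illustrative values (256 input channels, compression 0.5), assuming TF 2.x eager execution:

import tensorflow as tf

x = tf.zeros([1, 32, 32, 256])                 # NHWC input
w = tf.zeros([1, 1, 256, int(256 * 0.5)])      # 1x1 kernel with compression
y = tf.nn.conv2d(x, w, strides=(1, 1, 1, 1), padding="SAME")
y = tf.nn.avg_pool2d(y, ksize=2, strides=2, padding="VALID")
print(y.shape)                                 # (1, 16, 16, 128)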
Example #6
    def __call__(self, x, is_training=False, filters=None):

        if filters is not None:
            self.output_size = filters

        with tf.variable_scope(self.name):

            # kernel shape
            k = [ self.kernel, self.kernel, x.shape[-1], self.output_size ] if isinstance(self.kernel, int) \
                else [ self.kernel[0], self.kernel[1], x.shape[-1], self.output_size ]

            # kernel
            gt = tf.get_variable(
                "_w_gt_2d",
                k,
                # initializer = tf.contrib.layers.xavier_initializer(),
                initializer=tf.truncated_normal_initializer(stddev=0.02),
                trainable=is_training)

            wt = tf.get_variable(
                "_w_wt_2d",
                k,
                # initializer = tf.contrib.layers.xavier_initializer(),
                initializer=tf.truncated_normal_initializer(stddev=0.02),
                trainable=is_training)

            mt = tf.get_variable(
                "_w_mt_2d",
                k,
                # initializer = tf.contrib.layers.xavier_initializer(),
                initializer=tf.truncated_normal_initializer(stddev=0.02),
                trainable=is_training)

            strides = ( 1, self.stride, self.stride, 1 ) if isinstance(self.stride, int) \
                    else ( 1, self.stride[0], self.stride[1], 1 )

            with tf.variable_scope('nac_w'):
                w = tf.multiply(tf.tanh(wt), tf.sigmoid(mt))

            with tf.variable_scope('simple_nac'):
                a = tf.nn.conv2d(x, w, strides, padding=self.padding)

            with tf.variable_scope('complex_nac'):
                # m = tf.exp( tf.nn.conv2d( tf.log( tf.abs( x ) + 1e-10 ), w, strides, padding = self.padding ) )
                m = tf.sinh(
                    tf.nn.conv2d(tf.asinh(x), w, strides,
                                 padding=self.padding))

            with tf.variable_scope('math_gate'):
                gc = tf.nn.sigmoid(
                    tf.nn.conv2d(x, gt, strides, padding=self.padding))

            with tf.variable_scope('result'):
                x = (gc * a) + ((1 - gc) * m)

            # if self.bias:
            #     b = tf.compat.v1.get_variable( '_b_',
            #                                 [ 1, 1, 1, self.output_size ],
            #                                 initializer = tf.constant_initializer( 0.0 ),
            #                                 trainable = is_training )
            #     x += b

            # batch normalization
            if self.bn:
                x = bn(x, is_training=is_training, name="_bn")

            # activation
            if self.act is not None:
                x = self.act(x, name="_act")

            # setup dropout
            if self.dropout > 0 and is_training:
                x = dropout(x, self.dropout, name="_dp")

        print(x)
        return x
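This layer is a convolutional NALU (Neural Arithmetic Logic Unit, Trask et al., 2018): the NAC weight is w = tanh(wt) * sigmoid(mt), the additive path a is a plain convolution with w, the multiplicative path m is computed in asinh space (the commented-out line shows the original log/exp form), and a learned gate gc mixes the two as gc * a + (1 - gc) * m. A minimal dense sketch of the same arithmetic, with illustrative shapes, assuming TF 2.x eager execution:

import tensorflow as tf

# Dense NALU-style gate mirroring the convolutional version above.
x  = tf.constant([[1.0, 2.0], [3.0, 4.0]])
wt = tf.Variable(tf.random.truncated_normal([2, 3], stddev=0.02))
mt = tf.Variable(tf.random.truncated_normal([2, 3], stddev=0.02))
gt = tf.Variable(tf.random.truncated_normal([2, 3], stddev=0.02))

w = tf.tanh(wt) * tf.sigmoid(mt)           # NAC weight, biased toward {-1, 0, 1}
a = tf.matmul(x, w)                        # additive path
m = tf.sinh(tf.matmul(tf.asinh(x), w))     # multiplicative path in asinh space
g = tf.sigmoid(tf.matmul(x, gt))           # learned gate
y = g * a + (1.0 - g) * m
print(y.shape)                             # (2, 3)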