Example #1
    def res_block(self,
                  input_,
                  output_dim,
                  is_downsizing=True,
                  name="res_block"):
        with tf.variable_scope(name):
            act1 = self.res_act(input_, name='act1')

            if is_downsizing:
                skip2 = layers.bottleneck_layer(act1,
                                                output_dim,
                                                d_h=2,
                                                d_w=2,
                                                name='skip1')
                _, conv1 = layers.conv2d_same_act(
                    act1,
                    output_dim,
                    d_h=2,
                    d_w=2,
                    activation_fn=self.activation_fn,
                    with_logit=True,
                    name='conv1')
            else:
                skip2 = layers.bottleneck_layer(act1,
                                                output_dim,
                                                d_h=1,
                                                d_w=1,
                                                name='skip1')
                _, conv1 = layers.conv2d_same_act(
                    act1,
                    output_dim,
                    d_h=1,
                    d_w=1,
                    activation_fn=self.activation_fn,
                    with_logit=True,
                    name='conv1')
            conv2 = layers.conv2d_same(conv1, output_dim, name='conv2')
            res1 = tf.add(skip2, conv2, name='res1')

            act2 = self.res_act(res1, name='act2')
            _, conv3 = layers.conv2d_same_repeat(
                act2,
                output_dim,
                num_repeat=2,
                d_h=1,
                d_w=1,
                activation_fn=self.activation_fn,
                with_logit=True,
                name='conv3')
            res2 = tf.add(res1, conv3, name='res2')

            return res2
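For orientation, here is a minimal sketch of the same pre-activation residual pattern written with plain TensorFlow 1.x ops instead of the snippet's layers helpers; the batch-norm/ReLU choice for res_act and the 1x1 projection standing in for layers.bottleneck_layer are assumptions, not the library's actual implementation.

import tensorflow as tf

def pre_act_res_block(x, output_dim, is_downsizing=True, name="res_block"):
    # Sketch only: assumes res_act is batch norm + ReLU and the skip is a 1x1 conv.
    stride = 2 if is_downsizing else 1
    with tf.variable_scope(name):
        act1 = tf.nn.relu(tf.layers.batch_normalization(x, name="bn1"), name="act1")
        # 1x1 projection plays the role of layers.bottleneck_layer(..., name='skip1')
        skip = tf.layers.conv2d(act1, output_dim, 1, strides=stride,
                                padding="SAME", name="skip1")
        conv = tf.layers.conv2d(act1, output_dim, 3, strides=stride, padding="SAME",
                                activation=tf.nn.relu, name="conv1")
        conv = tf.layers.conv2d(conv, output_dim, 3, padding="SAME", name="conv2")
        res1 = tf.add(skip, conv, name="res1")

        act2 = tf.nn.relu(tf.layers.batch_normalization(res1, name="bn2"), name="act2")
        conv = tf.layers.conv2d(act2, output_dim, 3, padding="SAME",
                                activation=tf.nn.relu, name="conv3_1")
        conv = tf.layers.conv2d(conv, output_dim, 3, padding="SAME", name="conv3_2")
        return tf.add(res1, conv, name="res2")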
Example #2
    def conv_res_conv_block(self,
                            input_,
                            output_dim,
                            name="conv_res_conv_block"):
        with tf.variable_scope(name):
            conv1 = layers.conv2d_same_act(input_,
                                           output_dim,
                                           activation_fn=self.act_fn,
                                           with_logit=False,
                                           name="conv1")
            res = self.res_block_with_3_conv_layers(conv1,
                                                    output_dim,
                                                    name="res_block")
            conv2 = layers.conv2d_same_act(res,
                                           output_dim,
                                           activation_fn=self.act_fn,
                                           with_logit=False,
                                           name="conv2")
            return conv2
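res_block_with_3_conv_layers is not among these examples; the following is a hypothetical sketch of such a block, built from the same layers helpers used above, with an identity skip around three stride-1 convolutions. It is an assumption for illustration, not the original helper.

    def res_block_with_3_conv_layers(self, input_, output_dim, name="res_block"):
        # Hypothetical sketch (assumption): identity skip around three 3x3 convolutions.
        with tf.variable_scope(name):
            _, conv = layers.conv2d_same_repeat(input_,
                                                output_dim,
                                                num_repeat=2,
                                                activation_fn=self.act_fn,
                                                with_logit=True,
                                                name='conv_a')
            conv = layers.conv2d_same(conv, output_dim, name='conv_b')
            # input_ already has output_dim channels here, so the identity add is valid
            return tf.add(input_, conv, name='res')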
Example #3
    def basicBlock(self, input_, output_dim, stride=1, name='basic'):
        assert output_dim != 0

        with tf.variable_scope(name):
            input_dim = input_.get_shape().as_list()[-1]

            bn = layers.batch_norm(input_, name='bn')
            act = self.act_fn(bn, name='act')

            conv1 = layers.conv2d_same_act(act, input_dim, activation_fn=self.act_fn, name='conv_1')
            conv2 = layers.conv2d_same(conv1, output_dim, d_h=stride, d_w=stride, name='conv_2')
            # the identity branch is input_; the residual branch output of this block is conv2
            sc = self.shortcut(conv2, input_, name='short')

            return sc
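The shortcut helper called above is also not shown. A plausible sketch (an assumption): project the identity with a strided bottleneck_layer whenever the channel count or spatial size changes, then add it to the block output.

    def shortcut(self, output_, identity, name='short'):
        # Hypothetical sketch (assumption): align identity to output_, then add.
        with tf.variable_scope(name):
            in_dim = identity.get_shape().as_list()[-1]
            out_dim = output_.get_shape().as_list()[-1]
            in_size = identity.get_shape().as_list()[1]
            out_size = output_.get_shape().as_list()[1]
            stride = in_size // out_size if in_size and out_size else 1
            if in_dim != out_dim or stride != 1:
                # 1x1 projection so shapes match before the element-wise add
                identity = layers.bottleneck_layer(identity, out_dim,
                                                   d_h=stride, d_w=stride, name='proj')
            return tf.add(output_, identity, name='add')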
Example #4
    def inference(self, input_, reuse=False):
        with tf.variable_scope('ResNet') as scope:
            if reuse:
                scope.reuse_variables()

            conv1 = layers.conv2d_same_act(input_, self.num_kernel, k_h=7, k_w=7, d_h=2, d_w=2,
                                           activation_fn=self.act_fn, name='conv_1')

            pool1 = layers.max_pool(conv1, k_h=self.pool_kernel, k_w=self.pool_kernel,
                                    padding='SAME', name='pool1')

            layer_blocks = self.layer_repeat(pool1, self.layer_def, name='layers')

            pool2 = layers.global_avg_pool(layer_blocks, name='pool2')

            flat = layers.flatten(pool2, 'flat')

            linear = layers.linear(flat, self.num_class, name='linear')

            # despite the variable name, this is the sigmoid probability of the linear output
            logit = tf.sigmoid(linear, name='logit')

            return logit
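The reuse flag is the standard TF 1.x variable-sharing idiom: a second call with reuse=True builds another tower (for example an evaluation graph) over the same 'ResNet' weights. A tiny self-contained illustration of that mechanism, not the snippet's actual model:

import tensorflow as tf

def tiny_net(x, reuse=False):
    # same reuse idiom as inference() above, on a toy one-layer net
    with tf.variable_scope('ResNet') as scope:
        if reuse:
            scope.reuse_variables()
        return tf.layers.dense(x, 1, name='linear')

a = tf.placeholder(tf.float32, [None, 4])
b = tf.placeholder(tf.float32, [None, 4])
out_a = tiny_net(a)              # creates ResNet/linear/{kernel,bias}
out_b = tiny_net(b, reuse=True)  # reuses those same variables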
Example #5
    def _inference(self, input_):
        conv1 = layers.conv2d_same_act(input_,
                                       16,
                                       activation_fn=self.activation_fn,
                                       name='conv1')
        skip1 = layers.bottleneck_layer(conv1, 32, name='skip1')
        _, conv2 = layers.conv2d_same_repeat(conv1,
                                             32,
                                             num_repeat=2,
                                             activation_fn=self.activation_fn,
                                             with_logit=True,
                                             name='conv2')

        res1 = tf.add(skip1, conv2, name='res1')
        res_act1 = self.res_act(res1)

        _, conv3 = layers.conv2d_same_repeat(res_act1,
                                             32,
                                             num_repeat=2,
                                             activation_fn=self.activation_fn,
                                             with_logit=True,
                                             name='conv3')

        res2 = tf.add(conv3, res1, name='res2')
        res_act2 = self.res_act(res2)

        skip2 = layers.bottleneck_layer(res_act2,
                                        64,
                                        d_h=2,
                                        d_w=2,
                                        name='skip2')
        conv4 = layers.conv2d_same_act(res_act2,
                                       64,
                                       d_h=2,
                                       d_w=2,
                                       activation_fn=self.activation_fn,
                                       name='conv4')
        conv5 = layers.conv2d_same(conv4, 64, name='conv5')

        res3 = tf.add(skip2, conv5, name='res3')
        res_act3 = self.res_act(res3)

        _, conv6 = layers.conv2d_same_repeat(res_act3,
                                             64,
                                             num_repeat=2,
                                             activation_fn=self.activation_fn,
                                             with_logit=True,
                                             name='conv6')

        res4 = tf.add(res3, conv6, name='res4')
        res_act4 = self.res_act(res4)

        skip3 = layers.bottleneck_layer(res_act4,
                                        128,
                                        d_h=2,
                                        d_w=2,
                                        name='skip3')
        conv7 = layers.conv2d_same_act(res_act4,
                                       128,
                                       d_h=2,
                                       d_w=2,
                                       activation_fn=self.activation_fn,
                                       name='conv7')
        conv8 = layers.conv2d_same(conv7, 128, name='conv8')

        res5 = tf.add(skip3, conv8, name='res5')

        res_act5 = self.res_act(res5)
        _, conv9 = layers.conv2d_same_repeat(res_act5,
                                             128,
                                             num_repeat=2,
                                             activation_fn=self.activation_fn,
                                             with_logit=True,
                                             name='conv9')

        res6 = tf.add(res5, conv9, name='res6')
        res_act6 = self.res_act(res6)

        pool = layers.avg_pool(res_act6,
                               k_h=8,
                               k_w=8,
                               d_h=1,
                               d_w=1,
                               name='pool')
        flat = layers.flatten(pool, 'flat')

        linear = layers.linear(flat, self.num_class, name='linear')

        return linear
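_inference returns the raw output of the final linear layer, so any loss is attached outside the method. A minimal sketch of doing so; the label placeholder shape and the sigmoid cross-entropy choice are assumptions, echoing the sigmoid head of Example #4:

import tensorflow as tf

num_class = 10                                           # assumed value
linear = tf.placeholder(tf.float32, [None, num_class])   # stands in for the tensor returned above
labels = tf.placeholder(tf.float32, [None, num_class])

# multi-label loss on the raw linear output
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=linear))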