Example #1
    def res_block_with_n_conv_layers(self,
                                     input_,
                                     output_dim,
                                     num_repeat,
                                     name="res_block"):
        output_ = layers.conv2d_same_repeat(input_,
                                            output_dim,
                                            num_repeat=num_repeat,
                                            activation_fn=self.act_fn,
                                            name=name)
        return self.skip_connection(input_, output_)
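
None of these snippets define layers.conv2d_same_repeat itself. The sketch below is reconstructed from the call sites as an assumption, not the project's implementation: it stacks num_repeat SAME-padded k_h x k_w convolutions and, when with_logit=True, also returns the last pre-activation logit (matching the `_, conv = ...` unpacking in Examples #3 and #7).

import tensorflow as tf

def conv2d_same_repeat(input_, output_dim, num_repeat=1, k_h=3, k_w=3,
                       d_h=1, d_w=1, activation_fn=tf.nn.relu,
                       with_logit=False, name="conv_repeat"):
    # Stack num_repeat SAME-padded convolutions; in this sketch the
    # stride is applied to every repetition.
    output_ = input_
    with tf.variable_scope(name):
        for i in range(num_repeat):
            logit = tf.layers.conv2d(output_,
                                     output_dim,
                                     (k_h, k_w),
                                     strides=(d_h, d_w),
                                     padding="same",
                                     name="conv%d" % (i + 1))
            output_ = activation_fn(logit) if activation_fn else logit
    # Call sites unpack (logit, activation) when with_logit=True.
    return (logit, output_) if with_logit else output_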
Example #2
File: VGG.py  Project: oryondark/fusion-net
    def inference(self, input_):
        conv1 = layers.conv2d_same_repeat(input_,
                                          self.kernel_num,
                                          num_repeat=2,
                                          name="down1")
        pool1 = layers.max_pool(conv1, name="pool1")

        conv2 = layers.conv2d_same_repeat(pool1,
                                          self.kernel_num * 2,
                                          num_repeat=2,
                                          name="down2")
        pool2 = layers.max_pool(conv2, name="pool2")

        conv3 = layers.conv2d_same_repeat(pool2,
                                          self.kernel_num * 4,
                                          num_repeat=3,
                                          name="down3")
        pool3 = layers.max_pool(conv3, name="pool3")

        conv4 = layers.conv2d_same_repeat(pool3,
                                          self.kernel_num * 8,
                                          num_repeat=3,
                                          name="down4")
        pool4 = layers.max_pool(conv4, name="pool4")

        conv5 = layers.conv2d_same_repeat(pool4,
                                          self.kernel_num * 8,
                                          num_repeat=3,
                                          name="down5")
        pool5 = layers.max_pool(conv5, name="pool5")

        flat = layers.flatten(pool5, 'flat')

        linear = layers.linear(flat,
                               flat.get_shape().as_list()[-1],
                               name='linear')

        logits = layers.linear(linear, self.num_class, name='logits')

        return logits
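
For context, a hedged usage sketch of the graph above; the VGG class name, constructor signature, and input shape are illustrative assumptions, not taken from VGG.py.

import tensorflow as tf

# Hypothetical wiring; all shapes and constructor arguments are assumed.
images = tf.placeholder(tf.float32, [None, 224, 224, 3], name="images")
model = VGG(kernel_num=64, num_class=1000)
logits = model.inference(images)
probs = tf.nn.softmax(logits, name="probs")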
Example #3
    def res_block(self,
                  input_,
                  output_dim,
                  is_downsizing=True,
                  name="res_block"):
        with tf.variable_scope(name):
            act1 = self.res_act(input_, name='act1')

            # Downsizing blocks stride both the skip path and the conv path.
            stride = 2 if is_downsizing else 1
            skip2 = layers.bottleneck_layer(act1,
                                            output_dim,
                                            d_h=stride,
                                            d_w=stride,
                                            name='skip1')
            _, conv1 = layers.conv2d_same_act(act1,
                                              output_dim,
                                              d_h=stride,
                                              d_w=stride,
                                              activation_fn=self.activation_fn,
                                              with_logit=True,
                                              name='conv1')
            conv2 = layers.conv2d_same(conv1, output_dim, name='conv2')
            res1 = tf.add(skip2, conv2, name='res1')

            act2 = self.res_act(res1, name='act2')
            _, conv3 = layers.conv2d_same_repeat(
                act2,
                output_dim,
                num_repeat=2,
                d_h=1,
                d_w=1,
                activation_fn=self.activation_fn,
                with_logit=True,
                name='conv3')
            res2 = tf.add(res1, conv3, name='res2')

            return res2
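
res_act is not defined in these snippets either. In pre-activation residual blocks it is commonly batch normalization followed by the block's nonlinearity; the sketch below assumes that behavior and is not the repository's code.

    def res_act(self, input_, name="res_act"):
        # Assumed pre-activation: normalize, then apply the configured
        # nonlinearity before each residual branch sees the features.
        with tf.variable_scope(name):
            norm = tf.layers.batch_normalization(input_, name="bn")
            return self.activation_fn(norm)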
Example #4
    def inference(self, input_):
        conv1 = layers.conv2d_same_repeat(input_,
                                          self.kernel_num,
                                          num_repeat=2,
                                          activation_fn=self.act_fn,
                                          name="down1")
        pool1 = layers.max_pool(conv1, name="pool1")

        conv2 = layers.conv2d_same_repeat(pool1,
                                          self.kernel_num * 2,
                                          num_repeat=2,
                                          activation_fn=self.act_fn,
                                          name="down2")
        pool2 = layers.max_pool(conv2, name="pool2")

        conv3 = layers.conv2d_same_repeat(pool2,
                                          self.kernel_num * 4,
                                          num_repeat=3,
                                          activation_fn=self.act_fn,
                                          name="down3")
        pool3 = layers.max_pool(conv3, name="pool3")

        conv4 = layers.conv2d_same_repeat(pool3,
                                          self.kernel_num * 8,
                                          num_repeat=3,
                                          activation_fn=self.act_fn,
                                          name="down4")
        pool4 = layers.max_pool(conv4, name="pool4")

        conv5 = layers.conv2d_same_repeat(pool4,
                                          self.kernel_num * 8,
                                          num_repeat=3,
                                          activation_fn=self.act_fn,
                                          name="down5")
        pool5 = layers.global_avg_pool(conv5, name="pool5")

        conv6 = layers.bottleneck_act(pool5,
                                      self.kernel_num * 8,
                                      activation_fn=self.act_fn,
                                      name="down6")
        conv7 = layers.bottleneck_layer(conv6, self.num_class, name="down7")

        logits = layers.flatten(conv7, 'flat')

        return logits
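
bottleneck_layer is likewise undefined here; its call sites (channel projection on skip paths, the num_class head above) suggest a 1x1 convolution. A minimal sketch under that assumption (bottleneck_act would be the same followed by an activation):

def bottleneck_layer(input_, output_dim, d_h=1, d_w=1, name="bottleneck"):
    # Assumed 1x1 convolution: projects channels, optionally striding,
    # for skip paths and classifier heads.
    return tf.layers.conv2d(input_, output_dim, (1, 1),
                            strides=(d_h, d_w), padding="same", name=name)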
Example #5
    def encoder(self, input_):
        self.down1 = layers.conv2d_same_repeat(input_,
                                               self.kernel_num,
                                               num_repeat=2,
                                               name="down1")
        pool1 = layers.max_pool(self.down1, name="pool1")

        self.down2 = layers.conv2d_same_repeat(pool1,
                                               self.kernel_num * 2,
                                               num_repeat=2,
                                               name="down2")
        pool2 = layers.max_pool(self.down2, name="pool2")

        self.down3 = layers.conv2d_same_repeat(pool2,
                                               self.kernel_num * 4,
                                               num_repeat=2,
                                               name="down3")
        pool3 = layers.max_pool(self.down3, name="pool3")

        self.down4 = layers.conv2d_same_repeat(pool3,
                                               self.kernel_num * 8,
                                               num_repeat=2,
                                               name="down4")
        pool4 = layers.max_pool(self.down4, name="pool4")

        if self.log == 1:
            print("encoder input : ", input_.get_shape())
            print("conv1 : ", self.down1.get_shape())
            print("pool1 : ", pool1.get_shape())
            print("conv2 : ", self.down2.get_shape())
            print("pool2 : ", pool2.get_shape())
            print("conv3 : ", self.down3.get_shape())
            print("pool3 : ", pool3.get_shape())
            print("conv4 : ", self.down4.get_shape())
            print("pool4 : ", pool4.get_shape())

        return pool4
Example #6
    def inference(self, input_):
        encode_vec = self.encoder(input_)
        bridge = layers.conv2d_same_repeat(encode_vec,
                                           self.kernel_num * 16,
                                           num_repeat=2,
                                           name="bridge")
        decode_vec = self.decoder(bridge)
        output = layers.bottleneck_layer(decode_vec,
                                         self.output_dim,
                                         name="output")

        if self.log == 1:
            print("output : ", output.get_shape())

        print("Complete!!")

        return output
Example #7
    def _inference(self, input_):
        conv1 = layers.conv2d_same_act(input_,
                                       16,
                                       activation_fn=self.activation_fn,
                                       name='conv1')
        skip1 = layers.bottleneck_layer(conv1, 32, name='skip1')
        _, conv2 = layers.conv2d_same_repeat(conv1,
                                             32,
                                             num_repeat=2,
                                             activation_fn=self.activation_fn,
                                             with_logit=True,
                                             name='conv2')

        res1 = tf.add(skip1, conv2, name='res1')
        res_act1 = self.res_act(res1)

        _, conv3 = layers.conv2d_same_repeat(res_act1,
                                             32,
                                             num_repeat=2,
                                             activation_fn=self.activation_fn,
                                             with_logit=True,
                                             name='conv3')

        res2 = tf.add(conv3, res1, name='res2')
        res_act2 = self.res_act(res2)

        skip2 = layers.bottleneck_layer(res_act2,
                                        64,
                                        d_h=2,
                                        d_w=2,
                                        name='skip2')
        conv4 = layers.conv2d_same_act(res_act2,
                                       64,
                                       d_h=2,
                                       d_w=2,
                                       activation_fn=self.activation_fn,
                                       name='conv4')
        conv5 = layers.conv2d_same(conv4, 64, name='conv5')

        res3 = tf.add(skip2, conv5, name='res3')
        res_act3 = self.res_act(res3)

        _, conv6 = layers.conv2d_same_repeat(res_act3,
                                             64,
                                             num_repeat=2,
                                             activation_fn=self.activation_fn,
                                             with_logit=True,
                                             name='conv6')

        res4 = tf.add(res3, conv6, name='res4')
        res_act4 = self.res_act(res4)

        skip3 = layers.bottleneck_layer(res_act4,
                                        128,
                                        d_h=2,
                                        d_w=2,
                                        name='skip3')
        conv7 = layers.conv2d_same_act(res_act4,
                                       128,
                                       d_h=2,
                                       d_w=2,
                                       activation_fn=self.activation_fn,
                                       name='conv7')
        conv8 = layers.conv2d_same(conv7, 128, name='conv8')

        res5 = tf.add(skip3, conv8, name='res5')

        res_act5 = self.res_act(res5)
        _, conv9 = layers.conv2d_same_repeat(res_act5,
                                             128,
                                             num_repeat=2,
                                             activation_fn=self.activation_fn,
                                             with_logit=True,
                                             name='conv9')

        res6 = tf.add(res5, conv9, name='res6')
        res_act6 = self.res_act(res6)

        pool = layers.avg_pool(res_act6,
                               k_h=8,
                               k_w=8,
                               d_h=1,
                               d_w=1,
                               name='pool')
        flat = layers.flatten(pool, 'flat')

        linear = layers.linear(flat, self.num_class, name='linear')

        return linear
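
The fixed k_h=8, k_w=8 average pool implies 8x8 feature maps at that depth; with the two stride-2 stages above, that points to 32x32 (CIFAR-sized) inputs. A usage sketch under that assumption; the model instance and placeholder names are hypothetical.

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3], name="images")
labels = tf.placeholder(tf.int64, [None], name="labels")
logits = model._inference(images)  # model: an instance of the class above
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                   logits=logits))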
Example #8
    def decoder(self, input_):
        conv_trans4 = layers.conv2dTrans_same_act(input_,
                                                  self.down4.get_shape(),
                                                  activation_fn=self.act_fn,
                                                  with_logit=False,
                                                  name="unpool4")
        skip4 = self.skip_connection(conv_trans4, self.down4)
        up4 = layers.conv2d_same_repeat(skip4,
                                        self.kernel_num * 8,
                                        num_repeat=2,
                                        name="up4")

        conv_trans3 = layers.conv2dTrans_same_act(up4,
                                                  self.down3.get_shape(),
                                                  activation_fn=self.act_fn,
                                                  with_logit=False,
                                                  name="unpool3")
        skip3 = self.skip_connection(conv_trans3, self.down3)
        up3 = layers.conv2d_same_repeat(skip3,
                                        self.kernel_num * 4,
                                        num_repeat=2,
                                        name="up3")

        conv_trans2 = layers.conv2dTrans_same_act(up3,
                                                  self.down2.get_shape(),
                                                  activation_fn=self.act_fn,
                                                  with_logit=False,
                                                  name="unpool2")
        skip2 = self.skip_connection(conv_trans2, self.down2)
        up2 = layers.conv2d_same_repeat(skip2,
                                        self.kernel_num * 2,
                                        num_repeat=2,
                                        name="up2")

        conv_trans1 = layers.conv2dTrans_same_act(up2,
                                                  self.down1.get_shape(),
                                                  activation_fn=self.act_fn,
                                                  with_logit=False,
                                                  name="unpool1")
        skip1 = self.skip_connection(conv_trans1, self.down1)
        up1 = layers.conv2d_same_repeat(skip1,
                                        self.kernel_num,
                                        num_repeat=2,
                                        name="up1")

        if self.log == 1:
            print("dncoder input : ", input_.get_shape())
            print("convT1 : ", conv_trans4.get_shape())
            print("res1 : ", skip4.get_shape())
            print("up1 : ", up4.get_shape())
            print("convT2 : ", conv_trans3.get_shape())
            print("res2 : ", skip3.get_shape())
            print("up2 : ", up3.get_shape())
            print("convT3 : ", conv_trans2.get_shape())
            print("res3 : ", skip2.get_shape())
            print("up3 : ", up2.get_shape())
            print("convT4 : ", conv_trans1.get_shape())
            print("res4 : ", skip1.get_shape())
            print("up4 : ", up1.get_shape())

        return up1
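
skip_connection appears both as a residual merge (Example #1) and as the U-Net-style encoder/decoder merge in Examples #5 and #8. For the decoder, channel-axis concatenation is the usual choice; the sketch below assumes that, while the residual variant in Example #1 could simply be tf.add.

    def skip_connection(self, input_, skip_, name="skip"):
        # Assumed U-Net merge: concatenate decoder features with the
        # matching encoder features along the channel (last) axis.
        return tf.concat([input_, skip_], axis=-1, name=name)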