Example #1
def inference(image, output_shape, keep_prob, is_train):
    wd = 0.0001
    leaky_param = 0.01
    input_shape = image.get_shape().as_list()
    conv1 = mf.convolution_2d_layer(image, [5, 5, 3, 64], [1, 1], 'SAME', wd,
                                    'conv1')
    conv1_pool = mf.maxpool_2d_layer(conv1, [2, 2], [2, 2], 'maxpool1')

    res1 = mf.copy_layer(conv1_pool, mf.res_layer, 3, 'res', [3, 3, 64, 64],
                         [1, 1], "SAME", wd, 'res_1', 2, leaky_param, is_train)
    res1_pool = mf.maxpool_2d_layer(res1, [2, 2], [2, 2], 'maxpool2')
    res1_pad = mf.res_pad(res1_pool, 64, 128, 'res_pad1')

    res2 = mf.copy_layer(res1_pad, mf.res_layer, 4, 'res', [3, 3, 128, 128],
                         [1, 1], "SAME", wd, 'res_2', 2, leaky_param, is_train)
    res2_pool = mf.maxpool_2d_layer(res2, [2, 2], [2, 2], 'maxpool3')
    res2_pad = mf.res_pad(res2_pool, 128, 256, 'res_pad2')

    res3 = mf.copy_layer(res2_pad, mf.res_layer, 6, 'res', [3, 3, 256, 256],
                         [1, 1], "SAME", wd, 'res_3', 2, leaky_param, is_train)
    #res3_pool = mf.maxpool_2d_layer(res3, [2,2], [2,2], 'maxpool3')
    #res3_pad = mf.res_pad(res3_pool, 256, 512, 'res_pad3')

    #res4 = mf.copy_layer(res3_pad, mf.res_layer, 3, 'res', [3,3, 512, 512], [1,1], "SAME", wd, 'res_4', 2, leaky_param, is_train)

    res1_unpool = mf.unpooling_layer(res1, [input_shape[1], input_shape[2]],
                                     'res1_unpool')
    res2_unpool = mf.unpooling_layer(res2, [input_shape[1], input_shape[2]],
                                     'res2_unpool')
    res3_unpool = mf.unpooling_layer(res3, [input_shape[1], input_shape[2]],
                                     'res3_unpool')
    #res4_unpool = mf.unpooling_layer(res4, [input_shape[1], input_shape[2]], 'res4_unpool')
    hypercolumn = tf.concat(3, [res1_unpool, res2_unpool, res3_unpool],
                            name='hypercolumn')

    output_shape = input_shape
    output_shape[3] = 1
    #heatmap = mf.deconvolution_2d_layer(hypercolumn, [1,1,1,hypercolumn.get_shape()[3]], [1,1], output_shape, 'SAME', wd, 'heatmap')
    heatmap = mf.convolution_2d_layer(
        hypercolumn, [1, 1, hypercolumn.get_shape()[3], 1], [1, 1], 'SAME', wd,
        'heatmap')
    return heatmap
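
The mf helpers used here are project-specific. Below is a minimal TF 1.x sketch, assuming mf.convolution_2d_layer wraps tf.nn.conv2d plus an L2 weight-decay term and mf.unpooling_layer upsamples a feature map back to the input resolution (approximated below with bilinear resizing); the hypercolumn is then just the channel-wise concatenation of the upsampled maps. The sketch uses the TF 1.x tf.concat(values, axis) signature, while the example above uses the older tf.concat(dim, values) order. The actual mf implementations may differ.

import tensorflow as tf

def conv2d_with_wd(x, filter_shape, strides, padding, wd, name):
    """Hypothetical stand-in for mf.convolution_2d_layer (positional form)."""
    with tf.variable_scope(name):
        w = tf.get_variable('weights', filter_shape,
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable('biases', [filter_shape[-1]],
                            initializer=tf.constant_initializer(0.0))
        if wd:
            # L2 weight decay collected into 'losses' and added to the total loss.
            tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(w), wd))
        conv = tf.nn.conv2d(x, w, [1, strides[0], strides[1], 1], padding)
        return tf.nn.bias_add(conv, b)

def hypercolumn_sketch(feature_maps, target_hw, name='hypercolumn'):
    """Upsample each feature map to target_hw and concatenate along channels."""
    resized = [tf.image.resize_bilinear(f, target_hw) for f in feature_maps]
    return tf.concat(resized, 3, name=name)
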
Example #2
def dense_transition_layer(input_tensor, is_pool, data_format, is_train,
                           leaky_param, wd, layer_name):
    with tf.variable_scope(layer_name):
        if data_format == "NCHW":
            in_channel = input_tensor.get_shape()[1]
        elif data_format == "NHWC":
            in_channel = input_tensor.get_shape()[3]
        else:
            raise NotImplementedError

        if not is_pool:
            conv = mf.convolution_2d_layer(input_tensor, in_channel, [3, 3], [2, 2], "SAME",
                    data_format, True, is_train, leaky_param, wd, "conv")
        else:
            conv = mf.convolution_2d_layer(input_tensor, in_channel, [1, 1], [1, 1], "SAME",
                    data_format, True, is_train, leaky_param, wd, "conv")
            conv = mf.maxpool_2d_layer(conv, [2, 2], [2, 2], data_format, 'maxpool')
        return conv
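
For reference, a plain-TensorFlow sketch of the two downsampling paths selected by is_pool, assuming NHWC input and omitting the batch norm, leaky ReLU, and weight decay that the mf wrapper presumably adds:

import tensorflow as tf

def transition_sketch(x, is_pool):
    in_ch = x.get_shape().as_list()[3]
    if not is_pool:
        # A stride-2 3x3 convolution halves the spatial resolution directly.
        w = tf.get_variable('w3', [3, 3, in_ch, in_ch])
        return tf.nn.conv2d(x, w, [1, 2, 2, 1], 'SAME')
    # Otherwise: a channel-mixing 1x1 convolution followed by 2x2 max pooling.
    w = tf.get_variable('w1', [1, 1, in_ch, in_ch])
    y = tf.nn.conv2d(x, w, [1, 1, 1, 1], 'SAME')
    return tf.nn.max_pool(y, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
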
Example #3
    def _add_adapt_pair_loss_single(self, feature, scale, leaky_param,
                                    wd, layer_name):
        with tf.variable_scope(layer_name):
            feat = flip_gradient(feature, scale)
            keep_prob_ph = self.keep_prob_ph
            data_format = "NHWC"

            conv1 = mf.convolution_2d_layer(
                feat, 256, [3, 3], [1, 1],
                "SAME", data_format=data_format, leaky_params=leaky_param,
                wd=wd, layer_name="dann_conv1")

            conv2 = mf.convolution_2d_layer(
                conv1, 512, [3, 3], [1, 1],
                "SAME", data_format=data_format, leaky_params=leaky_param,
                wd=wd, layer_name="dann_conv2")

            conv3 = mf.convolution_2d_layer(
                conv2, 512, [3, 3], [1, 1],
                "SAME", data_format=data_format, leaky_params=leaky_param,
                wd=wd, layer_name="dann_conv3")

            conv3_maxpool = mf.maxpool_2d_layer(
                conv3, [2, 2], [2, 2], data_format=data_format,
                layer_name="dann_maxpool3")

            a_fc1 = mf.fully_connected_layer(
                conv3_maxpool, 2048, leaky_param, wd, "a_fc1")

            a_fc2 = mf.fully_connected_layer(
                a_fc1, 2048, leaky_param, wd, "a_fc2")

            a_fc3 = mf.fully_connected_layer(
                a_fc2, 2, leaky_param, wd, "a_fc3")

        return a_fc3
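
flip_gradient here is presumably a DANN-style gradient reversal layer: an identity in the forward pass whose gradient is negated (and scaled) on the backward pass, so the feature extractor learns domain-invariant features while the a_fc* head learns to classify the domain. A common TF 1.x implementation, shown as a sketch rather than this project's actual code:

import tensorflow as tf
from tensorflow.python.framework import ops

class FlipGradientBuilder(object):
    """Identity forward; multiplies the incoming gradient by -l on backprop."""

    def __init__(self):
        self.num_calls = 0

    def __call__(self, x, l=1.0):
        grad_name = "FlipGradient%d" % self.num_calls

        @ops.RegisterGradient(grad_name)
        def _flip_gradients(op, grad):
            return [tf.negative(grad) * l]

        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": grad_name}):
            y = tf.identity(x)
        self.num_calls += 1
        return y

flip_gradient = FlipGradientBuilder()
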
Example #4
File: model.py  Project: polltooh/st_gan
    def model(self):
        wd = 0.0004
        leaky_param = 0.01
        with tf.variable_scope("G"):
            rf2 = mf.add_leaky_relu(
                mf.fully_connected_layer(self.ran_code_ph, 8 * 8 * 64, wd,
                                         "fc2"), leaky_param)
            rf3 = tf.reshape(rf2, [self.bsize, 8, 8, 64], name="fc3")
            rdeconv1 = mf.add_leaky_relu(
                mf.deconvolution_2d_layer(rf3, [2, 2, 128, 64], [2, 2],
                                          [self.bsize, 16, 16, 128], "VALID",
                                          wd, "deconv1"), leaky_param)

            rdeconv2 = mf.add_leaky_relu(
                mf.deconvolution_2d_layer(rdeconv1, [2, 2, 256, 128], [2, 2],
                                          [self.bsize, 32, 32, 256], "VALID",
                                          wd, "deconv2"), leaky_param)
            deconv1 = mf.add_leaky_relu(
                mf.deconvolution_2d_layer(rdeconv2, [2, 2, 512, 256], [2, 2],
                                          [self.bsize, 64, 64, 512], "VALID",
                                          wd, "deconv3"), leaky_param)

            deconv2 = mf.add_leaky_relu(
                mf.deconvolution_2d_layer(deconv1, [2, 2, 512, 512], [2, 2],
                                          [self.bsize, 128, 128, 512], "VALID",
                                          wd, "deconv4"), leaky_param)
            conv1 = mf.convolution_2d_layer(deconv2, [1, 1, 512, 1], [1, 1],
                                            "SAME", wd, "conv1")

            self.g_image = tf.sigmoid(conv1, "g_image")

            tf.add_to_collection("image_to_write", self.g_image)
            tf.add_to_collection("image_to_write", self.image_data_ph)

        with tf.variable_scope("D"):
            concat = tf.concat(0, [self.g_image, self.image_data_ph])
            #conv1 = mf.convolution_2d_layer(self.image_data_ph, [5, 5, 2, 64], [2,2], "VALID", wd, "conv1")
            conv1 = mf.add_leaky_relu(
                mf.convolution_2d_layer(concat, [3, 3, 1, 32], [2, 2], "SAME",
                                        wd, "conv1"), leaky_param)
            conv1_maxpool = mf.maxpool_2d_layer(conv1, [2, 2], [2, 2],
                                                "maxpool1")

            conv2 = mf.add_leaky_relu(
                mf.convolution_2d_layer(conv1_maxpool, [3, 3, 32, 64], [2, 2],
                                        "SAME", wd, "conv2"), leaky_param)
            conv2_maxpool = mf.maxpool_2d_layer(conv2, [2, 2], [2, 2],
                                                "maxpool2")

            conv3 = mf.add_leaky_relu(
                mf.convolution_2d_layer(conv2_maxpool, [3, 3, 64, 128], [2, 2],
                                        "SAME", wd, "conv3"), leaky_param)
            conv3_maxpool = mf.maxpool_2d_layer(conv3, [2, 2], [2, 2],
                                                "maxpool3")

            conv4 = mf.add_leaky_relu(
                mf.convolution_2d_layer(conv3_maxpool, [3, 3, 128, 128],
                                        [2, 2], "SAME", wd, "conv4"),
                leaky_param)
            conv4_maxpool = mf.maxpool_2d_layer(conv4, [2, 2], [2, 2],
                                                "maxpool4")
            self.fc = mf.fully_connected_layer(conv4_maxpool, 1, wd, "fc")
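
A minimal sketch of what mf.deconvolution_2d_layer presumably wraps: tf.nn.conv2d_transpose with a [height, width, out_channels, in_channels] filter and an explicit output_shape, which is why the generator above passes pairs like [2, 2, 128, 64] and [self.bsize, 16, 16, 128]. The real mf helper may differ.

import tensorflow as tf

def deconv2d_with_wd(x, filter_shape, strides, output_shape, padding, wd, name):
    """Hypothetical stand-in for mf.deconvolution_2d_layer."""
    with tf.variable_scope(name):
        # conv2d_transpose filters are [h, w, out_channels, in_channels].
        w = tf.get_variable('weights', filter_shape,
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        if wd:
            tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(w), wd))
        return tf.nn.conv2d_transpose(x, w, output_shape,
                                      [1, strides[0], strides[1], 1], padding)
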
Example #5
    def model_infer(self, data_ph, model_params):
        input_ph = data_ph.get_input()
        gl_ph = data_ph.get_gl()
        train_test_ph = data_ph.get_train_test()

        input_ph = tf.cond(train_test_ph,
                           lambda: self.add_data_arg(input_ph),
                           lambda: input_ph)

        data_format = "NHWC"
        bn = False
        keep_prob_ph = data_ph.get_keep_prob()
        self.keep_prob_ph = keep_prob_ph

        b, _, _, _ = input_ph.get_shape()
        self.b = b

        leaky_param = model_params["leaky_param"]
        wd = model_params["weight_decay"]

        hyper_list = list()

        print(input_ph)
        conv11 = mf.convolution_2d_layer(
            input_ph, 64, [3, 3], [1, 1], "SAME", data_format=data_format,
            leaky_params=leaky_param, wd=wd, layer_name="conv11")
        print(conv11)

        conv1_maxpool = mf.maxpool_2d_layer(conv11, [2, 2],
                                            [2, 2], data_format, "maxpool1")
        print(conv1_maxpool)

        conv21 = mf.convolution_2d_layer(
            conv1_maxpool, 128, [3, 3], [1, 1], "SAME", data_format=data_format,
            leaky_params=leaky_param, wd=wd, layer_name="conv21")
        print(conv21)

        conv2_maxpool = mf.maxpool_2d_layer(conv21, [2, 2],
                                            [2, 2], data_format, "maxpool2")
        print(conv2_maxpool)

        conv31 = mf.convolution_2d_layer(
            conv2_maxpool, 256, [3, 3], [1, 1], "SAME", data_format=data_format,
            leaky_params=leaky_param, wd=wd, layer_name="conv31")

        print(conv31)

        if model_params['adapt']:
            if self.model_params["adapt_loss_type"] == "MULTI":
                self._add_adapt_multi_loss(conv31,
                                           gl_ph, leaky_param, wd)
            elif self.model_params["adapt_loss_type"] == "PAIR":
                self._add_adapt_pair_loss(conv31,
                                          gl_ph, leaky_param, wd)
            else:
                raise NotImplementedError

        conv3_maxpool = mf.maxpool_2d_layer(
                conv31, [2, 2], [2, 2], data_format, "maxpool3")

        conv41 = mf.convolution_2d_layer(
            conv3_maxpool, 256, [3, 3], [1, 1], "SAME", data_format=data_format,
            leaky_params=leaky_param, wd=wd, layer_name="conv4")

        fc1 = mf.fully_connected_layer(
            conv41, 2048, leaky_param, wd, "fc1")

        fc1_drop = tf.nn.dropout(fc1, keep_prob_ph, name="dropout1")

        fc2 = mf.fully_connected_layer(
            fc1_drop, 1024, leaky_param, wd, "fc2")

        fc2_drop = tf.nn.dropout(fc2, keep_prob_ph, name="dropout2")

        fc3 = mf.fully_connected_layer(fc2_drop, 10, 0.0, wd, "fc3")

        self.fc = fc3
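
mf.fully_connected_layer is handed 4-D conv outputs directly (conv41 above), so it presumably flattens its input before the dense layer. A minimal sketch, assuming a matmul with L2 weight decay followed by a leaky ReLU; the real helper, including how it treats the 0.0 slope passed for "fc3", may differ.

import numpy as np
import tensorflow as tf

def fully_connected_sketch(x, out_dim, leaky_param, wd, name):
    """Hypothetical stand-in for mf.fully_connected_layer."""
    with tf.variable_scope(name):
        shape = x.get_shape().as_list()
        if len(shape) > 2:
            # Flatten NHWC conv features to [batch, h * w * c].
            x = tf.reshape(x, [-1, int(np.prod(shape[1:]))])
        in_dim = x.get_shape().as_list()[1]
        w = tf.get_variable('weights', [in_dim, out_dim],
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable('biases', [out_dim],
                            initializer=tf.constant_initializer(0.0))
        if wd:
            tf.add_to_collection('losses', tf.multiply(tf.nn.l2_loss(w), wd))
        fc = tf.nn.bias_add(tf.matmul(x, w), b)
        return tf.maximum(fc, leaky_param * fc)  # leaky ReLU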