Example #1
    def resnet_without_bottleneck(self, input, is_training, layer_from_2=[2, 2, 2, 2],
                                  first_kernel=7, first_stride=2, first_pool=True, stride=2):

        input_shape = input.get_shape().as_list()[1:]
        # Stem: first_kernel x first_kernel convolution, optionally followed by 3x3 max pooling
        conv = ops.conv2d(input, 'initial_conv',
                          [first_kernel, first_kernel, input_shape[2], 64],
                          [1, first_stride, first_stride, 1])
        if first_pool:
            conv = ops.max_pool(conv, [1, 3, 3, 1], [1, 2, 2, 1])

        # Four stages of plain (non-bottleneck) residual blocks with 64, 128, 256 and 512 filters;
        # layer_from_2 gives the number of blocks per stage (e.g. [2, 2, 2, 2] for a ResNet-18 layout)
        for i in range(layer_from_2[0]):
            conv = ops.residual_block(conv, 'Block_1_' + str(i), is_training, 64,
                                      kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[1]):
            conv = ops.residual_block(conv, 'Block_2_' + str(i), is_training, 128,
                                      kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[2]):
            conv = ops.residual_block(conv, 'Block_3_' + str(i), is_training, 256,
                                      kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[3]):
            conv = ops.residual_block(conv, 'Block_4_' + str(i), is_training, 512,
                                      kernel=3, first_block=True, stride=stride)

        with tf.variable_scope('unit'):
            conv = ops.batch_normalization(conv,is_training)
            conv = tf.nn.relu(conv)
            conv = ops.global_avg_pool(conv)
            conv = ops.flatten(conv)
        with tf.variable_scope('logit'):
            conv = ops.get_hidden_layer(conv,'output_layer',self.no_of_classes,'none')
        return conv
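A minimal call sketch, not from the source: it assumes `model` is an instance of the surrounding class with `no_of_classes` set and the `ops` helpers importable, and only illustrates how `layer_from_2` selects the per-stage block counts.

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
is_training = tf.placeholder(tf.bool, name='is_training')

# [2, 2, 2, 2] matches the ResNet-18 block layout; [3, 4, 6, 3] would give ResNet-34.
# `model` is a hypothetical instance of the class this method belongs to.
logits = model.resnet_without_bottleneck(images, is_training, layer_from_2=[2, 2, 2, 2])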
Example #2
def ResNet(image, is_train, num_classes=10, reuse=False, name='ResNet-cifar'):
    channel_list = [16, 32, 64]
    with tf.variable_scope(name):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
        # Stem conv followed by three residual stages (16, 32, 64 channels)
        Res_Block_0 = tf.nn.relu(batch_norm_layer(
            conv(image, 16, 3, 'Block_0/res_conv_0', s=1, use_bias=False), is_train))
        Res_Block_1 = residual_block(Res_Block_0, channel_list[0], False, is_train, 'Block_1')
        Res_Block_2 = residual_block(Res_Block_1, channel_list[1], True, is_train, 'Block_2')
        Res_Block_3 = residual_block(Res_Block_2, channel_list[2], True, is_train, 'Block_3')
        # 8x8 global average pool (CIFAR-sized input), then a dense classification head
        ave_vec = tf.reshape(tf.nn.avg_pool(Res_Block_3, [1, 8, 8, 1], [1, 1, 1, 1], 'VALID'),
                             [-1, channel_list[2]])
        logits = tf.layers.dense(ave_vec, num_classes,
                                 kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                                 kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                                 name='logits')
        embed = logits
    return logits, embed
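A hedged usage sketch, assuming the helper ops used above (`conv`, `batch_norm_layer`, `residual_block`) are importable; the fixed 8x8 average pool implies CIFAR-sized 32x32 inputs.

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')

# First call builds the variables; reuse=True shares them, e.g. for an evaluation graph.
logits, embed = ResNet(images, is_train=True, num_classes=10)
eval_logits, _ = ResNet(images, is_train=False, num_classes=10, reuse=True)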
Example #3
    def _create_mover(self, x, reuse=False, train=True, name="mover"):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()

            normalizer = partial(batch_norm, is_training=train)

            # residual blocks
            resamples = ["down"] * (self.num_mov_layers //
                                    2) + ["up"] * (self.num_mov_layers // 2)
            h = x
            for i in range(len(resamples)):
                h = residual_block(h,
                                   k=3,
                                   s=2,
                                   stddev=0.02,
                                   atv_input=i > 0,
                                   bn_input=i > 0,
                                   resample=resamples[i],
                                   output_dim=self.num_gen_feature_maps,
                                   bn=normalizer,
                                   activation_fn=tf.nn.relu,
                                   name="m_block{}".format(i))

            h = normalizer(h, scope="m_preout.bn")
            h = tf.nn.relu(h, name="m_preout.relu")
            h = tf.layers.conv2d(h,
                                 filters=3,
                                 kernel_size=3,
                                 strides=1,
                                 name="m_out.lin",
                                 padding="SAME")
            m_out = tf.nn.tanh(h, name="m_out.tanh")
            return m_out
Example #4
    def _create_critic(self, x, reuse=False, train=True, name="critic"):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()

            normalizer = partial(batch_norm, is_training=train)

            # residual blocks
            resamples = ["down", "down", None, None]
            h = x
            for i in range(4):
                h = residual_block(h,
                                   k=3,
                                   s=2,
                                   stddev=0.02,
                                   atv_input=i > 0,
                                   bn_input=i > 0,
                                   resample=resamples[i],
                                   output_dim=self.num_cri_feature_maps,
                                   bn=normalizer,
                                   activation_fn=tf.nn.relu,
                                   name="c_block{}".format(i))

            # mean pool layer
            h = normalizer(h, scope="c_mean_pool.bn")
            h = tf.nn.relu(h, name="c_mean_pool.relu")
            h = tf.reduce_mean(h, axis=[1, 2], name="c_mean_pool")

            # output layer
            c_out = linear(h, 1, scope='c_out.lin')
            c_out = self.critic_atv(c_out, name="c_out.atv")
            return c_out
Example #5
def residual(inputs, reuse=None, scope=None, num_channels=1):
    net = inputs
    with tf.variable_scope(scope or "model", reuse=reuse) as scp:
        end_pts_collection = scp.name+"end_pts"
        with slim.arg_scope([slim.conv2d],
                            outputs_collections=end_pts_collection):
            net = slim.conv2d(net, 64, [3, 3],
                              normalizer_fn=None,
                              normalizer_params=None,
                              scope="preconv1")

            for i in range(10):
                net = ops.residual_block(net, 64, scope="unit{}".format(i+1))
            net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope="postact")

            net = slim.conv2d(net, 64, [3, 3], scope="postconv1")
            net = slim.conv2d(net, 64, [3, 3], scope="postconv2")
            net = slim.conv2d(net, 64, [3, 3], scope="postconv3")

            net = slim.conv2d(net, num_channels, [3, 3],
                              activation_fn=tf.nn.tanh,
                              normalizer_fn=None,
                              normalizer_params=None,
                              scope="logit")

            end_pts = slim.utils.convert_collection_to_dict(end_pts_collection)
            dn = inputs - net

    return dn, net, end_pts
Example #6
def wavenet_prior(net, is_training):
    from ops import causal_conv, gated_cnn, residual_block
    import json
    with open('wavenet.json') as file:
        wavenet_parameters = json.load(file)

    net = causal_conv(net, **wavenet_parameters['preprocess'])
    skip_outputs = []
    num_layers = len(wavenet_parameters['dilation_rates'])
    print('num_layers:', num_layers)
    print('net_0:', net.shape)
    for i in range(num_layers):
        layer_id = 'layer_%d' % (i + 1)
        conv_args = wavenet_parameters['residual_stack']
        conv_args['dilation_rate'] = wavenet_parameters['dilation_rates'][i]
        net = tf.keras.layers.BatchNormalization()(net)
        skip, net = residual_block(net, **conv_args)
        print('net_%d:' % (i + 1), net.shape,
              ' dr: %d' % conv_args['dilation_rate'])
        skip_outputs.append(skip)
        net = tf.keras.layers.Dropout(0.5)(net, training=is_training)
    net = sum(skip_outputs)
    net = tf.keras.activations.relu(net)
    net = causal_conv(net, **wavenet_parameters['postprocess1'])
    print('net:', net.shape)
    net = causal_conv(net, **wavenet_parameters['postprocess2'])
    print('net:', net.shape)

    shape = net.shape
    net = tf.reshape(net, [-1, shape[1] * shape[2]])

    return net
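`wavenet_prior` reads its hyperparameters from `wavenet.json`. The top-level keys in the sketch below are the ones the function accesses; the inner keyword names (`filters`, `kernel_size`) are only a guess at what `ops.causal_conv` and `ops.residual_block` accept and would need to match the actual implementations.

import json

# Hypothetical config sketch; adapt the inner kwargs to the real ops signatures.
wavenet_parameters = {
    'preprocess': {'filters': 64, 'kernel_size': 2},
    'dilation_rates': [1, 2, 4, 8, 16, 32, 64, 128],
    'residual_stack': {'filters': 64, 'kernel_size': 2},  # 'dilation_rate' is filled in per layer
    'postprocess1': {'filters': 128, 'kernel_size': 1},
    'postprocess2': {'filters': 64, 'kernel_size': 1},
}

with open('wavenet.json', 'w') as file:
    json.dump(wavenet_parameters, file, indent=2)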
Example #7
def generator_128(input, scope_name, reuse=False, skip=False):
    """
	Funtion to build the generator of a GAN.

	Parameters:

		
	"""
    with tf.variable_scope(scope_name, reuse=reuse):
        c7s1_32 = ops.conv_instn_relu(input, [7, 7, 3, 32],
                                      'c7s1-32',
                                      1,
                                      padding="REFLECT")
        d64 = ops.conv_instn_relu(c7s1_32, [3, 3, 32, 64],
                                  'd64',
                                  2,
                                  padding="SAME")
        d128 = ops.conv_instn_relu(d64, [3, 3, 64, 128],
                                   'd128',
                                   2,
                                   padding="SAME")

        r128_1 = ops.residual_block(d128, [3, 3, 128, 128],
                                    'r128_1',
                                    padding="REFLECT")
        r128_2 = ops.residual_block(r128_1, [3, 3, 128, 128],
                                    'r128_2',
                                    padding="REFLECT")
        r128_3 = ops.residual_block(r128_2, [3, 3, 128, 128],
                                    'r128_3',
                                    padding="REFLECT")
        r128_4 = ops.residual_block(r128_3, [3, 3, 128, 128],
                                    'r128_4',
                                    padding="REFLECT")
        r128_5 = ops.residual_block(r128_4, [3, 3, 128, 128],
                                    'r128_5',
                                    padding="REFLECT")
        r128_6 = ops.residual_block(r128_5, [3, 3, 128, 128],
                                    'r128_6',
                                    padding="REFLECT")

        u64 = ops.convt_instn_relu(r128_6, [3, 3, 64, 128],
                                   tf.shape(d64),
                                   'u64',
                                   2,
                                   padding="SAME")
        u64.set_shape(d64.get_shape().as_list())
        u32 = ops.convt_instn_relu(u64, [3, 3, 32, 64],
                                   tf.shape(c7s1_32),
                                   'u32',
                                   2,
                                   padding="SAME")
        u32.set_shape(c7s1_32.get_shape().as_list())
        c7s1_3 = ops.conv2d(u32, [7, 7, 32, 3], 'c7s1-3', 1, padding="REFLECT")
        if skip:
            return tf.nn.tanh(c7s1_3 + input, "output")
        else:
            return tf.nn.tanh(c7s1_3, "output")
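A hedged usage sketch for the two-generator (CycleGAN-style) setting that the `scope_name`, `reuse` and `skip` arguments suggest; the placeholder names and 128x128 shapes are assumptions, not from the source.

import tensorflow as tf

real_a = tf.placeholder(tf.float32, [1, 128, 128, 3], name='real_a')
real_b = tf.placeholder(tf.float32, [1, 128, 128, 3], name='real_b')

fake_b = generator_128(real_a, 'gen_a2b', reuse=False, skip=True)     # A -> B
cycled_a = generator_128(fake_b, 'gen_b2a', reuse=False, skip=True)   # B -> A (creates variables)
fake_a = generator_128(real_b, 'gen_b2a', reuse=True, skip=True)      # B -> A (shares variables)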
Example #8
def generator(img, attr, size):

    concat = Concatenate()([img, Lambda(tileAttr)(attr)])

    DownSample = functools.partial(Conv2D,
                                   padding="same",
                                   kernel_initializer=init_weight,
                                   kernel_regularizer=orthogonal)
    UpSample = functools.partial(Conv2DTranspose,
                                 padding="same",
                                 kernel_initializer=init_weight,
                                 kernel_regularizer=orthogonal)

    conv_in = DownSample(64, 7, name="conv_in_conv")(concat)
    conv_in = SwitchNormalization(axis=-1, name="conv_in_norm")(conv_in)
    conv_in = Activation('relu', name="conv_in_relu")(conv_in)

    down1 = DownSample(128, 4, strides=2, name="down1_conv")(conv_in)
    down1 = SwitchNormalization(axis=-1, name="down1_norm")(down1)
    down1 = Activation('relu', name="down1_relu")(down1)

    down2 = DownSample(256, 4, strides=2, name="down2_conv")(down1)
    down2 = SwitchNormalization(axis=-1, name="down2_norm")(down2)
    down2 = Activation('relu', name="down2_relu")(down2)

    resb = residual_block(down2, 256, 3, res_init_weight, 'block1')
    resb = residual_block(resb, 256, 3, res_init_weight, 'block2')
    resb = residual_block(resb, 256, 3, res_init_weight, 'block3')

    encode_out = resb

    resb = residual_block(resb, 256, 3, res_init_weight, 'block4')
    resb = residual_block(resb, 256, 3, res_init_weight, 'block5')
    resb = residual_block(resb, 256, 3, res_init_weight, 'block6')

    up2 = UpSample(128, 4, strides=2, name="up2_deconv2")(resb)
    up2 = SwitchNormalization(axis=-1, name="up2_norm")(up2)
    up2 = Activation('relu', name="up2_relu")(up2)
    brid2 = up2

    up1 = UpSample(64, 4, strides=2, name="up1_deconv2")(brid2)
    up1 = SwitchNormalization(axis=-1, name="up1_norm")(up1)
    up1 = Activation('relu', name="up1_relu")(up1)
    brid3 = up1

    conv_out = DownSample(3, 7, name="conv_out_conv")(brid3)
    conv_out = Activation('tanh', name="conv_out_tanh")(conv_out)
    return conv_out, encode_out
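Wrapping this functional-API graph into a model might look like the sketch below, shown with tf.keras Input/Model; the 128x128 image shape, the flat 40-dimensional attribute vector, and the `size=128` value are assumptions, and the custom pieces (`SwitchNormalization`, `tileAttr`, `residual_block`) must be importable and tf.keras-compatible.

from tensorflow.keras import Input, Model

img_in = Input(shape=(128, 128, 3), name='img')   # assumed image resolution
attr_in = Input(shape=(40,), name='attr')         # assumed attribute vector; tileAttr is expected to broadcast it spatially

out_img, encoded = generator(img_in, attr_in, size=128)
gen_model = Model(inputs=[img_in, attr_in], outputs=[out_img, encoded], name='generator')
gen_model.summary()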
Example #9
    def __call__(self, input_op):
        # Switched back to the magic-method (__call__) implementation, which is more concise
        with tf.variable_scope(self.name):
            conv1 = ops.conv_block(input_op,
                                   32,
                                   'conv1',
                                   7,
                                   1,
                                   self.is_train,
                                   self.reuse,
                                   self.norm,
                                   self.activation,
                                   pad='REFLECT')
            conv2 = ops.conv_block(conv1, 64, 'conv2', 3, 2, self.is_train,
                                   self.reuse, self.norm, self.activation)
            res = ops.conv_block(conv2, 128, 'conv3', 3, 2, self.is_train,
                                 self.reuse, self.norm, self.activation)
            for i in range(self.block_size):
                res = ops.residual_block(res, 128, 'res' + str(i),
                                         self.is_train, self.reuse, self.norm)
            deconv1 = ops.deconv_block(res, 64, 'deconv1', 3, 2, self.is_train,
                                       self.reuse, self.norm, self.activation)
            deconv2 = ops.deconv_block(deconv1, 32, 'deconv2', 3, 2,
                                       self.is_train, self.reuse, self.norm,
                                       self.activation)
            self.gen = ops.conv_block(deconv2,
                                      3,
                                      'conv_end',
                                      7,
                                      1,
                                      self.is_train,
                                      self.reuse,
                                      norm=None,
                                      activation=tf.nn.tanh,
                                      pad='REFLECT')

            self.reuse = True

            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              self.name)
Example #10
    def _create_generator(self, z, train=True, reuse=False, name="generator"):
        with tf.variable_scope(name) as scope:
            if reuse:
                scope.reuse_variables()

            normalizer = partial(batch_norm, is_training=train)

            # project to the first layer
            h = linear(z,
                       4 * 4 * self.num_gen_feature_maps,
                       scope="g_h0.linear")
            h = tf.reshape(h,
                           [self.batch_size, 4, 4, self.num_gen_feature_maps])

            if self.img_size[0] == 32:
                num_res_blocks = 3
            elif self.img_size[0] == 64:
                num_res_blocks = 4
            else:
                raise ValueError('Unsupported image size: {}'.format(self.img_size))
            for i in range(num_res_blocks):
                h = residual_block(h,
                                   k=3,
                                   s=2,
                                   stddev=0.02,
                                   resample="up",
                                   output_dim=self.num_gen_feature_maps,
                                   bn=normalizer,
                                   activation_fn=tf.nn.relu,
                                   name="g_block{}".format(i))

            h = normalizer(h, scope="g_preout.bn")
            h = tf.nn.relu(h, name="g_preout.relu")
            h = tf.layers.conv2d(h,
                                 filters=3,
                                 kernel_size=3,
                                 strides=1,
                                 name="g_out.lin",
                                 padding="SAME")
            g_out = tf.nn.tanh(h, name="g_out.tanh")
            return g_out
Example #11
    def __init__(self, data_loader, layers=(16, 32, 64), residual_layers=(5, 5, 5), data_augmentation=True,
                 non_core_layers=(1, 1, 1), learning_rate=0.01, batch_size=128, zero_init=False):
        """
        :param layers: tuple that has the depth dimension vector
        """

        assert (len(layers) == len(residual_layers))
        m_data, labels, self.valid_data, self.valid_labels = data_loader.get_data()

        self.q = DataQueue(m_data, labels, batch_size, capacity=200, threads=32, data_aug=data_augmentation)
        self.q.start()

        n, y_dim, x_dim, channel = m_data.shape
        y_dim = x_dim = 32
        self.batch_size = batch_size
        self.batch_len_in_epoch = int(math.ceil(n / self.batch_size)) - 1
        self.layers = layers
        self.residual_layers_between = residual_layers

        self.x = tf.placeholder(tf.float32, shape=(batch_size, y_dim, x_dim, channel))
        self.y = tf.placeholder(tf.int32, shape=(batch_size,))

        self.phase = tf.placeholder(tf.bool, name='phase')

        self.global_step = tf.Variable(0, trainable=False, name='global_step')

        self.learning_rate = tf.Variable(learning_rate, trainable=False, dtype=tf.float32, name='learning_rate')
        self.decrease_learning_rate = tf.assign(self.learning_rate, tf.multiply(self.learning_rate, 0.1))

        # Layers
        conv_layer = None
        for i, depth in enumerate(layers):
            if conv_layer is None:
                conv_layer = residual_block(self.x, depth, block_num=str(depth), first_block=True, core=True,
                                            is_training=self.phase)
            else:
                conv_layer = residual_block(conv_layer, depth, block_num=str(depth), first_block=False, core=True,
                                            is_training=self.phase)
            assert (conv_layer.get_shape()[-1] == depth)

            for k in range(residual_layers[i]):
                if k > residual_layers[i] - non_core_layers[i]:
                    # non-core layer
                    conv_layer = residual_block(conv_layer, depth, block_num=str(depth) + '_' + str(k),
                                                zero_init=zero_init, is_training=self.phase)
                else:
                    conv_layer = residual_block(conv_layer, depth, block_num=str(depth) + '_' + str(k), core=True,
                                                zero_init=False, is_training=self.phase)
                assert (conv_layer.get_shape()[-1] == depth)

        self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        pool_layer = tf.reduce_mean(conv_layer, [1, 2])
        self.logits = tf.layers.dense(tf.reshape(pool_layer, [batch_size, -1]), 10)

        self.prediction = tf.argmax(self.logits, 1)
        trainable_vars = tf.trainable_variables()
        core_var_list = [v for v in trainable_vars if 'core' in v.name]

        # L2 weight decay over all trainable variables except batch-norm parameters
        loss_l2 = tf.add_n([tf.nn.l2_loss(v) for v in trainable_vars if 'bn' not in v.name]) / 2 * 0.0001

        self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits, labels=self.y)) + loss_l2

        prediction = tf.equal(tf.argmax(self.logits, 1), tf.cast(self.y, tf.int64))

        self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))

        with tf.control_dependencies(self.update_ops):
            with tf.name_scope('train'):
                self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)

                self.train_op = self.optimizer.minimize(loss=self.loss, global_step=self.global_step)

                # train core only
                self.train_core_op = self.optimizer.minimize(loss=self.loss, var_list=core_var_list,
                                                             global_step=self.global_step)

        self.saver = tf.train.Saver(max_to_keep=10)
Example #12
def convResnet(x,
               is_training,
               aux=None,
               reuse=None,
               scope='dn_net',
               ngf=32,
               n_blocks=2,
               n_down=2,
               learn_residual=True):
    with tf.variable_scope(scope, reuse=reuse) as scp:
        end_pts_collection = scp.name + 'end_pts'
        weight_collection = scp.name + 'weight'
        with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                            activation_fn=tf.nn.leaky_relu,
                            normalizer_fn=None,
                            weights_regularizer=slim.l2_regularizer(0.05),
                            variables_collections=[weight_collection],
                            outputs_collections=end_pts_collection):
            scale_skips = []
            cat_axis = 3  # NHWC
            if aux is None:
                cnv = slim.conv2d(x, ngf, [3, 3], scope='cnv0')
            else:
                cnv0_d = slim.conv2d(x,
                                     ngf // 2, [3, 3],
                                     stride=2,
                                     scope='cnv0_d')
                cnv0_c = slim.conv2d(resize_like(aux, cnv0_d),
                                     ngf // 2, [1, 1],
                                     stride=1,
                                     scope='cnv0_c')
                cnv = tf.concat([cnv0_d, cnv0_c], axis=3)  # fuse downsampled input features with resized aux features
            scale_skips.append(cnv)

            mult = 1
            for i in range(n_down):
                mult *= 2
                cnv = slim.conv2d(cnv,
                                  ngf * mult, [3, 3],
                                  stride=2,
                                  scope='cnv_down{}'.format(i))
                scale_skips.append(cnv)
            for i in range(n_blocks):
                cnv = ops.residual_block(cnv,
                                         ngf * mult,
                                         norm_fn=None,
                                         scope='res{}'.format(i))
            for i in range(n_down):
                mult //= 2  # floor-divide so ngf * mult remains an integer channel count
                cnv = tf.concat([scale_skips[-1], cnv],
                                cat_axis,
                                name='cat{}'.format(i + 1))
                scale_skips.pop()
                cnv = slim.conv2d_transpose(cnv,
                                            ngf * mult, [3, 3],
                                            stride=2,
                                            scope='cnv_up{}'.format(i))

            cnv = tf.concat([scale_skips[0], cnv], cat_axis, name='cat_final')
            assert len(scale_skips) == 1
            del scale_skips
            cnv = slim.conv2d(cnv,
                              1, [3, 3],
                              activation_fn=tf.nn.tanh,
                              normalizer_fn=None,
                              scope='cnv_final')

            if learn_residual:
                out = x + cnv
                out = tf.clip_by_value(out, -1, 1)
            else:
                out = cnv  # if cnv_final act is tanh
                # out = tf.clip_by_value(cnv, -1, 1)  # if cnv_final act is None

            end_pts = slim.utils.convert_collection_to_dict(end_pts_collection)
            weight_vars = tf.get_collection(weight_collection)
            return out, end_pts, weight_vars
Example #13
        def C(img, scope='Classifier', reuse=False):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                _ = img

                # MNIST, Fashion MNIST, SVHN, CIFAR
                if not self.config.dataset == 'ImageNet':
                    # conv layers
                    num_channels = [64, 128, 256, 512]
                    for i in range(len(num_channels)):
                        _ = conv2d(_,
                                   num_channels[i],
                                   is_train,
                                   norm_type=self.norm_type,
                                   info=not reuse,
                                   name='conv_{}'.format(i))
                        _ = slim.dropout(_,
                                         keep_prob=0.5,
                                         is_training=is_train)

                    # fc layers
                    _ = tf.reshape(_, [self.batch_size, -1])
                    num_fc_channels = [512, 128, 32, n]
                    for i in range(len(num_fc_channels)):
                        _ = fc(_,
                               num_fc_channels[i],
                               is_train,
                               norm_type='none',
                               info=not reuse,
                               name='fc_{}'.format(i))
                # ImageNet
                else:
                    # conv layers
                    num_channels = [64, 128, 256, 512, 1024]
                    num_residual_block = [0, 2, 3, 5, 2]
                    for i in range(len(num_channels)):
                        _ = conv2d(_,
                                   num_channels[i],
                                   is_train,
                                   norm_type=self.norm_type,
                                   info=not reuse,
                                   name='conv_{}'.format(i))
                        for j in range(num_residual_block[i]):
                            _ = residual_block(_,
                                               num_channels[i],
                                               is_train,
                                               norm_type=self.norm_type,
                                               info=not reuse,
                                               name='residual_{}_{}'.format(
                                                   i, j))
                    _ = tf.layers.average_pooling2d(_, [7, 7], [7, 7])
                    log.info('{} {}'.format(_.name, _.get_shape().as_list()))
                    # fc layers
                    _ = tf.reshape(_, [self.batch_size, -1])
                    num_fc_channels = [n]
                    for i in range(len(num_fc_channels)):
                        _ = fc(_,
                               num_fc_channels[i],
                               is_train,
                               norm_type='none',
                               info=not reuse,
                               name='fc_{}'.format(i))
                return _
Example #14
        def Joint_Decoder(pixel_input,
                          flow_input,
                          input_image,
                          scope='Joint_Decoder',
                          reuse=False):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warn(scope.name)
                assert pixel_input[-1].get_shape() == flow_input[-1].get_shape()

                _p = pixel_input[-1]
                _f = flow_input[-1]

                with tf.variable_scope('Pixel'):
                    # Res blocks
                    ch = int(_p.get_shape()[-1])
                    for i in range(int(num_res_block_pixel)):
                        _p = residual_block(_p,
                                            ch,
                                            is_train,
                                            info=not reuse,
                                            norm=self.norm_type,
                                            name='pixel_R{}_{}'.format(ch, i))

                with tf.variable_scope('Flow'):
                    # Res blocks
                    ch = int(_f.get_shape()[-1])
                    for i in range(int(num_res_block_flow)):
                        _f = residual_block(_f,
                                            ch,
                                            is_train,
                                            info=not reuse,
                                            norm=self.norm_type,
                                            name='flow_R{}_{}'.format(ch, i))

                # Deconv
                pixel_output = None
                flow_output = None
                pixel_output_list = []
                flow_output_list = []
                x_list = []
                y_list = []
                pixel_mask_list = []
                flow_mask_list = []
                num_deconv_layer = int(
                    np.log2(self.input_height /
                            int(pixel_input[-1].get_shape()[1])))
                num_channel = [256, 128, 64, 32, 16, 8]
                for i in range(num_deconv_layer):

                    with tf.variable_scope('Flow'):
                        _f = deconv2d(_f, [num_channel[i], 3, 2],
                                      is_train,
                                      info=not reuse,
                                      norm=self.norm_type,
                                      name='flow_deconv{}'.format(i + 1))

                    with tf.variable_scope('Pixel'):
                        # skip connection
                        if not (num_res_block_pixel == 0 and i == 0):
                            _p = tf.concat([_p, pixel_input.pop(-1)], axis=-1)
                        else:
                            pixel_input.pop(-1)
                        if not reuse:
                            log.info(
                                'pixel_deconv{}_in_layer_concat {}'.format(
                                    i + 1,
                                    _p.get_shape().as_list()))
                        _p = deconv2d(_p, [num_channel[i], 3, 2],
                                      is_train,
                                      info=not reuse,
                                      norm=self.norm_type,
                                      name='pixel_deconv{}'.format(i + 1))

                    if num_deconv_layer - i <= num_scale:
                        with tf.variable_scope('Flow'):
                            flow_output = deconv2d(
                                _f, [int(num_channel[i] / 2), 3, 1],
                                is_train,
                                info=not reuse,
                                norm=self.norm_type,
                                name='flow_deconv{}_out_layer_1'.format(i + 1))
                            flow_output = deconv2d(
                                flow_output, [3, 3, 1],
                                is_train,
                                info=not reuse,
                                norm='None',
                                activation_fn=None,
                                name='flow_deconv{}_out_layer_2'.format(i + 1))

                            # bilinear sample: flow -> img
                            x, y = flow_output[:, :, :, 0], flow_output[:, :, :, 1]
                            flow_mask = flow_output[:, :, :, -1]
                            h = int(flow_output.get_shape()[1])
                            w = int(flow_output.get_shape()[2])
                            x_g = tf.convert_to_tensor(np.expand_dims(
                                2 * np.array(range(w)) / w - 1, axis=0),
                                                       dtype=tf.float32)
                            y_g = tf.convert_to_tensor(np.expand_dims(
                                2 * np.array(range(h)) / h - 1, axis=1),
                                                       dtype=tf.float32)
                            flow_output_img = bilinear_sampler(
                                tf.image.resize_nearest_neighbor(
                                    (input_image + 1) / 2, [h, w]), x + x_g,
                                y + y_g) * 2 - 1
                            flow_output_list.append(flow_output_img)
                            x_list.append(x)
                            y_list.append(y)
                            flow_mask_list.append(flow_mask)

                        with tf.variable_scope('Pixel'):
                            if i == num_deconv_layer - 1:
                                log.error('pixel output')
                                pixel_output = deconv2d(
                                    _p, [int(num_channel[i] / 2), 3, 1],
                                    is_train,
                                    info=not reuse,
                                    norm=self.norm_type,
                                    name='pixel_deconv{}_out_layer_1'.format(
                                        i + 1))
                                pixel_output = deconv2d(
                                    pixel_output, [c + 1, 3, 1],
                                    is_train,
                                    info=not reuse,
                                    norm='None',
                                    activation_fn=None,
                                    name='pixel_deconv{}_out_layer_2'.format(
                                        i + 1))
                                pixel_mask = pixel_output[:, :, :, -1]
                                pixel_output = tf.tanh(
                                    pixel_output[:, :, :, :c])
                            else:
                                log.error('pixel dummy output')
                                pixel_output = tf.zeros_like(flow_output)
                                pixel_mask = tf.zeros_like(flow_mask)
                            pixel_mask_list.append(pixel_mask)
                            pixel_output_list.append(pixel_output)

                        if not reuse:
                            log.infov(
                                'flow_output_img: {}, pixel output: {}, '
                                'flow_feature_map: {}, pixel_feature_map: {}'.
                                format(flow_output_img.get_shape().as_list(),
                                       pixel_output.get_shape().as_list(),
                                       _f.get_shape().as_list(),
                                       _p.get_shape().as_list()))
                        _f = tf.concat([_f, flow_output], axis=-1)
                        _p = tf.concat([_p, pixel_output], axis=-1)
                        if not reuse:
                            log.info(
                                'flow_deconv{}_out_layer_concat {}'.format(
                                    i + 1,
                                    _f.get_shape().as_list()))
                        if not reuse:
                            log.info(
                                'pixel_deconv{}_out_layer_concat {}'.format(
                                    i + 1,
                                    _p.get_shape().as_list()))

            return pixel_output_list, pixel_mask_list, flow_output_list, \
                flow_mask_list, x_list, y_list