Example #1
def add_residual_pre(prev_layer,
                     z_concat=None,
                     text_filters=None,
                     k_h=5,
                     k_w=5,
                     hidden_text_filters=None,
                     hidden_filters=None,
                     name_func=None):

    # infer channel count and spatial size from the incoming feature map
    filters = prev_layer.get_shape()[3].value
    if hidden_filters is None:
        hidden_filters = filters * 4
    if text_filters is None:
        text_filters = int(filters / 2)
    if hidden_text_filters is None:
        hidden_text_filters = int(filters / 8)
    s = prev_layer.get_shape()[1].value

    # g_name is assumed to be a module-level name generator, separate from the
    # name_func argument used below
    bn0 = util.batch_norm(name=g_name())
    bn1 = util.batch_norm(name=g_name())

    low_dim = util.conv2d(util.lrelu(bn0(prev_layer)),
                          hidden_filters,
                          k_h=k_h,
                          k_w=k_w,
                          name=name_func())

    # batch_size is not an argument here; it is assumed to come from the
    # enclosing module
    residual = util.deconv2d(util.lrelu(bn1(low_dim), name=name_func()),
                             [batch_size, s, s, filters],
                             k_h=k_h,
                             k_w=k_w,
                             name=name_func())

    next_layer = prev_layer + residual
    return next_layer
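Every example on this page calls an lrelu helper (directly or as util.lrelu) that is not shown. A minimal sketch of such a leaky ReLU, assuming the usual DCGAN-style formulation; the slope of 0.2 and the function name are assumptions, not taken from any of the examples:

import tensorflow as tf

def lrelu(x, leak=0.2, name='lrelu'):
    # leaky ReLU: pass positive values through, scale negatives by `leak`
    return tf.maximum(x, leak * x, name=name)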
Example #2
    def set_model(self, z, batch_size, is_training, reuse=False):

        # reshape z
        with tf.variable_scope(self.name_scope_reshape, reuse=reuse):
            w_r = get_weights('_r',
                              [self.z_dim, self.in_dim * self.in_dim * self.layer_chanels[0]],
                              0.02)
            b_r = get_biases('_r',
                             [self.in_dim * self.in_dim * self.layer_chanels[0]],
                             0.0)
            h = tf.matmul(z, w_r) + b_r
            h = batch_norm(h, 'reshape', is_training)
            #h = tf.nn.relu(h)
            h = lrelu(h)
            
        h = tf.reshape(h, [-1, self.in_dim, self.in_dim, self.layer_chanels[0]])

        # deconvolution
        layer_num = len(self.layer_chanels) - 1
        with tf.variable_scope(self.name_scope_deconv, reuse=reuse):
            for i, (in_chan, out_chan) in enumerate(
                    zip(self.layer_chanels, self.layer_chanels[1:])):
                deconved = deconv_layer(inputs=h,
                                        out_shape=[batch_size,
                                                   self.in_dim * 2 ** (i + 1),
                                                   self.in_dim * 2 ** (i + 1),
                                                   out_chan],
                                        filter_width=5, filter_hight=5,
                                        stride=2, l_id=i)
                if i == layer_num - 1:
                    h = tf.nn.tanh(deconved)
                else:
                    bn_deconved = batch_norm(deconved, i, is_training)
                    #h = tf.nn.relu(bn_deconved)
                    h = lrelu(bn_deconved)

        return h
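Examples #2 and #5 rely on a deconv_layer helper that is not shown. A hypothetical reconstruction consistent with how it is called above, assuming NHWC tensors and one transposed convolution per layer id; the variable names and initializers are assumptions:

def deconv_layer(inputs, out_shape, filter_width, filter_hight, stride, l_id):
    in_chan = inputs.get_shape()[-1].value
    out_chan = out_shape[-1]
    with tf.variable_scope('deconv_{}'.format(l_id)):
        # filter layout for conv2d_transpose is [height, width, out_chan, in_chan]
        w = tf.get_variable('w', [filter_hight, filter_width, out_chan, in_chan],
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [out_chan],
                            initializer=tf.constant_initializer(0.0))
        deconved = tf.nn.conv2d_transpose(inputs, w, output_shape=out_shape,
                                          strides=[1, stride, stride, 1])
        return tf.nn.bias_add(deconved, b)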
Example #3
def build_model(x, scale, training, reuse):
    # residual super-resolution generator: bottleneck residual blocks at low
    # resolution, nearest-neighbor plus transposed-convolution upsampling by
    # `scale`, then more residual blocks at high resolution
    hidden_size = 128
    bottleneck_size = 64
    x = tf.layers.conv2d(x,
                         hidden_size,
                         1,
                         activation=None,
                         name='in',
                         reuse=reuse)
    for i in range(6):
        x = util.crop_by_pixel(
            x, 1) + conv(x, hidden_size, bottleneck_size, training,
                         'lr_conv' + str(i), reuse)
    if scale == 4:
        scale = 2
        x = tf.image.resize_nearest_neighbor(
            x,
            tf.shape(x)[1:3] * scale) + tf.layers.conv2d_transpose(
                util.lrelu(x),
                hidden_size,
                scale,
                strides=scale,
                activation=None,
                name='up1',
                reuse=reuse)
        x = util.crop_by_pixel(x, 1) + conv(x, hidden_size, bottleneck_size,
                                            training, 'up_conv', reuse)
        x = tf.image.resize_nearest_neighbor(
            x,
            tf.shape(x)[1:3] * scale) + tf.layers.conv2d_transpose(
                util.lrelu(x),
                hidden_size,
                scale,
                strides=scale,
                activation=None,
                name='up2',
                reuse=reuse)
    else:
        x = tf.image.resize_nearest_neighbor(
            x,
            tf.shape(x)[1:3] * scale) + tf.layers.conv2d_transpose(
                util.lrelu(x),
                hidden_size,
                scale,
                strides=scale,
                activation=None,
                name='up',
                reuse=reuse)
    for i in range(4):
        x = util.crop_by_pixel(
            x, 1) + conv(x, hidden_size, bottleneck_size, training,
                         'hr_conv' + str(i), reuse)
    x = util.lrelu(x)
    x = tf.layers.conv2d(x, 3, 1, activation=None, name='out', reuse=reuse)
    return x
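The residual additions above (util.crop_by_pixel(x, 1) + conv(...)) only line up because the conv helper, shown in Examples #6 and #7 below, ends with a 'valid'-padded 3x3 convolution that removes one pixel from every border; crop_by_pixel presumably trims the skip path to match. A minimal sketch of such a helper, assuming NHWC tensors; the slicing implementation is an assumption:

def crop_by_pixel(x, n):
    # trim n pixels from every spatial border of an NHWC tensor
    return x[:, n:-n, n:-n, :]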
Example #4
def build_model(x, scale, training, reuse):
    # variant of the model above without the nearest-neighbor skip connections:
    # upsampling uses plain transposed convolutions, and the channel width drops
    # to 64 for the high-resolution blocks
    hidden_size = 128
    bottleneck_size = 64
    x = tf.layers.conv2d(x,
                         hidden_size,
                         1,
                         activation=None,
                         name='in',
                         reuse=reuse)
    for i in range(6):
        x = util.crop_by_pixel(
            x, 1) + conv(x, hidden_size, bottleneck_size, training,
                         'lr_conv' + str(i), reuse)
    x = util.lrelu(x)
    if scale == 4:
        scale = 2
        x = tf.layers.conv2d_transpose(x,
                                       hidden_size,
                                       scale,
                                       strides=scale,
                                       activation=None,
                                       name='up1',
                                       reuse=reuse)
        x = util.crop_by_pixel(x, 1) + conv(x, hidden_size, bottleneck_size,
                                            training, 'up_conv', reuse)
        x = util.lrelu(x)
        hidden_size = 64
        x = tf.layers.conv2d_transpose(x,
                                       hidden_size,
                                       scale,
                                       strides=scale,
                                       activation=None,
                                       name='up2',
                                       reuse=reuse)
    else:
        hidden_size = 64
        x = tf.layers.conv2d_transpose(x,
                                       hidden_size,
                                       scale,
                                       strides=scale,
                                       activation=None,
                                       name='up',
                                       reuse=reuse)
    for i in range(4):
        x = util.crop_by_pixel(
            x, 1) + conv(x, hidden_size, bottleneck_size, training,
                         'hr_conv' + str(i), reuse)
    x = util.lrelu(x)
    x = tf.layers.conv2d(x, 3, 1, activation=None, name='out', reuse=reuse)
    return x
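A hedged usage sketch for either build_model variant, assuming the helpers above are importable and that the input is an NHWC float batch; the placeholder shape and name are assumptions:

lr_images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='lr_images')
sr_images = build_model(lr_images, scale=4, training=True, reuse=False)
# a second call with reuse=True would share the same variables, e.g. for a test graph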
Example #5
    def set_model(self, z, labels, batch_size, is_training, reuse=False):

        # reshape z
        with tf.variable_scope(self.name_scope_reshape, reuse=reuse):
            h = linear_layer(z, self.z_dim,
                             self.in_dim * self.in_dim * self.layer_chanels[0],
                             'reshape')
            h = batch_norm(h, 'reshape', is_training)
            h = lrelu(h)

        h_z = tf.reshape(h,
                         [-1, self.in_dim, self.in_dim, self.layer_chanels[0]])
        # reshape labels (the label width is read from the tensor's static
        # shape, since no label-dimension attribute is shown in this example)
        with tf.variable_scope(self.name_scope_label, reuse=reuse):
            h = linear_layer(labels, labels.get_shape()[-1].value,
                             self.in_dim * self.in_dim * self.layer_chanels[0],
                             'label')
            h = batch_norm(h, 'label', is_training)
            h = lrelu(h)

        # concat
        h_label = tf.reshape(
            h, [-1, self.in_dim, self.in_dim, self.layer_chanels[0]])
        h = tf.concat([h_z, h_label], 3)

        # deconvolution
        layer_num = len(self.layer_chanels) - 1
        with tf.variable_scope(self.name_scope_deconv, reuse=reuse):
            for i, (in_chan, out_chan) in enumerate(
                    zip(self.layer_chanels, self.layer_chanels[1:])):
                deconved = deconv_layer(inputs=h,
                                        out_shape=[
                                            batch_size,
                                            self.in_dim * 2**(i + 1),
                                            self.in_dim * 2**(i + 1), out_chan
                                        ],
                                        filter_width=5,
                                        filter_hight=5,
                                        stride=2,
                                        l_id=i)
                if i == layer_num - 1:
                    h = tf.nn.tanh(deconved)
                else:
                    bn_deconved = batch_norm(deconved, i, is_training)
                    #h = tf.nn.relu(bn_deconved)
                    h = lrelu(bn_deconved)

        return h
Example #6
def conv(x, hidden_size, bottleneck_size, training, name, reuse):
    # bottleneck residual body: lrelu pre-activation, a 1x1 projection down to
    # bottleneck_size, then a 3x3 convolution back to hidden_size;
    # `training` is accepted but unused here
    x = util.lrelu(x)
    x = tf.layers.conv2d(x,
                         bottleneck_size,
                         1,
                         activation=None,
                         name=name + '_proj',
                         reuse=reuse)

    x = util.lrelu(x)
    x = tf.layers.conv2d(x,
                         hidden_size,
                         3,
                         activation=None,
                         name=name + '_filt',
                         reuse=reuse)
    return x
Example #7
def conv(x, hidden_size, bottleneck_size, training, name, reuse):
    # gated variant of the bottleneck body: the 3x3 convolution produces twice
    # the channels so that half of them can serve as a gate
    x = util.lrelu(x)
    x = tf.layers.conv2d(x,
                         bottleneck_size,
                         1,
                         activation=None,
                         name=name + '_proj',
                         reuse=reuse)

    x = util.lrelu(x)
    x = tf.layers.conv2d(x,
                         hidden_size * 2,
                         3,
                         activation=None,
                         name=name + '_filt',
                         reuse=reuse)
    # gated linear unit: split the doubled channels and modulate the first half
    # by a sigmoid gate computed from the second half
    x, y = tf.split(x, 2, 3)
    x = x * tf.nn.sigmoid(y)

    return x
Example #8
    def __call__(self, x, is_training, reuse):
        # return only logits
        h = x
        with tf.variable_scope(self.name_scope, reuse=reuse):
            for i, (in_dim, out_dim) in enumerate(
                    zip(self.layer_list, self.layer_list[1:-1])):
                h = linear_layer(h, in_dim, out_dim, i)
                #h = batch_norm(h, i, is_training=is_training)
                h = lrelu(h)

            ret = linear_layer(h, self.layer_list[-2], self.layer_list[-1],
                               'output')
        return ret
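Examples #5, #8, and #10 all call a linear_layer helper that is not shown. A hypothetical reconstruction consistent with how it is called (input tensor, input width, output width, layer id); the variable names and initializers are assumptions:

def linear_layer(x, in_dim, out_dim, l_id):
    with tf.variable_scope('linear_{}'.format(l_id)):
        w = tf.get_variable('w', [in_dim, out_dim],
                            initializer=tf.random_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [out_dim],
                            initializer=tf.constant_initializer(0.0))
        return tf.matmul(x, w) + b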
Example #9
def dcgan_discriminator(features, model_params, isTrain=True, reuse=False):
    with tf.variable_scope('discriminator', reuse=reuse):
        # initializer
        w_init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)
        b_init = tf.constant_initializer(0.0)

        x = tf.reshape(features['x'], (-1,) + model_params['x_pre_shape'])
        y_width, y_height, _ = model_params['y_pre_shape']
        y = tf.reshape(tf.tile(features['y'], [1, y_width * y_height]),
                       (-1,) + model_params['y_pre_shape'])
        summ_y = tf.summary.tensor_summary('y_summ', y)

        def conv_fn(data_in, param):
            return tf.layers.conv2d(data_in, param[0], param[1], strides=param[2], padding=param[3],
                                    kernel_initializer=w_init, bias_initializer=b_init)

        x_conv = lrelu(conv_fn(x, model_params['x_conv_param']))
        y_conv = lrelu(conv_fn(y, model_params['y_conv_param']))

        # concat layer
        cat1 = tf.concat([x_conv, y_conv], 3)

        prev = cat1
        for param in model_params['mid_conv_params']:
            conv = conv_fn(prev, param)
            # pass the training flag so batch norm uses batch statistics while training
            conv_batch_normalized = tf.layers.batch_normalization(conv, training=isTrain)
            relu = lrelu(conv_batch_normalized)
            prev = relu
            
        # output layer
        conv = conv_fn(prev, model_params['final_conv_param'])

        assert_op = tf.assert_equal(tf.shape(conv)[1:], model_params['output_shape'])
        with tf.control_dependencies([assert_op, summ_y]):
            o = tf.nn.sigmoid(conv)

        return o, conv
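conv_fn unpacks each parameter tuple as (filters, kernel_size, strides, padding). A hypothetical model_params layout consistent with that unpacking and with the reshapes above, assuming 28x28 grayscale images and 10-way one-hot labels; every value here is an illustrative assumption:

model_params = {
    'x_pre_shape': (28, 28, 1),
    'y_pre_shape': (28, 28, 10),               # labels tiled over the image plane
    'x_conv_param': (64, 5, 2, 'same'),        # 28x28 -> 14x14
    'y_conv_param': (64, 5, 2, 'same'),
    'mid_conv_params': [(128, 5, 2, 'same'),   # 14x14 -> 7x7
                        (256, 5, 2, 'same')],  # 7x7  -> 4x4
    'final_conv_param': (1, 4, 1, 'valid'),    # 4x4  -> 1x1 logit map
    'output_shape': (1, 1, 1),
}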
Example #10
    def __call__(self, x, is_training, reuse):
        # encoder head: hidden layers with batch norm and lrelu, then two linear
        # heads producing the mean and log-sigma of a Gaussian latent code
        h = x
        with tf.variable_scope(self.name_scope, reuse=reuse):
            for i, (in_dim, out_dim) in enumerate(
                    zip(self.layer_list, self.layer_list[1:-1])):
                h = linear_layer(h, in_dim, out_dim, i)
                h = batch_norm(h, i, is_training=is_training)
                h = lrelu(h)

            mu = linear_layer(h, self.layer_list[-2], self.layer_list[-1],
                              'mu')
            log_sigma = linear_layer(h, self.layer_list[-2],
                                     self.layer_list[-1], 'log_sigma')

            return mu, log_sigma
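The mu/log_sigma pair returned here is the usual VAE encoder output. A hedged sketch of how it would typically be consumed with the reparameterization trick; the encoder instance name and the reading of log_sigma as a log standard deviation are assumptions:

mu, log_sigma = encoder(x, is_training=True, reuse=False)
eps = tf.random_normal(tf.shape(mu))
z = mu + tf.exp(log_sigma) * eps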
Example #11
def cgan_discriminator(features, model_params, isTrain=True, reuse=False):
    with tf.variable_scope('discriminator', reuse=reuse):
        w_init = tf.contrib.layers.xavier_initializer()

        x = tf.reshape(features['x'], (-1,) + model_params['input_shape'][0])
        y = tf.reshape(features['y'], (-1,) + model_params['input_shape'][1])

        x_representation = tf.layers.dense(x, model_params['x_dense_width'], kernel_initializer=w_init)
        y_representation = tf.layers.dense(y, model_params['y_dense_width'], kernel_initializer=w_init)

        cat1 = tf.concat([x_representation, y_representation], 1)

        prev = cat1
        for width in model_params['mid_dense_widths']:
            dense = tf.layers.dense(prev, width, kernel_initializer=w_init)
            # pass the training flag so batch norm uses batch statistics while training
            dense_batch_normalized = tf.layers.batch_normalization(dense, training=isTrain)
            leaky_relu = lrelu(dense_batch_normalized)
            prev = leaky_relu
            
        logit = tf.layers.dense(prev, model_params['output_shape'][0][0], kernel_initializer=w_init)
        o = tf.nn.sigmoid(logit)

        return o, logit
Example #12
def discriminator(x_image, y_label,
                  batch_size=10,
                  dim_con=64,
                  dim_fc=1024,
                  reuse=False):
    """
    Returns the discriminator network. It takes an image and returns a real/fake classification across each label.
    The discriminator network is structured as a Convolution Neural Net with two layers of convolution and pooling,
    followed by two fully-connected layers.

    Args:
        x_image:
        y_label:
        batch_size:
        dim_con:
        dim_fc:
        reuse:

    Returns:
        The discriminator network.
    """
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()

        # create x as the joint 4-D feature representation of the image and the label
        y_4d = tf.reshape(y_label, [batch_size, 1, 1, DIM_Y])
        x_4d = tf.reshape(x_image, [batch_size, 28, 28, 1])
        x = concat(x_4d, y_4d)

        tf.summary.histogram('act_d0', x)

        # first convolution-activation-pooling layer
        d1 = cnn_block(x, 1 + DIM_Y, 'd1')

        # join the output of the previous layer with the labels vector
        d1 = concat(d1, y_4d)

        tf.summary.histogram('act_d1', d1)

        # second convolution-activation-pooling layer
        d2 = cnn_block(d1, dim_con + DIM_Y, 'd2')

        # flatten the output of the second layer to a 2-D matrix with shape - [batch, ?]
        d2 = tf.reshape(d2, [batch_size, -1])

        # join the flattened output with the labels vector and apply this as input to
        # a series of fully connected layers.
        d2 = tf.concat([d2, y_label], 1)

        tf.summary.histogram('act_d2', d2)

        # first fully connected layer
        d3 = tf.nn.dropout(lrelu(linear(
            x_input=d2,
            dim_in=d2.get_shape().as_list()[-1],
            dim_out=dim_fc,
            name='d3')), KEEP_PROB)

        # join the output of the previous layer with the labels vector
        d3 = tf.concat([d3, y_label], 1)

        tf.summary.histogram('act_d3', d3)

        # second and last fully connected layer
        # calculate the un-normalized log probability of each label
        d4_logits = linear(d3, dim_fc + DIM_Y, 1, 'd4')

        # calculate the activation values, dimension - [batch, 1]
        d4 = tf.nn.sigmoid(d4_logits)

        tf.summary.histogram('act_d4', d4)

        return d4, d4_logits
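A hedged usage sketch for the discriminator above, scoring real and generated batches with shared weights; the tensor names real_images, fake_images, and labels are assumptions:

d_real, d_real_logits = discriminator(real_images, labels, batch_size=10)
d_fake, d_fake_logits = discriminator(fake_images, labels, batch_size=10, reuse=True)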