Пример #1
0
def single_resblock(adain_use, is_training, residual_device, initializer,
                    scope, weight_decay, weight_decay_rate, x, layer, style,
                    filters, other_info):
    """Build one residual block normalized by batch norm or AdaIN.

    Args:
        adain_use: If falsy, use batch normalization; otherwise use
            adaptive instance normalization driven by `style`.
        is_training: Batch-norm training flag.
        residual_device: Device on which block parameters are updated.
        initializer: Weight initializer for the two convolutions.
        scope: Name prefix forwarded to conv2d.
        weight_decay: Whether weight decay is applied to conv weights.
        weight_decay_rate: Weight-decay coefficient.
        x: Input feature map (NHWC).
        layer: Integer used to build per-layer variable-scope names.
        style: Style tensor for AdaIN (only read when `adain_use` is truthy).
        filters: Output filter count for both 3x3 convolutions.
        other_info: Mixer mode, 'DenseMixer' or 'ResidualMixer'.

    Returns:
        `x + conv2` for 'ResidualMixer' (residual connection),
        `conv2` for 'DenseMixer'.

    Raises:
        ValueError: If `other_info` is not a supported mixer mode.
    """
    # Fail fast on an unsupported mixer mode. The original code left
    # `norm1` (adain path) and `output` unassigned in that case and
    # crashed later with UnboundLocalError.
    if other_info not in ('DenseMixer', 'ResidualMixer'):
        raise ValueError("Unsupported other_info: %r" % (other_info,))

    if not adain_use:
        norm1 = batch_norm(x=x,
                           is_training=is_training,
                           scope="layer%d_bn1" % layer,
                           parameter_update_device=residual_device)
    else:
        if other_info == 'DenseMixer':
            # Tile the style tensor along its last axis so its size
            # matches x's channel count before AdaIN.
            # NOTE(review): assumes style has 5 dims here — confirm
            # against the caller.
            travel_times = int(int(x.shape[3]) / int(style.shape[4]))
            style_tile = tf.tile(style, [1, 1, 1, 1, travel_times])
            norm1 = adaptive_instance_norm(content=x, style=style_tile)
        else:  # 'ResidualMixer'
            norm1 = adaptive_instance_norm(content=x, style=style)

    act1 = relu(norm1)
    conv1 = conv2d(x=act1,
                   output_filters=filters,
                   scope="layer%d_conv1" % layer,
                   parameter_update_device=residual_device,
                   kh=3,
                   kw=3,
                   sh=1,
                   sw=1,
                   initializer=initializer,
                   weight_decay=weight_decay,
                   name_prefix=scope,
                   weight_decay_rate=weight_decay_rate)
    if not adain_use:
        norm2 = batch_norm(x=conv1,
                           is_training=is_training,
                           scope="layer%d_bn2" % layer,
                           parameter_update_device=residual_device)
    else:
        # Second normalization always uses the untiled style tensor,
        # matching the original behavior for both mixer modes.
        norm2 = adaptive_instance_norm(content=conv1, style=style)
    act2 = relu(norm2)
    conv2 = conv2d(x=act2,
                   output_filters=filters,
                   scope="layer%d_conv2" % layer,
                   parameter_update_device=residual_device,
                   initializer=initializer,
                   weight_decay=weight_decay,
                   name_prefix=scope,
                   weight_decay_rate=weight_decay_rate,
                   kh=3,
                   kw=3,
                   sh=1,
                   sw=1)

    if other_info == 'ResidualMixer':
        # Residual (skip) connection around the two convolutions.
        output = x + conv2
    else:  # 'DenseMixer'
        output = conv2

    return output
Пример #2
0
    def decoder(x,
                output_width,
                output_filters,
                layer,
                do_norm=False,
                dropout=False):
        """One decoder step: ReLU -> deconv, then optional norm and dropout.

        Uses the enclosing scope's batch_size, decoder_device, scope,
        initializer, weight-decay settings, adain_use and is_training.
        """
        target_shape = [batch_size, output_width, output_width, output_filters]
        dec = deconv2d(x=tf.nn.relu(x),
                       output_shape=target_shape,
                       scope="layer%d_conv" % layer,
                       parameter_update_device=decoder_device,
                       weight_decay=weight_decay,
                       initializer=initializer,
                       name_prefix=scope,
                       weight_decay_rate=weight_decay_rate)

        if do_norm:
            # IMPORTANT: normalization for last layer
            # Very important, otherwise GAN is unstable
            if adain_use:
                dec = layer_norm(x=dec,
                                 scope="layer%d_ln" % layer,
                                 parameter_update_device=decoder_device)
            else:
                dec = batch_norm(dec,
                                 is_training,
                                 scope="layer%d_bn" % layer,
                                 parameter_update_device=decoder_device)

        if dropout:
            dec = tf.nn.dropout(dec, 0.5)

        return dec
Пример #3
0
    def encoder(x, output_filters, layer):
        """One encoder step: leaky ReLU -> conv, then a mode-dependent norm.

        Uses the enclosing scope's encoder_device, scope, initializer,
        weight-decay settings, adain_use and is_training. Normalization:
        batch norm when adain_use is falsy; instance norm when adain_use
        is set and this is the content branch; otherwise no norm.
        """
        act = lrelu(x)
        conv = conv2d(x=act,
                      output_filters=output_filters,
                      scope="layer%d_conv" % layer,
                      parameter_update_device=encoder_device,
                      initializer=initializer,
                      weight_decay=weight_decay,
                      name_prefix=scope,
                      weight_decay_rate=weight_decay_rate)
        if not adain_use:
            enc = batch_norm(conv, is_training, scope="layer%d_bn" % layer,
                             parameter_update_device=encoder_device)
        elif 'content' in scope:
            # adain_use is already truthy here; the original redundant
            # `adain_use==True` comparison is dropped (PEP 8: never
            # compare booleans with ==).
            enc = instance_norm(x=conv, scope="layer%d_in" % layer,
                                parameter_update_device=encoder_device)
        else:
            # AdaIN mode on a non-content branch: pass through unnormalized.
            enc = conv
        return enc
Пример #4
0
def vgg_16_net(image,
               batch_size,
               device,
               keep_prob,
               initializer,
               reuse=False,
               network_usage='-1',
               output_high_level_features=[-1]):
    """VGG-16 feature extractor with batch norm after every convolution.

    Args:
        image: Input image batch (NHWC).
        batch_size: Static batch size, used to flatten before the fc layers.
        device: Device on which parameters are updated.
        keep_prob: Dropout keep probability applied after fc6.
        initializer: Weight initializer for the conv/fc layers.
        reuse: If True, reuse the variables of an already-built scope.
        network_usage: Scope prefix selecting which copy of the net to build.
        output_high_level_features: Block indices (1..7) whose outputs are
            collected; conv blocks export the LAST conv's pre-BN/pre-ReLU
            output, fc blocks export the pre-activation fc output. The
            default list is only read (membership tests), never mutated.

    Returns:
        (features, return_str): the collected feature tensors, in block
        order, and the network name string.
    """
    is_training = False
    weight_decay = False
    return_str = "Vgg16Net"
    # `eps` is a module-level constant defined elsewhere in this file.
    weight_decay_rate = eps

    usage_scope = network_usage + '/ext_vgg16net'

    with tf.variable_scope(usage_scope):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        features = list()

        def _conv(x, filters, conv_scope):
            # 3x3 / stride-1 SAME convolution shared by every VGG block.
            return conv2d(x=x,
                          output_filters=filters,
                          kh=3,
                          kw=3,
                          sh=1,
                          sw=1,
                          padding='SAME',
                          parameter_update_device=device,
                          weight_decay=weight_decay,
                          initializer=initializer,
                          scope=conv_scope,
                          weight_decay_rate=weight_decay_rate)

        def _bn_relu(x, bn_scope):
            # Batch norm followed by ReLU, applied after every conv.
            return relu(batch_norm(x=x,
                                   is_training=is_training,
                                   scope=bn_scope,
                                   parameter_update_device=device))

        def _pool(x, pool_name):
            # 2x2 max pooling halving the spatial resolution.
            return tf.nn.max_pool(value=x,
                                  ksize=[1, 2, 2, 1],
                                  strides=[1, 2, 2, 1],
                                  padding='SAME',
                                  name=pool_name)

        # (filters, number of convolutions) for VGG-16 blocks 1..5.
        # Scope names conv{b}_{i} / bn{b}_{i} / pool{b} are preserved
        # exactly from the original unrolled implementation.
        block_specs = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]
        x = image
        for block_idx, (filters, n_convs) in enumerate(block_specs, start=1):
            for conv_idx in range(1, n_convs + 1):
                x = _conv(x, filters, 'conv%d_%d' % (block_idx, conv_idx))
                # The exported feature for a conv block is the LAST conv's
                # output taken BEFORE batch norm and ReLU.
                if (conv_idx == n_convs
                        and block_idx in output_high_level_features):
                    features.append(x)
                x = _bn_relu(x, 'bn%d_%d' % (block_idx, conv_idx))
            x = _pool(x, 'pool%d' % block_idx)

        # block 6: first fully connected layer (feature taken pre-activation).
        fc6 = tf.reshape(x, [batch_size, -1])
        fc6 = fc(x=fc6,
                 output_size=4096,
                 scope="fc6",
                 weight_decay=weight_decay,
                 initializer=initializer,
                 parameter_update_device=device,
                 weight_decay_rate=weight_decay_rate)
        if 6 in output_high_level_features:
            features.append(fc6)
        fc6 = tf.nn.dropout(x=relu(fc6), keep_prob=keep_prob)

        # block 7: second fully connected layer (feature taken pre-activation).
        fc7 = tf.reshape(fc6, [batch_size, -1])
        fc7 = fc(x=fc7,
                 output_size=4096,
                 scope="fc7",
                 weight_decay=weight_decay,
                 initializer=initializer,
                 parameter_update_device=device,
                 weight_decay_rate=weight_decay_rate)
        if 7 in output_high_level_features:
            features.append(fc7)

        return features, return_str
Пример #5
0
def decoder_adobenet_framework(encoded_layer_list,
                               decoder_input_org,
                               is_training,
                               output_width,
                               output_filters,
                               batch_size,
                               decoder_device,
                               scope,
                               initializer,
                               weight_decay,
                               weight_decay_rate,
                               adain_use,
                               reuse=False,
                               other_info=None):
    """Build the AdobeNet decoder: resblocks, dilated convs, tanh output.

    Args:
        decoder_input_org: Input feature map fed to the first resblock.
        is_training: Batch-norm training flag forwarded to the blocks.
        decoder_device: Device on which parameters live/are updated.
        scope: Variable-scope name for the decoder.
        initializer: Weight initializer for all layers.
        weight_decay: Whether to apply weight decay.
        weight_decay_rate: Weight-decay coefficient.
        reuse: If True, reuse variables of an already-built scope.

    NOTE(review): `encoded_layer_list`, `output_width`, `output_filters`,
    `batch_size`, `adain_use` and `other_info` are accepted but never read
    here — kept for signature compatibility with sibling decoder
    frameworks. The original body also assigned `adain_use = False` and
    two never-used locals; all three dead statements are removed.

    Returns:
        (generated_img, full_feature_list, return_str): the tanh-activated
        single-channel output, every intermediate feature map, and a
        descriptive name string.
    """
    full_feature_list = list()
    with tf.variable_scope(tf.get_variable_scope()):
        with tf.device(decoder_device):
            with tf.variable_scope(scope):
                if reuse:
                    tf.get_variable_scope().reuse_variables()
                # Layer 1: plain conv resblock.
                normal_conv_resblock1 = normal_conv_resblock(
                    x=decoder_input_org,
                    initializer=initializer,
                    is_training=is_training,
                    layer=1,
                    kh=3,
                    kw=3,
                    sh=1,
                    sw=1,
                    batch_norm_used=True,
                    weight_decay=weight_decay,
                    weight_decay_rate=weight_decay_rate,
                    scope="layer%d_normal_resblock" % 1,
                    parameter_update_devices=decoder_device)
                full_feature_list.append(normal_conv_resblock1)

                # Layers 2-3: dilated resblocks with growing dilation rate
                # (2, then 4) to widen the receptive field without pooling.
                dilated_conv_resblock1 = dilated_conv_resblock(
                    x=normal_conv_resblock1,
                    initializer=initializer,
                    is_training=is_training,
                    layer=2,
                    dilation=2,
                    kh=3,
                    kw=3,
                    batch_norm_used=True,
                    weight_decay=weight_decay,
                    weight_decay_rate=weight_decay_rate,
                    scope="layer%d_dilated_resblock" % 2,
                    parameter_update_devices=decoder_device)
                full_feature_list.append(dilated_conv_resblock1)

                dilated_conv_resblock2 = dilated_conv_resblock(
                    x=dilated_conv_resblock1,
                    initializer=initializer,
                    is_training=is_training,
                    layer=3,
                    dilation=4,
                    kh=3,
                    kw=3,
                    batch_norm_used=True,
                    weight_decay=weight_decay,
                    weight_decay_rate=weight_decay_rate,
                    scope="layer%d_dilated_resblock" % 3,
                    parameter_update_devices=decoder_device)
                full_feature_list.append(dilated_conv_resblock2)

                # Layer 4: single dilated conv + BN + ReLU.
                dilated_conv_1 = relu(
                    batch_norm(x=dilated_conv2d(
                        x=dilated_conv_resblock2,
                        output_filters=128,
                        weight_decay_rate=weight_decay_rate,
                        weight_decay=weight_decay,
                        kh=3,
                        kw=3,
                        dilation=2,
                        initializer=initializer,
                        scope="layer%d_dilated_conv" % 4,
                        parameter_update_device=decoder_device,
                        name_prefix=scope),
                               is_training=is_training,
                               scope="layer%d_bn" % 4,
                               parameter_update_device=decoder_device))

                full_feature_list.append(dilated_conv_1)

                # Layer 5: 1-channel conv squashed to [-1, 1] by tanh.
                generated_img = tf.nn.tanh(
                    conv2d(x=dilated_conv_1,
                           output_filters=1,
                           weight_decay_rate=weight_decay_rate,
                           weight_decay=weight_decay,
                           kh=3,
                           kw=3,
                           sw=1,
                           sh=1,
                           initializer=initializer,
                           scope="layer%d_normal_conv" % 5,
                           parameter_update_device=decoder_device,
                           name_prefix=scope))
                full_feature_list.append(generated_img)

    return_str = "AdobeNet-Decoder %d Layers" % len(full_feature_list)

    return generated_img, full_feature_list, return_str