def CAE_C_AE(input, input_labels, reuse=True):
    cae_e, _ = Encoder(input,
                       z_num=encoded_dimension,
                       repeat_num=cnn_layers,
                       hidden_num=node_growth_per_layer,
                       data_format=data_format,
                       reuse=reuse,
                       var_scope='CAE_Encoder')
    c_e, _ = Encoder(input,
                     z_num=encoded_dimension,
                     repeat_num=cnn_layers,
                     hidden_num=node_growth_per_layer,
                     data_format=data_format,
                     reuse=reuse,
                     var_scope='C_Encoder')
    ae_e, _ = Encoder(input,
                      z_num=encoded_dimension,
                      repeat_num=cnn_layers,
                      hidden_num=node_growth_per_layer,
                      data_format=data_format,
                      reuse=reuse,
                      var_scope='AE_Encoder')
    cae_lp, _ = ffnn(cae_e,
                     num_layers=ffnn_num_layers,
                     width=ffnn_width,
                     output_dim=n_labels,
                     activations=ffnn_activations,
                     reuse=reuse,
                     activate_last_layer=False,
                     var_scope='CAE_FFNN')
    c_lp, _ = ffnn(c_e,
                   num_layers=ffnn_num_layers,
                   width=ffnn_width,
                   output_dim=n_labels,
                   activations=ffnn_activations,
                   reuse=reuse,
                   activate_last_layer=False,
                   var_scope='C_FFNN')
    cae_aei, _ = Decoder(cae_e,
                         input_channel=image_channels,
                         repeat_num=cnn_layers,
                         hidden_num=node_growth_per_layer,
                         data_format=data_format,
                         reuse=reuse,
                         final_size=scale_size,
                         var_scope='CAE_Decoder')
    ae_aei, _ = Decoder(ae_e,
                        input_channel=image_channels,
                        repeat_num=cnn_layers,
                        hidden_num=node_growth_per_layer,
                        data_format=data_format,
                        reuse=reuse,
                        final_size=scale_size,
                        var_scope='AE_Decoder')
    cae_loss_ae = tf.losses.mean_squared_error(input, cae_aei)
    ae_loss_ae = tf.losses.mean_squared_error(input, ae_aei)
    cae_loss_ce = CrossEntropy(cae_lp, input_labels)
    c_loss_ce = CrossEntropy(c_lp, input_labels)
    return cae_aei, ae_aei, cae_lp, c_lp, cae_loss_ae, ae_loss_ae, cae_loss_ce, c_loss_ce
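
The CrossEntropy helper called throughout these examples is not shown on this page. Below is a minimal sketch of what it plausibly does, assuming labels take values in {-1, 0, +1} with 0 marking an unknown attribute (consistent with the tf.abs(...) masking used elsewhere on this page); this is a hypothetical reconstruction, not the original helper:

import tensorflow as tf

def CrossEntropy(scores, labels):
    # Rescale targets from [-1, 1] to [0, 1], apply sigmoid cross-entropy to
    # the raw scores (logits), and mask each term by |label| so unknown (0)
    # labels contribute nothing to the loss.
    targets = (labels + 1.) / 2.
    per_element = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets,
                                                          logits=scores)
    return tf.reduce_mean(per_element * tf.abs(labels))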
Example 2
def pin_cnn(input,
            lbls=None,
            n_labels=None,
            reuse=False,
            encoded_dimension=16,
            cnn_layers=4,
            node_growth_per_layer=4,
            data_format='NHWC',
            image_channels=3):
    z, enc_vars = Encoder(input,
                          z_num=encoded_dimension,
                          repeat_num=cnn_layers,
                          hidden_num=node_growth_per_layer,
                          data_format=data_format,
                          reuse=reuse)
    # if lbls is None:
    #     z_pin = tf.concat([tf.tanh(z[:, :n_labels]), z[:, n_labels:]], 1)
    # else:
    #     z_pin = tf.concat([tf.tanh(z[:, :n_labels]) * (1. - tf.abs(lbls)) + lbls * tf.abs(lbls), z[:, n_labels:]], 1)
    output, dec_vars = Decoder(z,
                               input_channel=image_channels,
                               repeat_num=cnn_layers,
                               hidden_num=node_growth_per_layer,
                               data_format=data_format,
                               reuse=reuse,
                               final_size=scale_size)
    output = tf.maximum(tf.minimum(output, 1.), 0.)
    return z, output, enc_vars, dec_vars
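
A hedged usage sketch for pin_cnn; the placeholder shape and label count are illustrative, and scale_size, Encoder, and Decoder are assumed to be defined as in the surrounding project:

x = tf.placeholder(tf.float32, [None, scale_size, scale_size, 3])
z, x_out, enc_vars, dec_vars = pin_cnn(x, n_labels=10)
recon_loss = tf.losses.mean_squared_error(x, x_out)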
Example 3
def oae_nonlinear_cnn(input,
                      lbls,
                      n_labels=None,
                      reuse=False,
                      encoded_dimension=16,
                      cnn_layers=4,
                      node_growth_per_layer=4,
                      data_format='NHWC',
                      image_channels=3,
                      var_scope='oae_nonlinear_cnn'):
    with tf.variable_scope(var_scope):
        z, enc_vars = Encoder(input,
                              z_num=encoded_dimension,
                              repeat_num=cnn_layers,
                              hidden_num=node_growth_per_layer,
                              data_format=data_format,
                              reuse=reuse)
        z_o, offset_vars = ffnn(tf.concat([z, lbls], 1),
                                num_layers=3,
                                width=encoded_dimension,
                                output_dim=encoded_dimension,
                                activations=tf.elu,
                                activate_last_layer=False,
                                var_scope='offset',
                                reuse=reuse)
        output, dec_vars = Decoder(z_o,
                                   input_channel=image_channels,
                                   repeat_num=cnn_layers,
                                   hidden_num=node_growth_per_layer,
                                   data_format=data_format,
                                   reuse=reuse,
                                   final_size=scale_size)
        output = tf.maximum(tf.minimum(output, 1.), 0.)
    return z, z_o, output, enc_vars, dec_vars, offset_vars
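
For contrast with the linear variant in the next example: here the label-conditioned shift of the latent code is learned by a small ffnn applied to [z, lbls], rather than read from a fixed offset matrix. An illustrative call (shapes and label count assumed, not from the source):

x = tf.placeholder(tf.float32, [None, scale_size, scale_size, 3])
lbls = tf.placeholder(tf.float32, [None, 10])
z, z_o, x_hat, enc_v, dec_v, off_v = oae_nonlinear_cnn(x, lbls)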
Example 4
def oae_linear_cnn(input,
                   lbls,
                   n_labels=None,
                   reuse=False,
                   encoded_dimension=16,
                   cnn_layers=4,
                   node_growth_per_layer=4,
                   data_format='NHWC',
                   image_channels=3,
                   var_scope='oae_linear_cnn'):
    with tf.variable_scope(var_scope, reuse=reuse):
        z, enc_vars = Encoder(input,
                              z_num=encoded_dimension,
                              repeat_num=cnn_layers,
                              hidden_num=node_growth_per_layer,
                              data_format=data_format,
                              reuse=reuse)
        # One latent-space direction per label, created with tf.get_variable
        # so the reuse flag on the enclosing variable scope applies.
        offsets = tf.get_variable('offsets',
                                  shape=[int(lbls.shape[1]), encoded_dimension],
                                  dtype=z.dtype)
        z_o = tf.add(z, tf.matmul(lbls, offsets))
        output, dec_vars = Decoder(z_o,
                                   input_channel=image_channels,
                                   repeat_num=cnn_layers,
                                   hidden_num=node_growth_per_layer,
                                   data_format=data_format,
                                   reuse=reuse,
                                   final_size=scale_size)
        output = tf.maximum(tf.minimum(output, 1.), 0.)
    return z, z_o, output, enc_vars, dec_vars, offsets
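
A toy check of the linear-offset step in isolation (pure NumPy, illustrative values): each row of offsets is one label's direction in latent space, and tf.matmul(lbls, offsets) shifts z by the signed sum of the active directions.

import numpy as np
z = np.zeros((2, 4))                    # two samples, 4-dim latent code
offsets = np.arange(8.).reshape(2, 4)   # one 4-dim direction per label
lbls = np.array([[1., 0.],              # sample 0: label 0 active
                 [0., -1.]])            # sample 1: label 1 negated
z_o = z + lbls @ offsets                # [[0, 1, 2, 3], [-4, -5, -6, -7]]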
Example 5
def pin_cnn(images, true_labels=None, counter_labels = None, n_labels=None, reuse=False, encoded_dimension=16, cnn_layers=4,
            node_growth_per_layer=4, data_format='NHWC', image_channels=3):
    embeddings, enc_vars = Encoder(images, z_num=encoded_dimension, repeat_num=cnn_layers, hidden_num=node_growth_per_layer,
                          data_format=data_format, reuse=reuse)
    latent_embeddings = embeddings[:, n_labels:]
    scores = embeddings[:, :n_labels]
    estimated_labels = tf.tanh(embeddings[:, :n_labels])
    
    if true_labels is None:
        fixed_labels = estimated_labels
    else:
        fixed_labels = estimated_labels * (1 - tf.abs(true_labels)) + true_labels
    
    autoencoded, dec_vars = Decoder(tf.concat([latent_embeddings, fixed_labels], 1), input_channel=image_channels, repeat_num=cnn_layers,
                               hidden_num=node_growth_per_layer, data_format=data_format, reuse=reuse,
                               final_size=scale_size)
    reencoded_embeddings, _ = Encoder(autoencoded, z_num=encoded_dimension, repeat_num=cnn_layers, hidden_num=node_growth_per_layer,
                          data_format=data_format, reuse=True)
    reencoded_latent_embeddings = reencoded_embeddings[:, n_labels:]
    reencoded_scores = reencoded_embeddings[:, :n_labels]
    reencoded_estimated_labels = tf.tanh(reencoded_embeddings[:, :n_labels])
Example 6
        input=x_trn,
        lbls=img_lbls if lambda_pin_value > 0. else None,
        n_labels=n_labels,
        reuse=False,
        encoded_dimension=encoded_dimension,
        cnn_layers=cnn_layers,
        node_growth_per_layer=node_growth_per_layer,
        data_format=data_format,
        image_channels=image_channels)

    # The model for the generated_data training samples
    y = tf.random_normal([batch_size_g, dimension_g], dtype=tf.float32)
    x_gen, gen_vars = Decoder(y,
                              input_channel=image_channels,
                              repeat_num=cnn_layers,
                              hidden_num=node_growth_per_layer,
                              data_format=data_format,
                              reuse=False,
                              final_size=scale_size,
                              var_scope='Gen')
    x_gen = tf.maximum(tf.minimum(x_gen, 1.), 0.)
    z_gen, x_out_gen, _, _ = pin_cnn(
        input=x_gen,
        n_labels=n_labels,
        reuse=True,
        encoded_dimension=encoded_dimension,
        cnn_layers=cnn_layers,
        node_growth_per_layer=node_growth_per_layer,
        data_format=data_format,
        image_channels=image_channels)

    # Define the losses
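Example 7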
def pin_cnn(images,
            true_labels=None,
            counter_labels=None,
            n_labels=None,
            reuse=False,
            encoded_dimension=16,
            cnn_layers=4,
            node_growth_per_layer=4,
            data_format='NHWC',
            image_channels=3):
    embeddings, enc_vars = Encoder(images,
                                   z_num=encoded_dimension,
                                   repeat_num=cnn_layers,
                                   hidden_num=node_growth_per_layer,
                                   data_format=data_format,
                                   reuse=reuse)
    latent_embeddings = embeddings[:, n_labels:]
    scores = embeddings[:, :n_labels]
    estimated_labels = tf.tanh(embeddings[:, :n_labels])

    if true_labels is None:
        fixed_labels = estimated_labels
    else:
        fixed_labels = estimated_labels * (1 - tf.abs(true_labels)) + true_labels

    autoencoded, dec_vars = Decoder(tf.concat(
        [latent_embeddings, fixed_labels], 1),
                                    input_channel=image_channels,
                                    repeat_num=cnn_layers,
                                    hidden_num=node_growth_per_layer,
                                    data_format=data_format,
                                    reuse=reuse,
                                    final_size=scale_size)
    autoencoded = tf.maximum(tf.minimum(autoencoded, 1.), 0.)
    reencoded_embeddings, _ = Encoder(autoencoded,
                                      z_num=encoded_dimension,
                                      repeat_num=cnn_layers,
                                      hidden_num=node_growth_per_layer,
                                      data_format=data_format,
                                      reuse=True)
    reencoded_latent_embeddings = reencoded_embeddings[:, n_labels:]
    reencoded_scores = reencoded_embeddings[:, :n_labels]
    reencoded_estimated_labels = tf.tanh(reencoded_embeddings[:, :n_labels])
    output = {
        'images': images,
        'true_labels': true_labels,
        'embeddings': embeddings,
        'scores': scores,
        'latent_embeddings': latent_embeddings,
        'estimated_labels': estimated_labels,
        'fixed_labels': fixed_labels,
        'autoencoded': autoencoded,
        'reencoded_embeddings': reencoded_embeddings,
        'reencoded_scores': reencoded_scores,
        'reencoded_latent_embeddings': reencoded_latent_embeddings,
        'reencoded_estimated_labels': reencoded_estimated_labels,
        'enc_vars': enc_vars,
        'dec_vars': dec_vars,
        'losses': {
            # summed absolute error, averaged over the batch
            # (alternative: tf.losses.mean_squared_error(images, autoencoded))
            'ae': tf.reduce_sum(tf.abs(images - autoencoded)) /
                  tf.cast(images.shape[0], tf.float32),
            'label': tf.zeros_like(scores) if true_labels is None
                     else CrossEntropy(scores, true_labels),
            're_embed': tf.losses.mean_squared_error(
                latent_embeddings, reencoded_latent_embeddings),
            're_label': CrossEntropy(reencoded_scores, fixed_labels)
        }
    }
    if true_labels is not None:
        output['true_labels'] = true_labels

    if counter_labels is not None:
        counter_fixed_labels = estimated_labels * (1 - tf.abs(counter_labels)) + counter_labels
        counter_autoencoded, _ = Decoder(
            tf.concat([latent_embeddings, counter_fixed_labels], 1),
            input_channel=image_channels,
            repeat_num=cnn_layers,
            hidden_num=node_growth_per_layer,
            data_format=data_format,
            reuse=True,
            final_size=scale_size)
        counter_autoencoded = tf.maximum(tf.minimum(counter_autoencoded, 1.), 0.)
        counter_reencoded_embeddings, _ = Encoder(
            counter_autoencoded,
            z_num=encoded_dimension,
            repeat_num=cnn_layers,
            hidden_num=node_growth_per_layer,
            data_format=data_format,
            reuse=True)
        counter_reencoded_latent_embeddings = counter_reencoded_embeddings[:, n_labels:]
        counter_reencoded_scores = counter_reencoded_embeddings[:, :n_labels]
        counter_reencoded_estimated_labels = tf.tanh(counter_reencoded_embeddings[:, :n_labels])
        output.update({
            'counter_labels': counter_labels,
            'counter_fixed_labels': counter_fixed_labels,
            'counter_autoencoded': counter_autoencoded,
            'counter_reencoded_scores': counter_reencoded_scores,
            'counter_reencoded_latent_embeddings': counter_reencoded_latent_embeddings,
            'counter_reencoded_estimated_labels': counter_reencoded_estimated_labels
        })
        output['losses'].update({
            'counter_similarity': tf.losses.mean_squared_error(autoencoded, counter_autoencoded),
            'counter_re_embed': tf.losses.mean_squared_error(latent_embeddings, counter_reencoded_latent_embeddings),
            'counter_re_label': CrossEntropy(counter_reencoded_scores, counter_fixed_labels)
        })
    return output
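
A sketch of combining the returned loss dictionary into a single objective; imgs, img_lbls, and n_labels are assumed to exist as elsewhere on this page, and the 0.1 weights and learning rate are illustrative, not from the source:

out = pin_cnn(imgs, true_labels=img_lbls, n_labels=n_labels, reuse=False)
total_loss = (out['losses']['ae']
              + out['losses']['label']
              + 0.1 * out['losses']['re_embed']
              + 0.1 * out['losses']['re_label'])
train_op = tf.train.AdamOptimizer(1e-4).minimize(
    total_loss, var_list=out['enc_vars'] + out['dec_vars'])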
Example 8
imgs, img_lbls, qr_f, qr_i = img_and_lbl_queue_setup(filenames, labels)
x = imgs
z, enc_vars = Encoder(x,
                      z_num=encoded_dimension,
                      repeat_num=cnn_layers,
                      hidden_num=node_growth_per_layer,
                      data_format=data_format,
                      reuse=False)
if lambda_pin_value > 0.:
    z_pinned = tf.concat([img_lbls, z[:, n_labels:]], 1)
else:
    z_pinned = tf.concat([tf.tanh(z[:, :n_labels]), z[:, n_labels:]], 1)
x_out, dec_vars = Decoder(z_pinned,
                          input_channel=image_channels,
                          repeat_num=cnn_layers,
                          hidden_num=node_growth_per_layer,
                          data_format=data_format,
                          reuse=False,
                          final_size=scale_size)

# The model for the generated_data training samples
y = tf.random_normal([batch_size_g, dimension_g], dtype=tf.float32)
x_g, gen_vars = Decoder(y,
                        input_channel=image_channels,
                        repeat_num=cnn_layers,
                        hidden_num=node_growth_per_layer,
                        data_format=data_format,
                        reuse=False,
                        final_size=scale_size,
                        var_scope='Gen')
z_g, _ = Encoder(x_g,
Example 9
z, enc_vars = Encoder(x,
                      z_num=encoded_dimension,
                      repeat_num=generator_layers,
                      hidden_num=node_growth_per_layer,
                      data_format=data_format,
                      reuse=False)
z_tanh = tf.tanh(z)
z2 = tf.pad(img_lbls, [[0, 0], [0, encoded_dimension - n_labels]])
z_pinned = z_tanh * (1. - tf.abs(z2)) + z2 * tf.abs(z2)
pin_loss = tf.losses.mean_squared_error(z_tanh, z_pinned, weights=tf.abs(z2))
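# Since z2 entries lie in {-1, 0, +1}, tf.abs(z2) acts as a 0/1 mask: labeled
# coordinates are overwritten with the label, the rest keep tanh(z), and
# pin_loss penalizes disagreement only on the labeled coordinates.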
if lambda_pin_value == 0.:
    z_pinned = z
x_out, dec_vars = Decoder(z_pinned,
                          input_channel=num_channels,
                          repeat_num=generator_layers,
                          hidden_num=node_growth_per_layer,
                          data_format=data_format,
                          reuse=False,
                          final_size=scale_size)
ae_loss = tf.losses.mean_squared_error(x, x_out)
lambda_pin = tf.placeholder(tf.float32, [])
overall_loss = ae_loss + lambda_pin * pin_loss
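# A minimal training-step sketch for the pinned autoencoder above; the
# learning rate and the feed value for lambda_pin are illustrative.
train_ae = tf.train.AdamOptimizer(1e-4).minimize(overall_loss,
                                                 var_list=enc_vars + dec_vars)
# in the session loop: sess.run(train_ae, feed_dict={lambda_pin: lambda_pin_value})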

img_ins, lbls_ins, fs_ins = load_practice_images(data_dir,
                                                 n_images=8,
                                                 labels=labels)
x_ins = preprocess(img_ins, image_size=image_size)
z_ins, _ = Encoder(x_ins,
                   z_num=encoded_dimension,
                   repeat_num=generator_layers,
                   hidden_num=node_growth_per_layer,
Example 10
                               repeat_num=cnn_layers,
                               hidden_num=node_growth_per_layer,
                               data_format=data_format,
                               reuse=False)
label_predictions, ffnn_vars = ffnn(embeddings,
                                    num_layers=5,
                                    width=[[2 * n_labels]] * 4 + [[n_labels]],
                                    output_dim=n_labels,
                                    activations=[tf.tanh],
                                    activate_last_layer=False,
                                    scope="FFNN",
                                    reuse=False)
autoencoded_images, dec_vars = Decoder(embeddings,
                                       input_channel=image_channels,
                                       repeat_num=cnn_layers,
                                       hidden_num=node_growth_per_layer,
                                       data_format=data_format,
                                       reuse=False,
                                       final_size=scale_size)

# Run the model on a consistent selection of out-of-sample pictures
img_oos, lbls_oos, fs_oos = load_practice_images(oos_dir,
                                                 n_images=batch_size_x,
                                                 labels=labels)
x_oos = preprocess(img_oos, image_size=image_size)
e_oos, _ = Encoder(x_oos,
                   z_num=encoded_dimension,
                   repeat_num=cnn_layers,
                   hidden_num=node_growth_per_layer,
                   data_format=data_format,
                   reuse=True)
Example 11
def fader_cnn(images,
              true_labels=None,
              counter_labels=None,
              n_labels=None,
              reuse=False,
              encoded_dimension=16,
              cnn_layers=4,
              node_growth_per_layer=4,
              data_format='NHWC',
              image_channels=3,
              ffnn_layers=3,
              ffnn_width=None,
              ffnn_activations=[tf.tanh]):
    # ffnn_width defaults to n_labels; resolved here because a parameter
    # default cannot reference another parameter of the same function.
    if ffnn_width is None:
        ffnn_width = n_labels
    embeddings, enc_vars = Encoder(images,
                                   z_num=encoded_dimension,
                                   repeat_num=cnn_layers,
                                   hidden_num=node_growth_per_layer,
                                   data_format=data_format,
                                   reuse=reuse)
    latent_embeddings = embeddings
    scores, ffnn_vars = ffnn(embeddings,
                             num_layers=ffnn_layers,
                             width=ffnn_width,
                             output_dim=n_labels,
                             activations=ffnn_activations,
                             activate_last_layer=False,
                             var_scope='Fader_FFNN',
                             reuse=reuse)
    estimated_labels = tf.tanh(scores)

    autoencoded, dec_vars = Decoder(tf.concat([embeddings, estimated_labels],
                                              1),
                                    input_channel=image_channels,
                                    repeat_num=cnn_layers,
                                    hidden_num=node_growth_per_layer,
                                    data_format=data_format,
                                    reuse=reuse,
                                    final_size=scale_size)
    autoencoded = tf.maximum(tf.minimum(autoencoded, 1.), 0.)
    reencoded_embeddings, _ = Encoder(autoencoded,
                                      z_num=encoded_dimension,
                                      repeat_num=cnn_layers,
                                      hidden_num=node_growth_per_layer,
                                      data_format=data_format,
                                      reuse=True)
    reencoded_latent_embeddings = reencoded_embeddings
    reencoded_scores, _ = ffnn(reencoded_embeddings,
                               num_layers=ffnn_layers,
                               width=ffnn_width,
                               output_dim=n_labels,
                               activations=ffnn_activations,
                               activate_last_layer=False,
                               var_scope='Fader_FFNN',
                               reuse=True)
    reencoded_estimated_labels = tf.tanh(reencoded_scores)
    if true_labels is not None:
        fixed_labels = estimated_labels * (1 - tf.abs(true_labels)) + true_labels
    else:
        fixed_labels = estimated_labels
    output = {
        'latent_embeddings': latent_embeddings,
        'scores': scores,
        'estimated_labels': estimated_labels,
        'fixed_labels': fixed_labels,
        'autoencoded': autoencoded,
        'reencoded_scores': reencoded_scores,
        'reencoded_latent_embeddings': reencoded_latent_embeddings,
        'reencoded_estimated_labels': reencoded_estimated_labels,
        'enc_vars': enc_vars,
        'dec_vars': dec_vars,
        'ffnn_vars': ffnn_vars,
        'images': images,
        'losses': {
            'ae': tf.losses.mean_squared_error(images, autoencoded),
            'label': CrossEntropy(scores, true_labels)
                     if true_labels is not None else tf.zeros_like(scores),
            're_embed': tf.losses.mean_squared_error(
                latent_embeddings, reencoded_latent_embeddings),
            're_label': CrossEntropy(reencoded_scores, fixed_labels)
        }
    }
    if true_labels is not None:
        output['true_labels'] = true_labels

    if counter_labels is not None:
        counter_fixed_labels = estimated_labels * (1 - tf.abs(counter_labels)) + counter_labels
        counter_autoencoded, _ = Decoder(
            tf.concat([latent_embeddings, counter_fixed_labels], 1),
            input_channel=image_channels,
            repeat_num=cnn_layers,
            hidden_num=node_growth_per_layer,
            data_format=data_format,
            reuse=True,
            final_size=scale_size)
        counter_autoencoded = tf.minimum(tf.maximum(counter_autoencoded, 0.), 1.)
        counter_reencoded_embeddings, _ = Encoder(
            counter_autoencoded,
            z_num=encoded_dimension,
            repeat_num=cnn_layers,
            hidden_num=node_growth_per_layer,
            data_format=data_format,
            reuse=True)
        counter_reencoded_latent_embeddings = counter_reencoded_embeddings
        counter_reencoded_scores, _ = ffnn(counter_reencoded_embeddings,
                                           num_layers=ffnn_layers,
                                           width=ffnn_width,
                                           output_dim=n_labels,
                                           activations=ffnn_activations,
                                           activate_last_layer=False,
                                           var_scope='Fader_FFNN',
                                           reuse=True)
        counter_reencoded_estimated_labels = tf.tanh(counter_reencoded_scores)
        output.update({
            'counter_labels': counter_labels,
            'counter_fixed_labels': counter_fixed_labels,
            'counter_autoencoded': counter_autoencoded,
            'counter_reencoded_scores': counter_reencoded_scores,
            'counter_reencoded_latent_embeddings': counter_reencoded_latent_embeddings,
            'counter_reencoded_estimated_labels': counter_reencoded_estimated_labels
        })
        output['losses'].update({
            'counter_similarity': tf.losses.mean_squared_error(autoencoded, counter_autoencoded),
            'counter_re_embed': tf.losses.mean_squared_error(latent_embeddings, counter_reencoded_latent_embeddings),
            'counter_re_label': CrossEntropy(counter_reencoded_scores, counter_fixed_labels)
        })
    return output
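
An illustrative attribute-editing call for fader_cnn: passing the negated labels as counter_labels asks the decoder to reconstruct each image with every known attribute flipped (imgs, img_lbls, and n_labels assumed as elsewhere on this page):

out = fader_cnn(imgs, true_labels=img_lbls, counter_labels=-img_lbls,
                n_labels=n_labels, reuse=False)
edited_images = out['counter_autoencoded']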
Example 12
              'enc_vars': enc_vars,
              'dec_vars': dec_vars,
              'losses': {'ae': tf.losses.mean_squared_error(images, autoencoded),
                         'label': tf.zeros_like(scores) if true_labels is None else CrossEntropy(scores, true_labels),
                         're_embed': tf.losses.mean_squared_error(latent_embeddings, reencoded_latent_embeddings),
                         're_label': CrossEntropy(reencoded_scores, fixed_labels)
                         }
              }
    if true_labels is not None:
        output['true_labels'] = true_labels

    if counter_labels is not None:
        counter_fixed_labels = estimated_labels * (1 - tf.abs(counter_labels)) + counter_labels
        counter_autoencoded, _ = Decoder(tf.concat([latent_embeddings, counter_fixed_labels], 1), input_channel=image_channels, repeat_num=cnn_layers, hidden_num=node_growth_per_layer, data_format=data_format, reuse=True, final_size=scale_size)
        counter_reencoded_embeddings, _ = Encoder(counter_autoencoded, z_num=encoded_dimension, repeat_num=cnn_layers, hidden_num=node_growth_per_layer, data_format=data_format, reuse=True)
        counter_reencoded_latent_embeddings = counter_reencoded_embeddings[:, n_labels:]
        counter_reencoded_scores = counter_reencoded_embeddings[:, :n_labels]
        counter_reencoded_estimated_labels = tf.tanh(counter_reencoded_embeddings[:, :n_labels])
        output.update({'counter_labels': counter_labels,
                       'counter_fixed_labels': counter_fixed_labels,
                       'counter_autoencoded': counter_autoencoded,
                       'counter_reencoded_scores': counter_reencoded_scores,
                       'counter_reencoded_latent_embeddings': counter_reencoded_latent_embeddings,
                       'counter_reencoded_estimated_labels': counter_reencoded_estimated_labels
                       })
        output['losses'].update({'counter_similarity': tf.losses.mean_squared_error(autoencoded, counter_autoencoded),
                                 'counter_re_embed': tf.losses.mean_squared_error(latent_embeddings, counter_reencoded_latent_embeddings),
                                 'counter_re_label': CrossEntropy(counter_reencoded_scores, counter_fixed_labels)
                                 })
Example 13
x_np = np.random.uniform(0, 255, [batch_size, scale_size, scale_size, 3]) / 256
x = tf.constant(x_np, dtype=tf.float16)

# x_out, z, disc_vars = DiscriminatorCNN(x_trn=x_trn, input_channel=num_channels, z_num=encoded_dimension, repeat_num=generator_layers, hidden_num=node_growth_per_layer, data_format=data_format, reuse=False)

z, enc_vars = Encoder(x,
                      z_num=encoded_dimension,
                      repeat_num=generator_layers,
                      hidden_num=node_growth_per_layer,
                      data_format=data_format,
                      reuse=False)
x_out, dec_vars = Decoder(z,
                          input_channel=num_channels,
                          repeat_num=generator_layers,
                          hidden_num=node_growth_per_layer,
                          data_format=data_format,
                          reuse=False,
                          final_size=scale_size)

ae_loss = tf.losses.mean_squared_error(x, x_out)

train_ae = tf.train.AdamOptimizer(adam_learning_rate).minimize(
    ae_loss, var_list=enc_vars + dec_vars)
# train_ae = tf.train.AdamOptimizer(adam_learning_rate).minimize(ae_loss, var_list=disc_vars)
init_op = tf.global_variables_initializer()


def prod(x):
    # product of the elements of x; the accumulator must start at 1, not 0
    out = 1
    for xi in x:
        out *= xi
    return out

# starting the managed session routine
#######################################################################
# Start from a fresh default graph; a bare tf.Graph().as_default() call has
# no effect unless used as a context manager.
tf.reset_default_graph()

#######################################################################
# model definition
#######################################################################

# The model for the real-data training samples
imgs, img_lbls, qr_f, qr_i = img_and_lbl_queue_setup(filenames, labels, batch_size=batch_size_x)
cae_embeddings, cae_enc_vars = Encoder(imgs, z_num=encoded_dimension, repeat_num=cnn_layers, hidden_num=node_growth_per_layer, data_format=data_format, reuse=False, var_scope='CAE_Encoder')
c_embeddings, c_enc_vars     = Encoder(imgs, z_num=encoded_dimension, repeat_num=cnn_layers, hidden_num=node_growth_per_layer, data_format=data_format, reuse=False, var_scope='C_Encoder')
ae_embeddings, ae_enc_vars   = Encoder(imgs, z_num=encoded_dimension, repeat_num=cnn_layers, hidden_num=node_growth_per_layer, data_format=data_format, reuse=False, var_scope='AE_Encoder')
cae_label_predictions, cae_ffnn_vars = ffnn(cae_embeddings, num_layers=5, width=[[2 * n_labels]] * 4 + [[n_labels]], output_dim=n_labels, activations=[tf.tanh], activate_last_layer=False, reuse=False, var_scope='CAE_FFNN')
c_label_predictions, c_ffnn_vars     = ffnn(  c_embeddings, num_layers=5, width=[[2 * n_labels]] * 4 + [[n_labels]], output_dim=n_labels, activations=[tf.tanh], activate_last_layer=False, reuse=False, var_scope='C_FFNN')
cae_autoencoded_images, cae_dec_vars = Decoder(cae_embeddings, input_channel=image_channels, repeat_num=cnn_layers, hidden_num=node_growth_per_layer, data_format=data_format, reuse=False, final_size=scale_size, var_scope='CAE_Decoder')
ae_autoencoded_images, ae_dec_vars   = Decoder( ae_embeddings, input_channel=image_channels, repeat_num=cnn_layers, hidden_num=node_growth_per_layer, data_format=data_format, reuse=False, final_size=scale_size, var_scope='AE_Decoder')

# Define the losses
lambda_ae = tf.placeholder(tf.float32, [])
cae_loss_ae = tf.losses.mean_squared_error(cae_autoencoded_images, imgs)
ae_loss_ae  = tf.losses.mean_squared_error( ae_autoencoded_images, imgs)
cae_loss_lbls_ce = CrossEntropy(cae_label_predictions, img_lbls)
c_loss_lbls_ce   = CrossEntropy(  c_label_predictions, img_lbls)
cae_loss_lbls_mse = tf.losses.mean_squared_error(img_lbls, tf.tanh(cae_label_predictions), weights=tf.abs(img_lbls))
c_loss_lbls_mse   = tf.losses.mean_squared_error(img_lbls, tf.tanh(  c_label_predictions), weights=tf.abs(img_lbls))
cae_loss_combined = cae_loss_lbls_ce + lambda_ae * cae_loss_ae
c_loss_combined   =   c_loss_lbls_ce
ae_loss_combined  =                    lambda_ae *  ae_loss_ae

# Set up the optimizers
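# The snippet is cut off here; a plausible continuation sketch, with one Adam
# optimizer per model restricted to that model's variables (the learning rate
# is illustrative, not from the source).
cae_train_op = tf.train.AdamOptimizer(1e-4).minimize(
    cae_loss_combined, var_list=cae_enc_vars + cae_ffnn_vars + cae_dec_vars)
c_train_op = tf.train.AdamOptimizer(1e-4).minimize(
    c_loss_combined, var_list=c_enc_vars + c_ffnn_vars)
ae_train_op = tf.train.AdamOptimizer(1e-4).minimize(
    ae_loss_combined, var_list=ae_enc_vars + ae_dec_vars)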