Example #1
def build_training_graph(x, y, ul_x, lr, mom):
    global_step = tf.get_variable(
        name="global_step",
        shape=[],
        dtype=tf.float32,
        initializer=tf.constant_initializer(0.0),
        trainable=False,
    )
    logit = vat.forward(x)
    nll_loss = L.ce_loss(logit, y)
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        if FLAGS.method == 'vat':
            ul_logit = vat.forward(ul_x,
                                   is_training=True,
                                   update_batch_stats=False)
            vat_loss = vat.virtual_adversarial_loss(ul_x, ul_logit)
            additional_loss = vat_loss
        elif FLAGS.method == 'vatent':
            ul_logit = vat.forward(ul_x,
                                   is_training=True,
                                   update_batch_stats=False)
            vat_loss = vat.virtual_adversarial_loss(ul_x, ul_logit)
            ent_loss = L.entropy_y_x(ul_logit)
            additional_loss = vat_loss + ent_loss
        elif FLAGS.method == 'baseline':
            additional_loss = 0
        else:
            raise NotImplementedError
        loss = nll_loss + additional_loss

    opt = tf.train.AdamOptimizer(learning_rate=lr, beta1=mom)
    tvars = tf.trainable_variables()
    grads_and_vars = opt.compute_gradients(loss, tvars)
    train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)
    return loss, train_op, global_step
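
A minimal sketch of driving this training graph from a TF1 session. The placeholder shapes, batch sizes, and hyper-parameter values below are assumptions for illustration only; `vat`, `L`, and `FLAGS` are the project's own modules and must already be importable.

import numpy as np
import tensorflow as tf

# Assumed input format: 32x32 RGB images, 10 classes; adjust to the dataset.
x    = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
y    = tf.placeholder(tf.float32, shape=[None, 10])
ul_x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
lr   = tf.placeholder(tf.float32, shape=[])
mom  = tf.placeholder(tf.float32, shape=[])

loss, train_op, global_step = build_training_graph(x, y, ul_x, lr, mom)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # One illustrative step on random data; a real run feeds labeled and
    # unlabeled mini-batches from the data pipeline.
    feed = {x: np.random.rand(32, 32, 32, 3),
            y: np.eye(10)[np.random.randint(0, 10, 32)],
            ul_x: np.random.rand(128, 32, 32, 3),
            lr: 0.001, mom: 0.9}
    _, loss_val, step = sess.run([train_op, loss, global_step], feed_dict=feed)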
Example #2
def build_training_graph(x, y, ul_x, lr, mom):

    logit = vat.forward(x)

    nll_loss = L.ce_loss(logit, y)
    x_reconst = tf.constant(0)
    if FLAGS.method == 'vat':
        ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)
        vat_loss, r_adv = vat.virtual_adversarial_loss(ul_x, ul_logit)
        x_adv = ul_x + r_adv
        additional_loss = vat_loss

    elif FLAGS.method == 'vatent':
        ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)
        vat_loss, r_adv = vat.virtual_adversarial_loss(ul_x, ul_logit)
        x_adv = ul_x + r_adv
        ent_loss = L.entropy_y_x(ul_logit)
        additional_loss = vat_loss + ent_loss

    elif FLAGS.method == 'lvat':
        ul_logit = vat.forward(ul_x, is_training=True, update_batch_stats=False)

        # Encode the unlabeled batch into the latent space of the pretrained autoencoder.
        m_ae = get_ae()
        with tf.variable_scope(SCOPE_ENCODER):
            if FLAGS.ae_type == 'VAE':
                _, z, _ = m_ae.encoder(ul_x, is_train=False)
            elif FLAGS.ae_type == 'AE':
                z = m_ae.encoder(ul_x, is_train=False)
            elif FLAGS.ae_type == 'Glow':
                print('[DEBUG] ... building Glow encoder')
                with tf.variable_scope('encoder'):
                    # Named y_latent so the Glow latent does not clobber the label tensor y.
                    y_latent, logdet, z = m_ae.encoder(ul_x)

        decoder = m_ae.decoder
        if FLAGS.ae_type == 'Glow':
            print('[DEBUG] ... building Glow VAT loss function')
            vat_loss, r_adv_y, r_adv_z = vat.virtual_adversarial_loss_glow((y_latent, logdet, z), ul_logit, decoder)

            print('[DEBUG] ... building Glow decoder')
            with tf.variable_scope(SCOPE_DECODER, reuse=tf.AUTO_REUSE):
                # Decode the latent perturbation into an adversarial example and the plain reconstruction.
                x_adv     = decoder((y_latent + r_adv_y, logdet, z + r_adv_z))
                x_reconst = decoder((y_latent,           logdet, z))

        else:
            vat_loss, r_adv = vat.virtual_adversarial_loss(z, ul_logit, decoder)

            with tf.variable_scope(SCOPE_DECODER, reuse=tf.AUTO_REUSE):
                x_adv     = decoder(z + r_adv, False)
                x_reconst = decoder(z, False)

        additional_loss = vat_loss

    elif FLAGS.method == 'baseline':
        additional_loss = 0
    else:
        raise NotImplementedError

    optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=mom)
    theta_classifier = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=SCOPE_CLASSIFIER)
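
The listing above stops after the optimizer and the classifier's trainable variables are collected. A plausible continuation, modeled on Example #1 rather than taken from the project, would combine the losses and restrict the update to the classifier so the pretrained autoencoder stays frozen:

    # Assumed continuation (not part of the original listing), following Example #1.
    loss = nll_loss + additional_loss
    # Only the classifier is updated; the autoencoder used by the 'lvat'
    # branch keeps its pretrained weights.
    grads_and_vars = optimizer.compute_gradients(loss, var_list=theta_classifier)
    train_op = optimizer.apply_gradients(grads_and_vars)
    return loss, train_op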
Example #3
def build_eval_graph(x, y, ul_x):
    losses = {}
    logit = vat.forward(x, is_training=False, update_batch_stats=False)
    nll_loss = L.ce_loss(logit, y)
    losses['NLL'] = nll_loss
    acc = L.accuracy(logit, y)
    losses['Acc'] = acc
    scope = tf.get_variable_scope()
    scope.reuse_variables()
    at_loss = vat.adversarial_loss(x, y, nll_loss, is_training=True)
    losses['AT_loss'] = at_loss
    ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)
    vat_loss = vat.virtual_adversarial_loss(ul_x, ul_logit, is_training=False)
    losses['VAT_loss'] = vat_loss
    return losses
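
A short sketch of averaging these losses over a validation set. It reuses the x, y, and ul_x placeholders from the surrounding script, and val_batches is a hypothetical iterable of (images, labels, unlabeled images) tuples; the model variables are expected to exist already, since the training graph is normally built first in the same variable scope.

eval_losses = build_eval_graph(x, y, ul_x)

def evaluate(sess, val_batches):
    # Average each entry of the losses dict over all validation batches.
    sums, n = {k: 0.0 for k in eval_losses}, 0
    for xb, yb, ub in val_batches:
        vals = sess.run(eval_losses, feed_dict={x: xb, y: yb, ul_x: ub})
        for k, v in vals.items():
            sums[k] += float(v)
        n += 1
    return {k: s / max(n, 1) for k, s in sums.items()}  # mean NLL, Acc, AT_loss, VAT_loss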
Example #4
def build_eval_graph(x, y):
    logit = vat.forward(x, is_training=False, update_batch_stats=False)
    n_corrects = tf.cast(tf.equal(tf.argmax(logit, 1), tf.argmax(y, 1)),
                         tf.int32)
    return tf.reduce_sum(n_corrects), tf.shape(n_corrects)[0]
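
Because this variant returns the count of correct predictions together with the batch size instead of a per-batch mean, dataset accuracy can be accumulated exactly even when the last batch is smaller. A hedged sketch, assuming x and y placeholders, an open session, and a hypothetical iterable of batches:

n_correct_op, n_total_op = build_eval_graph(x, y)

def dataset_accuracy(sess, batches):
    correct, total = 0, 0
    for xb, yb in batches:
        c, t = sess.run([n_correct_op, n_total_op], feed_dict={x: xb, y: yb})
        correct += int(c)
        total += int(t)
    return correct / float(total)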
Example #5
def build_finetune_graph(x):
    # Forward pass with update_batch_stats=True so the batch-norm statistics
    # are re-estimated on the data fed to x; the returned no-op does nothing
    # itself but forces that forward pass to run.
    logit = vat.forward(x, is_training=True, update_batch_stats=True)
    with tf.control_dependencies([logit]):
        finetune_op = tf.no_op()
    return finetune_op
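
This op is typically run repeatedly over training data just before evaluation so that the batch-norm moving statistics settle under the final weights. A minimal sketch, assuming an x placeholder, an open session, and a hypothetical next_batch() helper:

finetune_op = build_finetune_graph(x)
for _ in range(100):  # the number of fine-tuning iterations is an assumption
    sess.run(finetune_op, feed_dict={x: next_batch()})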
Example #6
def build_eval_graph(x, y, ul_x):
    losses = {}
    logit = vat.forward(x, is_training=False, update_batch_stats=False)
    nll_loss = L.ce_loss(logit, y)
    losses['NLL'] = nll_loss
    acc = L.accuracy(logit, y)
    losses['Acc'] = acc
    scope = tf.get_variable_scope()
    scope.reuse_variables()

    results = {}
    if FLAGS.method == 'vat' or FLAGS.method == 'vatent':
        ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)
        vat_loss, r_adv = vat.virtual_adversarial_loss(ul_x, ul_logit, is_training=False)
        losses['VAT_loss'] = vat_loss
        x_adv = ul_x + r_adv
        x_reconst = ul_x                      # dummy, for compatibility with the 'lvat' branch outputs
        y_reconst = tf.argmax(ul_logit, 1)    # dummy, for compatibility with the 'lvat' branch outputs

    elif FLAGS.method == 'lvat':
        ul_logit = vat.forward(ul_x, is_training=False, update_batch_stats=False)

        m_ae = get_ae()
        decoder = m_ae.decoder
        if FLAGS.ae_type == 'Glow':
            print('[DEBUG] ... building Glow encoder in eval graph')
            with tf.variable_scope(SCOPE_ENCODER, reuse=tf.AUTO_REUSE):
                with tf.variable_scope('encoder'):
                    y_latent, logdet, z = m_ae.encoder(ul_x)
            lvat_loss, r_adv_y, r_adv_z = vat.virtual_adversarial_loss_glow((y_latent, logdet, z), ul_logit, decoder)
            print('[DEBUG] ... building Glow decoder in eval graph')
            with tf.variable_scope(SCOPE_DECODER, reuse=tf.AUTO_REUSE):
                with tf.variable_scope('decoder'):
                    x_adv     = decoder((y_latent + r_adv_y, logdet, z + r_adv_z))
                    x_reconst = decoder((y_latent,           logdet, z))

        else:
            with tf.variable_scope(SCOPE_ENCODER, reuse=tf.AUTO_REUSE):
                if FLAGS.ae_type == 'VAE':
                    _, z, _ = m_ae.encoder(ul_x, is_train=False)
                elif FLAGS.ae_type == 'AE':
                    z = m_ae.encoder(ul_x, is_train=False)
            lvat_loss, r_adv = vat.virtual_adversarial_loss(z, ul_logit, decoder)
            with tf.variable_scope(SCOPE_DECODER, reuse=tf.AUTO_REUSE):
                x_adv     = decoder(z + r_adv, False)
                x_reconst = decoder(z, False)

        losses['LVAT_loss'] = lvat_loss

        logit_reconst = vat.forward(x_reconst, is_training=False, update_batch_stats=False)
        y_reconst = tf.argmax(logit_reconst, 1)

    else:
        raise NotImplementedError

    results['x']         = ul_x
    results['x_reconst'] = x_reconst
    results['y_reconst'] = y_reconst

    results['x_adv'] = x_adv
    results['y_pred'] = tf.argmax(logit, 1)
    results['y_true'] = tf.argmax(y, 1)

    x = tf.reshape(x, (-1, FLAGS.img_size*FLAGS.img_size*3))
    x_adv = tf.reshape(x_adv, (-1, FLAGS.img_size*FLAGS.img_size*3))
    x_reconst = tf.reshape(x_reconst, (-1, FLAGS.img_size*FLAGS.img_size*3))
    results['x_diff'] = tf.norm(x - x_reconst, axis=1)
    results['x_diff_adv'] = tf.norm(x - x_adv, axis=1)

    return losses, results
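
A brief sketch of consuming both return values for one validation batch, e.g. to inspect the adversarial and reconstructed images. The placeholders and the x_val, y_val, ul_val arrays are assumptions; sess.run accepts the fetched dicts directly.

losses, results = build_eval_graph(x, y, ul_x)

vals, res = sess.run([losses, results],
                     feed_dict={x: x_val, y: y_val, ul_x: ul_val})
print('NLL %.4f  Acc %.4f' % (vals['NLL'], vals['Acc']))
# res['x_adv'] and res['x_reconst'] hold the perturbed and reconstructed images;
# res['x_diff'] and res['x_diff_adv'] are per-example L2 norms between the
# flattened input x and the reconstruction / adversarial image.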