Code Example #1
   try: os.mkdir(IMAGES_DIR)
   except: pass
   
   # placeholders for data going into the network
   global_step = tf.Variable(0, name='global_step', trainable=False)
   z           = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z')

   train_images_list = data_ops.loadData(DATA_DIR, DATASET)
   filename_queue    = tf.train.string_input_producer(train_images_list)
   real_images       = data_ops.read_input_queue(filename_queue, BATCH_SIZE)

   # generated images
   gen_images = netG(z, BATCH_SIZE)

   # get the output from D on the real and fake data
   errD_real = netD(real_images, BATCH_SIZE)
   errD_fake = netD(gen_images, BATCH_SIZE, reuse=True)

   # cost functions
   errD = tf.reduce_mean(errD_real - errD_fake)
   errG = tf.reduce_mean(errD_fake)

   # tensorboard summaries
   tf.summary.scalar('d_loss', errD)
   tf.summary.scalar('g_loss', errG)
   #tf.summary.image('real_images', real_images, max_outputs=BATCH_SIZE)
   #tf.summary.image('generated_images', gen_images, max_outputs=BATCH_SIZE)
   merged_summary_op = tf.summary.merge_all()

   # get all trainable variables, and split by network G and network D
   t_vars = tf.trainable_variables()
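
The listing stops after collecting the trainable variables. A minimal sketch of how such a script typically continues, splitting the variables by network and wiring up the update ops; the 'd_'/'g_' name prefixes are borrowed from Code Example #3, and the RMSProp-plus-weight-clipping recipe is the usual plain-WGAN choice, not something shown in this file:

   # ASSUMPTION: variables carry 'd_' / 'g_' prefixes, as in Code Example #3
   d_vars = [var for var in t_vars if 'd_' in var.name]
   g_vars = [var for var in t_vars if 'g_' in var.name]

   # ASSUMPTION: plain WGAN training (RMSProp, lr=5e-5, weight clipping on the critic)
   train_op_D = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
       errD, var_list=d_vars, global_step=global_step)
   train_op_G = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(
       errG, var_list=g_vars)

   # keep the critic approximately 1-Lipschitz by clipping its weights after each step
   clip_D = [var.assign(tf.clip_by_value(var, -0.01, 0.01)) for var in d_vars]
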
Code Example #2
    except:
        pass

    # placeholders for data going into the network
    global_step = tf.Variable(0, name='global_step', trainable=False)
    z = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z')

    train_images_list = data_ops.loadData(DATA_DIR, DATASET)
    filename_queue = tf.train.string_input_producer(train_images_list)
    real_images = data_ops.read_input_queue(filename_queue, BATCH_SIZE)

    # generated images
    gen_images = netG(z, BATCH_SIZE)

    # get the output from D on the real and fake data
    errD_real = netD(real_images, BATCH_SIZE, SELU, NORM)
    errD_fake = netD(gen_images, BATCH_SIZE, SELU, NORM, reuse=True)

    # cost functions
    errD = tf.reduce_mean(errD_real) - tf.reduce_mean(errD_fake)
    errG = tf.reduce_mean(errD_fake)

    # gradient penalty
    epsilon = tf.random_uniform([], 0.0, 1.0)
    x_hat = real_images * epsilon + (1 - epsilon) * gen_images
    d_hat = netD(x_hat, BATCH_SIZE, SELU, NORM, reuse=True)
    gradients = tf.gradients(d_hat, x_hat)[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]))
    gradient_penalty = 10 * tf.reduce_mean((slopes - 1.0)**2)
    errD += gradient_penalty
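
For reference, the WGAN-GP paper draws one interpolation coefficient per example and takes the gradient norm over all non-batch axes, whereas the snippet above uses a single scalar epsilon and reduces over axis 1 only. A sketch of the paper's variant, assuming real_images / gen_images are 4-D NHWC image tensors (the snippet does not state their shape):

    # one epsilon per example, broadcast over height, width and channels
    epsilon = tf.random_uniform([BATCH_SIZE, 1, 1, 1], 0.0, 1.0)
    x_hat = epsilon * real_images + (1.0 - epsilon) * gen_images
    d_hat = netD(x_hat, BATCH_SIZE, SELU, NORM, reuse=True)

    gradients = tf.gradients(d_hat, x_hat)[0]
    # per-example L2 norm of the gradient, summed over all non-batch axes
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    gradient_penalty = 10.0 * tf.reduce_mean(tf.square(slopes - 1.0))
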
Code Example #3
    images_dir = checkpoint_dir + 'images/'

    # placeholders for data going into the network
    global_step = tf.Variable(0, name='global_step', trainable=False)
    z = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1024), name='z')

    train_images_list = data_ops.loadCeleba(DATA_DIR, DATASET)
    filename_queue = tf.train.string_input_producer(train_images_list)
    real_images = data_ops.read_input_queue(filename_queue, BATCH_SIZE)

    # generated images
    gen_images = netG(z, BATCH_SIZE)

    # get the output from D on the real and fake data
    errD_real = tf.reduce_mean(netD(real_images, BATCH_SIZE))
    errD_fake = tf.reduce_mean(netD(gen_images, BATCH_SIZE, reuse=True))

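    # least-squares (LSGAN-style) costs: push D's mean output toward 1 on real
    # images and toward 0 on generated ones; G pushes D's output on fakes toward 1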
    errD = 0.5 * (tf.square(errD_real - 1)) + 0.5 * (tf.square(errD_fake))
    errG = 0.5 * (tf.square(errD_fake - 1))

    # tensorboard summaries
    tf.summary.scalar('d_loss', errD)
    tf.summary.scalar('g_loss', errG)
    #tf.summary.image('real_images', real_images, max_outputs=BATCH_SIZE)
    #tf.summary.image('generated_images', gen_images, max_outputs=BATCH_SIZE)
    merged_summary_op = tf.summary.merge_all()

    # get all trainable variables, and split by network G and network D
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'd_' in var.name]
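
The listing ends at the discriminator-variable split. A minimal sketch of how merged_summary_op is usually consumed in a TF1 training loop; SUMMARY_DIR, sess and batch_z are hypothetical names that do not appear in the snippet:

    # write the graph plus the merged scalar summaries for TensorBoard
    summary_writer = tf.summary.FileWriter(SUMMARY_DIR, graph=tf.get_default_graph())

    # inside the training loop:
    #   summary, step = sess.run([merged_summary_op, global_step], feed_dict={z: batch_z})
    #   summary_writer.add_summary(summary, step)
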
Code Example #4
File: train.py    Project: cameronfabbri/Cramer-Gan
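 # Cramer GAN critic: distance between D's features of x and those of a second
 # generated sample, minus the norm of D's features of x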
 def critic(x):
     return tf.norm(netD(x, reuse=True) - netD(gen_images2, reuse=True),
                    axis=1) - tf.norm(netD(x, reuse=True), axis=1)
Code Example #5
File: train.py    Project: cameronfabbri/Cramer-Gan
    except:
        pass

    # placeholders for data going into the network
    global_step = tf.Variable(0, name='global_step', trainable=False)
    z1 = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z1')
    z2 = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z2')

    train_images_list = data_ops.loadData(DATA_DIR, DATASET)
    filename_queue = tf.train.string_input_producer(train_images_list)

    # sample from true data
    real_images = data_ops.read_input_queue(filename_queue, BATCH_SIZE)

    # dummy to initialize D
    dummy = netD(real_images, reuse=False)

    # sample two independent images from the generator
    gen_images1 = netG(z1, BATCH_SIZE)
    gen_images2 = netG(z2, BATCH_SIZE, reuse=True)

    # define the critic
    def critic(x):
        return tf.norm(netD(x, reuse=True) - netD(gen_images2, reuse=True),
                       axis=1) - tf.norm(netD(x, reuse=True), axis=1)

    # sample epsilon from uniform distribution
    epsilon = tf.random_uniform([], 0.0, 1.0)

    # interpolate real and generated first samples
    x_hat = epsilon * real_images + (1 - epsilon) * gen_images1
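
The snippet ends at the interpolated sample. This interpolation is normally fed into a gradient penalty on the critic, mirroring the pattern of Code Example #2; a sketch of that continuation, with the penalty weight of 10 carried over from that example and axes [1, 2, 3] assuming 4-D image tensors (both are assumptions, not shown in this listing):

    # penalize deviations of the critic's gradient norm from 1 at the interpolates
    gradients = tf.gradients(critic(x_hat), [x_hat])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
    gradient_penalty = 10.0 * tf.reduce_mean(tf.square(slopes - 1.0))
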
Code Example #6
    except:
        pass

    # placeholders for data going into the network
    global_step = tf.Variable(0, name='global_step', trainable=False)
    z = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z')

    train_images_list = data_ops.loadData(DATA_DIR, DATASET)
    filename_queue = tf.train.string_input_producer(train_images_list)
    real_images = data_ops.read_input_queue(filename_queue, BATCH_SIZE)

    # generated images
    gen_images = netG(z, BATCH_SIZE)

    # get the output from D on the real and fake data
    errD_real = netD(real_images)
    errD_fake = netD(gen_images, reuse=True)

    # cost functions
    errD = tf.reduce_mean(errD_real) - tf.reduce_mean(errD_fake)
    errG = tf.reduce_mean(errD_fake)

    # gradient penalty
    #epsilon = tf.random_uniform([], 0.0, 1.0)
    #x_hat = real_images*epsilon + (1-epsilon)*gen_images
    #d_hat = netD(x_hat, reuse=True)
    #gradients = tf.gradients(d_hat, x_hat)[0]
    #gradient_penalty = 10*tf.reduce_mean(tf.maximum(0.0, gradients-1)**2)
    #errD += gradient_penalty

    # penalty of equation 7 in the paper
Code Example #7
        os.mkdir(IMAGES_DIR)
    except:
        pass

    # placeholders for data going into the network
    global_step = tf.Variable(0, name='global_step', trainable=False)
    z = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z')

    train_images_list = data_ops.loadData(DATA_DIR, DATASET)
    filename_queue = tf.train.string_input_producer(train_images_list)
    real_images = data_ops.read_input_queue(filename_queue, BATCH_SIZE)

    # generated images
    gen_images = netG(z, BATCH_SIZE)

    errD_real, embeddings_real, decoded_real = netD(real_images, BATCH_SIZE)
    # the second call to netD presumably needs variable reuse, as in the other examples
    errD_fake, embeddings_fake, decoded_fake = netD(gen_images, BATCH_SIZE, reuse=True)

    # cost functions
    margin = 20
    #errD = margin - errD_fake+errD_real
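    # EBGAN-style hinge: the fake term contributes only while its energy is below the margin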
    zero = tf.zeros_like(margin - errD_fake)
    errD = errD_real + tf.maximum(zero, margin - errD_fake)
    pt_loss = pullaway_loss(embeddings_fake, BATCH_SIZE)
    if PULLAWAY == 1:
        print('Using pullaway')
        errG = errD_fake + 0.1 * pt_loss
    else:
        print('Not using pullaway')
        errG = errD_fake
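
pullaway_loss itself is not shown in this listing. A minimal sketch of the pulling-away term from the EBGAN paper (the mean squared cosine similarity between distinct pairs of generated-sample embeddings), which is what a function with this signature typically computes; it assumes embeddings is a 2-D (batch, features) tensor and is not the file's actual implementation:

    def pullaway_loss(embeddings, batch_size):
        # normalize each embedding to unit length
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), axis=1, keep_dims=True))
        normalized = embeddings / norm
        # cosine similarity between every pair of embeddings in the batch
        similarity = tf.matmul(normalized, normalized, transpose_b=True)
        # average the squared off-diagonal entries (the diagonal sums to batch_size)
        return (tf.reduce_sum(tf.square(similarity)) - batch_size) \
               / (batch_size * (batch_size - 1))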