Example 1
    def build_model(self):
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.lr = tf.placeholder(tf.float32, shape=[])
        self.images = tf.placeholder(tf.float32, [self.batch_size] + [self.output_size, self.output_size, self.c_dim],
                                    name='real_images')
        self.sample_images = tf.placeholder(tf.float32, [self.sample_size] + [self.output_size, self.output_size, self.c_dim],
                                            name='sample_images')
        self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')

        tf.summary.histogram("z", self.z)

        self.G = self.generator_mnist(self.z)
        # Flatten real and generated images to [batch_size, n_features] for the MMD
        images = tf.reshape(self.images, [self.batch_size, -1])
        G = tf.reshape(self.G, [self.batch_size, -1])

        # Bandwidths for the mixture-of-RBF-kernels MMD estimate
        bandwidths = [2.0, 5.0, 10.0, 20.0, 40.0, 80.0]
        self.kernel_loss, self.ratio_loss = mix_rbf_mmd2_and_ratio(
            G, images, sigmas=bandwidths)

        tf.summary.scalar("kernel_loss", self.kernel_loss)
        tf.summary.scalar("ratio_loss", self.ratio_loss)
        self.kernel_loss = tf.sqrt(self.kernel_loss)  # optimise the unsquared MMD; the summary above logs MMD^2

        tf.summary.image("train/input image", self.imageRearrange(tf.clip_by_value(self.images, 0, 1), 8))
        tf.summary.image("train/gen image", self.imageRearrange(tf.clip_by_value(self.G, 0, 1), 8))

        self.sampler = self.generator_mnist(self.z, is_train=False, reuse=True)
        t_vars = tf.trainable_variables()

        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.saver = tf.train.Saver()
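
Each example on this page calls mix_rbf_mmd2_and_ratio without showing its body. As a rough guide, here is a minimal NumPy sketch of the underlying statistic: the biased MMD^2 estimate under a mixture of RBF kernels. The name mix_rbf_mmd2_sketch is illustrative only; the _and_ratio variant used above additionally returns the MMD divided by an estimate of its standard deviation (the "t-hat" ratio optimised in Examples 2 and 5), which this sketch omits.

import numpy as np

def mix_rbf_mmd2_sketch(X, Y, sigmas=(2.0, 5.0, 10.0, 20.0, 40.0, 80.0)):
    """Biased MMD^2 estimate between samples X and Y (each [n, d]) under a
    mixture of RBF kernels k(a, b) = sum_s exp(-||a - b||^2 / (2 s^2))."""
    def sq_dists(A, B):
        # Pairwise squared Euclidean distances, shape [len(A), len(B)].
        return (np.sum(A**2, 1)[:, None] + np.sum(B**2, 1)[None, :]
                - 2.0 * A @ B.T)

    d_XX, d_XY, d_YY = sq_dists(X, X), sq_dists(X, Y), sq_dists(Y, Y)
    K_XX = sum(np.exp(-d_XX / (2.0 * s**2)) for s in sigmas)
    K_XY = sum(np.exp(-d_XY / (2.0 * s**2)) for s in sigmas)
    K_YY = sum(np.exp(-d_YY / (2.0 * s**2)) for s in sigmas)
    return K_XX.mean() + K_YY.mean() - 2.0 * K_XY.mean()

For two batches drawn from the same distribution the estimate should sit near zero; it grows as the distributions separate.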
Example 2
    n_samples_sigma = len(samples['vali'])
    batch_multiplier = n_samples_sigma // batch_size
    eval_size = batch_multiplier * batch_size
    print(eval_size)
    eval_eval_size = int(0.2 * eval_size)
    eval_real_PH = tf.placeholder(
        tf.float32, [eval_eval_size, seq_length, num_generated_features])
    eval_sample_PH = tf.placeholder(
        tf.float32, [eval_eval_size, seq_length, num_generated_features])
    n_sigmas = 2
    sigma = tf.get_variable(
        name='sigma',
        shape=n_sigmas,
        initializer=tf.constant_initializer(value=np.power(
            heuristic_sigma_training, np.linspace(-1, 3, num=n_sigmas))))
    # `that` is the t-hat ratio statistic maximised below to pick the bandwidths
    mmd2, that = mix_rbf_mmd2_and_ratio(eval_real_PH, eval_sample_PH, sigma)
    with tf.variable_scope("SIGMA_optimizer"):
        sigma_solver = tf.train.RMSPropOptimizer(learning_rate=0.05).minimize(
            -that, var_list=[sigma])
        #sigma_solver = tf.train.AdamOptimizer().minimize(-that, var_list=[sigma])
        #sigma_solver = tf.train.AdagradOptimizer(learning_rate=0.1).minimize(-that, var_list=[sigma])
    sigma_opt_iter = 2000
    sigma_opt_thresh = 0.001
    sigma_opt_vars = [
        var for var in tf.global_variables() if 'SIGMA_optimizer' in var.name
    ]

sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=True))
sess.run(tf.global_variables_initializer())
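
The excerpt above defines sigma_opt_iter and sigma_opt_thresh, but the optimisation loop that uses them falls outside the excerpt. A hypothetical driver, assuming eval_real and eval_sample are arrays of shape [eval_eval_size, seq_length, num_generated_features]:

# Run the sigma optimiser until the ratio statistic stops improving.
that_np = sess.run(that, feed_dict={eval_real_PH: eval_real,
                                    eval_sample_PH: eval_sample})
for _ in range(sigma_opt_iter):
    prev_that = that_np
    _, that_np = sess.run([sigma_solver, that],
                          feed_dict={eval_real_PH: eval_real,
                                     eval_sample_PH: eval_sample})
    if abs(that_np - prev_that) < sigma_opt_thresh:
        break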
Example 3
X_test = tf.convert_to_tensor(X_test)
Y_test = tf.convert_to_tensor(Y_test)
## model
fc6, fc7, source_logits = alexnet(X_train, num_classes, keep_prob, is_reuse=False)
_, _, logits = alexnet(X_test[:300, ...], num_classes, keep_prob, is_reuse=True)
source_fc6 = fc6[:batch_size, ...]
target_fc6 = fc6[batch_size:, ...]
source_fc7 = fc7[:batch_size, ...]
target_fc7 = fc7[batch_size:, ...]
## source loss
source_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=source_logits[:batch_size, ...], labels=ys))

## MMD
bandwidths = [2.0, 5.0, 10.0, 20.0, 40.0, 80.0]
loss_fc6, var6, MMD_fc6 = mix_rbf_mmd2_and_ratio(source_fc6, target_fc6, sigmas=bandwidths)
loss_fc7, var7, MMD_fc7 = mix_rbf_mmd2_and_ratio(source_fc7, target_fc7, sigmas=bandwidths)
#loss_fc8,var8,MMD_fc8 = mix_rbf_mmd2_and_ratio(source_logits[:batch_size,...], source_logits[batch_size:,...],sigmas=bandwidths)
#MMD_fc8 = mix_rbf_mmd2_and_ratio(source_logits[:batch_size,...], source_logits[batch_size:,...],sigmas=bandwidths)
#MMD_fc6 = tf.reduce_mean(tf.square(tf.reduce_mean(source_fc6,0)
#                        -tf.reduce_mean(target_fc6,0)))
#MMD_fc7 = tf.reduce_mean(tf.square(tf.reduce_mean(source_fc7,0)
#                        -tf.reduce_mean(target_fc7,0)))

#MMD_fc8 = tf.reduce_mean(tf.square(tf.reduce_mean(source_logits[:batch_size,...],0)
#                                -tf.reduce_mean(source_logits[batch_size:,...],0)))
## discriminate distance loss
def source_distance(x, y):
    y = tf.cast(tf.argmax(y, axis=1), tf.float32)
    y1, _, _ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32, size=1, dynamic_size=True,
                               clear_after_read=False)
Example 4
target_fc = features[batch_size:, ...]
source_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits[:batch_size, ...],
                                            labels=ys))

# --------- test ----------------------------------
#_,_,test_logit = resnet_model(X_test,reuse=True)
#correct_pred = tf.equal(tf.argmax(test_logit, 1), tf.argmax(Y_test, 1))
#accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

##---------- MMD -----------------------------------
bandwidths = [2.0, 5.0, 10.0, 20.0, 40.0, 80.0]
#bandwidths=[2.0]
#MMD_fc = mix_rbf_mmd2(source_fc,target_fc,sigmas=bandwidths)
loss_fc, var, MMD_fc = mix_rbf_mmd2_and_ratio(source_fc,
                                              target_fc,
                                              sigmas=bandwidths)
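
# A hypothetical combined objective (not part of this excerpt): trade the
# source classification loss off against the MMD term between domains.
mmd_weight = 0.3  # assumed hyperparameter, not from the original code
total_loss = source_loss + mmd_weight * loss_fc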


##---------- discriminate distance loss ------------
def source_distance(x, y):
    y = tf.cast(tf.argmax(y, axis=1), tf.float32)
    y1, _, _ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32,
                               size=1,
                               dynamic_size=True,
                               clear_after_read=False)
    x_array = TensorArr.unstack(y1)
    size = x_array.size()
    initial_outputs = tf.TensorArray(dtype=tf.float32, size=size)
    i = tf.constant(0)
Example 5
eval_freq = 50

# get heuristic bandwidth for mmd kernel from evaluation samples
heuristic_sigma_training = median_pairwise_distance(samples['vali'])
best_mmd2_so_far = 1000
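
# A minimal sketch of the median heuristic assumed behind
# median_pairwise_distance (the helper itself is not shown in this excerpt):
# the bandwidth is the median pairwise Euclidean distance between flattened
# samples.
def median_pairwise_distance_sketch(X):
    X = np.reshape(X, (len(X), -1))
    sq = np.sum(X**2, axis=1)
    d2 = sq[:, None] + sq[None, :] - 2.0 * X @ X.T
    upper = d2[np.triu_indices(len(X), k=1)]  # strictly upper-triangular entries
    return np.sqrt(np.median(np.maximum(upper, 0.0)))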

# optimise sigma using that (that's t-hat)
batch_multiplier = 5000 // batch_size
eval_size = batch_multiplier * batch_size
eval_eval_size = int(0.2 * eval_size)
eval_real_PH = tf.placeholder(tf.float32, [eval_eval_size, seq_length, num_generated_features])
eval_sample_PH = tf.placeholder(tf.float32, [eval_eval_size, seq_length, num_generated_features])
n_sigmas = 2
sigma = tf.get_variable(name='sigma', shape=n_sigmas, initializer=tf.constant_initializer(
    value=np.power(heuristic_sigma_training, np.linspace(-1, 3, num=n_sigmas))))
mmd2, that = mix_rbf_mmd2_and_ratio(eval_real_PH, eval_sample_PH, sigma)
with tf.variable_scope("SIGMA_optimizer"):
    sigma_solver = tf.train.RMSPropOptimizer(learning_rate=0.05).minimize(-that, var_list=[sigma])
    # sigma_solver = tf.train.AdamOptimizer().minimize(-that, var_list=[sigma])
    # sigma_solver = tf.train.AdagradOptimizer(learning_rate=0.1).minimize(-that, var_list=[sigma])
sigma_opt_iter = 2000
sigma_opt_thresh = 0.001
sigma_opt_vars = [var for var in tf.global_variables() if 'SIGMA_optimizer' in var.name]

sess = tf.Session()
sess.run(tf.global_variables_initializer())

vis_Z = model.sample_Z(batch_size, seq_length, latent_dim, use_time)
if CGAN:
    vis_C = model.sample_C(batch_size, cond_dim, max_val, one_hot)
    if 'eICU_task' in data: