# NOTE(review): fragment starts mid-statement — the `optimizer_KSD(...).minimize(`
# call that this `var_list=para_G)` closes lies outside this view.
var_list=para_G)
# Second KSD-with-gradient-penalty solver: updates the `para_P` variables
# (presumably the density/score-model parameters — TODO confirm against the
# graph-construction code above this fragment).
solver_KSD_gp_p = optimizer_KSD(learning_rate=lr_p).minimize(KSD_gp, var_list=para_P)

# ----------------------------- training ------------------------------------
sess = tf.Session()
if load_it == 0:
    # Fresh run: initialize every variable in the default graph.
    sess.run(tf.global_variables_initializer())
else:
    # Resume: restore the checkpoint saved at iteration `load_it`.
    saver = tf.train.Saver()
    saver.restore(sess, save_path=workdir + '/model/{}'.format(load_it))

# Best-metric trackers (MMD / KL start at a large sentinel of 10.0).
best_mmd, best_kl, best_rate = 10.0, 10.0, 0.
# Mini-batch provider over the ground-truth samples.
datainput = DataInput(data=true_sample, batch_size=mb_size)
start_time = datetime.now()
iterations = 0
# Outer training loop, continuing from `load_it` for `n_iter` more iterations.
for it in range(load_it, load_it + n_iter + 1):
    i_critic = 0
    # Inner critic loop: n_critic + 1 discriminator updates per outer step
    # (WGAN-style multiple critic steps).
    while i_critic <= n_critic:
        _, loss_d = sess.run(
            [solver_WGAN_D, D_loss],
            feed_dict={
                z: sample_z(mb_size, z_dim),
                X_true: datainput.get_batch(),
                isTraining: True
            })
        # NOTE(review): no `i_critic += 1` is visible in this fragment —
        # presumably it follows on a line outside this view (the parallel
        # fragment at L3 has one); otherwise this loop never terminates.
# Evaluation fragment: draw `show_size` samples from the generator, then
# evaluate the learned density both on those samples and on a fixed 2-D mesh.
fake_sample = sess.run(G_sample, feed_dict={
    z: sample_z(show_size, z_dim),
    isTraining: False
})
# Learned density evaluated at the generated sample locations.
fake_density = sess.run(Density, feed_dict={
    Mesh: fake_sample,
    isTraining: False
})
# XY_range presumably holds a numpy meshgrid pair (X, Y) — TODO confirm
# against where XY_range is built.
X_range, Y_range = XY_range[0], XY_range[1]
# Flatten the meshgrid into an (n_points, 2) array of (x, y) coordinates.
mesh_input = np.stack([np.reshape(X_range, [-1]), np.reshape(Y_range, [-1])], 1)
meshinput = DataInput(data=mesh_input, batch_size=mb_size)
mesh_f_density = np.zeros([mesh_input.shape[0], 1])
# Evaluate the density over the mesh in mini-batches of size mb_size.
# NOTE(review): floor division drops the final partial batch when
# mesh_input.shape[0] is not a multiple of mb_size — those rows keep their
# zero initialization (the min() bound inside the loop never binds, since
# i never reaches the partial batch).
for i in range(mesh_input.shape[0] // mb_size):
    mesh_f_density[i * mb_size:min((i + 1) * mb_size, mesh_input.shape[0])] = sess.run(
        Density, feed_dict={
            Mesh: meshinput.get_batch(),
            isTraining: False
        })
mesh_f_density = np.reshape(mesh_f_density, [X_range.shape[0], X_range.shape[1]])
# Rescale the estimated density so its total mass over the mesh matches the
# true density's mass (mesh_t_density is defined outside this fragment).
norm = np.sum(mesh_t_density)
mesh_f_density_n = mesh_f_density / np.sum(mesh_f_density) * norm
# NOTE(review): this fragment is a near-duplicate of the one at the top of the
# file but drives plain GAN solvers (solver_GAN_D / solver_GAN_G) instead of
# the WGAN ones — likely two script variants pasted together; verify which is
# the intended training path.
# KSD-with-gradient-penalty solvers: one updating the generator variables
# (para_G), one the density-model variables (para_P).
solver_KSD_gp_g = optimizer_KSD(learning_rate=lr_g).minimize(KSD_gp, var_list=para_G)
solver_KSD_gp_p = optimizer_KSD(learning_rate=lr_p).minimize(KSD_gp, var_list=para_P)

# ----------------------------- training ------------------------------------
sess = tf.Session()
if load_it==0:
    # Fresh run: initialize every variable in the default graph.
    sess.run(tf.global_variables_initializer())
else:
    # Resume: restore the checkpoint saved at iteration `load_it`.
    saver = tf.train.Saver()
    saver.restore(sess, save_path=workdir+'/model/{}'.format(load_it))

# Best-metric trackers (MMD / KL start at a large sentinel of 10.0).
best_mmd, best_kl, best_rate = 10.0, 10.0, 0.
# Mini-batch provider over the ground-truth samples.
datainput = DataInput(data=true_sample, batch_size=mb_size)
start_time = datetime.now()
iterations = 0
# Outer training loop, continuing from `load_it` for `n_iter` more iterations.
for it in range(load_it, load_it+n_iter+1):
    i_critic = 0
    # Inner discriminator loop: n_critic + 1 updates per generator step.
    while i_critic<=n_critic:
        _, loss_d = sess.run([solver_GAN_D, D_loss],
                             feed_dict={z: sample_z(mb_size, z_dim),
                                        X_true: datainput.get_batch(),
                                        isTraining: True})
        i_critic += 1
    # Generator update.
    # NOTE(review): fragment is cut mid-statement — the feed_dict and closing
    # parenthesis of this sess.run call continue outside this view.
    _, loss_g = sess.run([solver_GAN_G, G_loss],