import os

import cv2
import tensorflow as tf
from tensorflow.contrib import layers   # presumed source of `layers.batch_norm`
from skimage.transform import resize    # presumed source of `resize`

import ops  # project-local helpers (ops.transform)


async def _get_formatted_image(filename, timesteps, out_size):
    """Read a vertically stacked frame image and split it into `timesteps` frames."""
    img = cv2.imread(filename)
    s = img.shape[1]  # frame width; frames are assumed square (height per frame == width)
    c = img.shape[2]  # number of channels
    # Truncate the height so exactly `timesteps` square frames remain.
    img = img[:timesteps * s, :, :]
    if out_size:
        img = resize(img, (timesteps * out_size[0], out_size[1]))
        imgs = img.reshape([timesteps, out_size[0], out_size[1], c])
    else:
        # Without a target size, keep the native s x s frames.
        imgs = img.reshape([timesteps, s, s, c])
    return ops.transform(imgs)
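# Usage sketch (not part of the original file): `_get_formatted_image` is declared
# as a coroutine (although it contains no awaits), so it has to be run on an event
# loop. The path, frame count, and per-frame size below are hypothetical
# placeholders; `out_size` is assumed to be a (height, width) pair.
def _example_load_frames(path='data/sample.png'):
    import asyncio
    # Drive the coroutine to completion; returns a [timesteps, h, w, c] array
    # already passed through ops.transform.
    return asyncio.run(_get_formatted_image(path, timesteps=16, out_size=(64, 64)))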
def _build_train_tower(self, inputs, batch_size=None, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    X, Z, E = inputs
    X = ops.transform(X)
    # Debug: log input tensor names and shapes.
    print(X.name, X.get_shape())
    print(Z.name, Z.get_shape())
    self._image_summary("Inputs", X)

    # Encode real images and generate from noise, then reconstruct in both
    # directions; the discriminator scores (image, latent) pairs
    # (ALI/BiGAN-style pairing).
    L = self._encoder(X, None, True, reuse)        # latent code of real images
    G = self._generator(Z, None, True, reuse)      # images generated from noise
    X_hat = self._generator(L, None, True, True)   # reconstruction of X
    Z_hat = self._encoder(G, None, True, True)     # reconstruction of Z
    D_ = self._discriminator(G, Z, None, True, reuse)  # score for generated pairs
    D = self._discriminator(X, L, None, True, True)    # score for real pairs

    self._image_summary("G", G)
    self._image_summary("X_hat", X_hat)
    tf.summary.histogram('D_', D_)
    tf.summary.histogram('D', D)
    tf.summary.scalar('D', tf.reduce_mean(D))
    tf.summary.scalar('D_', tf.reduce_mean(D_))
    tf.summary.scalar('sum_D', tf.reduce_mean(D + D_))
    tf.summary.scalar('sig_D', tf.reduce_mean(tf.nn.sigmoid(D)))
    tf.summary.scalar('sig_D_', tf.reduce_mean(tf.nn.sigmoid(D_)))

    # Inference-mode sampler graph (is_training=False) that reuses the
    # training variables.
    with tf.name_scope('sampler'):
        g = self._generator(Z, None, False, True)
        l = self._encoder(X, None, False, True)
        x_recon = self._generator(l, None, False, True)
        d = self._discriminator(X, l, None, False, True)
        outputs = [g, x_recon, l, d]

    d_loss, g_loss, e_loss = self._calculate_losses(
        X, Z, L, G, X_hat, Z_hat, D_, D, E)

    # Split the trainable variables between the three sub-networks.
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'Discriminator' in var.name]
    g_vars = [var for var in t_vars if 'Generator' in var.name]
    e_vars = [var for var in t_vars if 'Encoder' in var.name]

    # Compute per-network gradients for this tower; applying them is left to
    # the caller.
    with tf.name_scope('Grad_compute'):
        d_grads = self.opt.compute_gradients(d_loss, var_list=d_vars)
        g_grads = self.opt.compute_gradients(g_loss, var_list=g_vars)
        e_grads = self.opt.compute_gradients(e_loss, var_list=e_vars)

    return outputs, [d_loss, g_loss, e_loss], [d_grads, g_grads, e_grads]
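# Hedged sketch (not in the original file): `_build_train_tower` returns one
# `compute_gradients` list per sub-network, so a multi-tower setup would
# typically average the per-tower gradients before calling
# `opt.apply_gradients`. `_average_gradients` below is a hypothetical helper
# showing the standard TF1 pattern; the original training loop may combine
# towers differently.
def _average_gradients(tower_grads):
    """Average lists of (gradient, variable) pairs produced by several towers."""
    averaged = []
    for grads_and_vars in zip(*tower_grads):
        # Every entry in `grads_and_vars` refers to the same variable.
        var = grads_and_vars[0][1]
        grads = [tf.expand_dims(g, 0) for g, _ in grads_and_vars if g is not None]
        if not grads:
            averaged.append((None, var))
            continue
        averaged.append((tf.reduce_mean(tf.concat(grads, axis=0), axis=0), var))
    return averaged
# Example use:
#   d_train_op = opt.apply_gradients(
#       _average_gradients([tower_grads[0] for tower_grads in per_tower_grad_lists]))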
def _setup_config(self, config):
    self.ext = config['ext']
    self.s = config['s']
    self.c = config['c']
    self.lvls = config['lvls']
    self.kernel_size = config['kernel_size']
    self.z_len = config['z_len']
    self.gf_dim = config['gf_dim']
    self.df_dim = config['df_dim']
    self.sampler_interpolations = config['interpolations']
    self.sampler_batch_size = config['sampler_batch_size']
    self.activation_fn = config['activation_fn']
    self.out_activation_fn = config['out_activation_fn']
    d_bn = config['d_bn']
    g_bn = config['g_bn']
    self.lr = config['lr']
    self.optimizer = config['optimizer']
    self.loss = config['loss']
    self.grad_pen = config['grad_pen']
    self.ae_pen = config['ae_pen']
    self.gp_lambda = config['gp_lambda']
    self.ae_lambda = config['ae_lambda']
    self.add_image_summary = config['add_image_summary']

    # Compare strings with `==`, not `is` (identity is not guaranteed for
    # equal string literals).
    if self.optimizer == 'adam':
        self.opt = tf.train.AdamOptimizer(
            learning_rate=self.lr, beta1=0.5, beta2=0.9)
    elif self.optimizer == 'rms':
        self.opt = tf.train.RMSPropOptimizer(learning_rate=self.lr)

    self.d_bn_fn = layers.batch_norm if d_bn else None
    self.g_bn_fn = layers.batch_norm if g_bn else None
    self.comments = None

    try:
        # Add the dataset mean image. cv2.imread returns a NumPy array, so
        # use cv2.resize rather than ndarray.resize (which reshapes in place
        # and returns None).
        img = cv2.imread(
            os.path.join(self.data_dir, 'mean.{}'.format(self.ext)))
        mean_img = cv2.resize(img, (self.s, self.s))
        self.mean_img = tf.constant(ops.transform(mean_img), tf.float32,
                                    [1, self.s, self.s, 3])
    except Exception:
        # The mean image is optional; skip it if the file is missing.
        pass
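# Hedged example (not from the original repo): a config dict with the keys
# `_setup_config` reads above. Every value is a placeholder assumption; the
# real defaults live wherever the project builds its config.
EXAMPLE_CONFIG = {
    'ext': 'png',                 # image file extension (used for mean.<ext>)
    's': 64,                      # image side length
    'c': 3,                       # number of channels
    'lvls': 4,                    # number of levels
    'kernel_size': 5,
    'z_len': 128,                 # latent vector length
    'gf_dim': 64,                 # generator filter base width
    'df_dim': 64,                 # discriminator filter base width
    'interpolations': 8,          # sampler interpolation steps
    'sampler_batch_size': 16,
    'activation_fn': tf.nn.relu,
    'out_activation_fn': tf.nn.tanh,
    'd_bn': True,                 # batch norm in the discriminator
    'g_bn': True,                 # batch norm in the generator
    'lr': 1e-4,
    'optimizer': 'adam',          # 'adam' or 'rms'
    'loss': 'wgan',               # loss variant consumed elsewhere in the model
    'grad_pen': True,             # enable gradient penalty
    'ae_pen': True,               # enable autoencoding penalty
    'gp_lambda': 10.0,
    'ae_lambda': 1.0,
    'add_image_summary': True,
}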