def get_loss(self, xb1, xb2):
    """Evaluate every tensor in the 'losses' collection on the given
    batches and return the values as a space-separated string.

    :param xb1: batch of inputs for domain 1.
    :param xb2: batch of inputs for domain 2.
    :return: string of loss values formatted to three decimal places.
    """
    feed_dict = {
        tbn('xb1:0'): xb1,
        tbn('xb2:0'): xb2,
        # Evaluation mode: disable dropout/batch-norm training behavior.
        tbn('is_training:0'): False,
    }
    loss_values = self.sess.run(tf.get_collection('losses'), feed_dict=feed_dict)
    return ' '.join('{:.3f}'.format(value) for value in loss_values)
def train(self, xb1, xb2):
    """Take a training step with batches from each domain."""
    self.iteration += 1
    feed_dict = {
        tbn('xb1:0'): xb1,
        tbn('xb2:0'): xb2,
        tbn('lr:0'): self.learning_rate,
        tbn('is_training:0'): True,
    }
    # One update for the generator, then one for the discriminator,
    # both on the same pair of batches.
    self.sess.run([obn('train_op_G')], feed_dict=feed_dict)
    self.sess.run([obn('train_op_D')], feed_dict=feed_dict)
def train(self, x1=None, x2=None):
    """Take one training step (discriminator, then generator).

    :param x1: optional explicit batch for domain 1; when omitted the
        graph's own dataset iterator supplies the batch.
    :param x2: optional explicit batch for domain 2; same convention.
    """
    self.iteration += 1
    feed = {
        tbn('lr:0'): self.args.learning_rate,
        tbn('is_training:0'): True,
    }
    # BUGFIX: feed each batch independently. Previously xb2 was only fed
    # when x1 was provided, so an x2-only call silently dropped x2, and
    # an x1-only call fed None into xb2.
    if x1 is not None:
        feed[tbn('xb1:0')] = x1
    if x2 is not None:
        feed[tbn('xb2:0')] = x2
    self.sess.run([obn('train_op_D')], feed_dict=feed)
    self.sess.run([obn('train_op_G')], feed_dict=feed)
def get_layer(self, xb1, xb2, name=None):
    """Run the graph on the given batches and return the tensor whose
    graph name is ``name`` (looked up as ``'<name>:0'``).

    :param xb1: batch of inputs for domain 1.
    :param xb2: batch of inputs for domain 2.
    :param name: graph name of the tensor to fetch.
    :return: the evaluated tensor value.
    """
    target = tbn('{}:0'.format(name))
    feed_dict = {
        tbn('xb1:0'): xb1,
        tbn('xb2:0'): xb2,
        # Inference mode for this evaluation.
        tbn('is_training:0'): False,
    }
    return self.sess.run(target, feed_dict=feed_dict)
def get_loss(self, xb1, xb2):
    """Return all of the loss values for the given input.

    :param xb1: batch of inputs for domain 1.
    :param xb2: batch of inputs for domain 2.
    :return: space-separated string of loss values, three decimals each.
    """
    feed = {
        tbn('xb1:0'): xb1,
        tbn('xb2:0'): xb2,
        # Evaluation mode: disable training-only graph behavior.
        tbn('is_training:0'): False,
    }
    # Run the 'losses' collection directly; the former
    # `[tns for tns in tf.get_collection('losses')]` built a redundant copy.
    losses = self.sess.run(tf.get_collection('losses'), feed_dict=feed)
    return ' '.join('{:.3f}'.format(loss) for loss in losses)
def get_layer(self, xb1, xb2, name):
    """Get a layer of the network by name for the entire datasets given
    in xb1 and xb2.

    :param xb1: full dataset for domain 1.
    :param xb2: full dataset for domain 2.
    :param name: graph name of the layer tensor (fetched as '<name>:0').
    :return: the evaluated layer values.
    """
    fetch = tbn('{}:0'.format(name))
    feed_dict = {
        tbn('xb1:0'): xb1,
        tbn('xb2:0'): xb2,
        # Inference mode while extracting activations.
        tbn('is_training:0'): False,
    }
    result = self.sess.run(fetch, feed_dict=feed_dict)
    return result
def _restore(self, restore_folder, limit_gpu_fraction, no_gpu=False):
    """Restore a saved model from ``restore_folder`` into a fresh session.

    Resets the default graph, opens a new session, imports the latest
    checkpoint's meta-graph, restores its weights, and (when data is
    attached) re-initializes the dataset iterators.

    :param restore_folder: directory containing the checkpoint files.
    :param limit_gpu_fraction: per-process GPU memory fraction for the session.
    :param no_gpu: when True, force a CPU-only session.
    """
    tf.reset_default_graph()
    self.init_session(limit_gpu_fraction, no_gpu)
    ckpt = tf.train.get_checkpoint_state(restore_folder)
    self.saver = tf.train.import_meta_graph('{}.meta'.format(
        ckpt.model_checkpoint_path))
    self.saver.restore(self.sess, ckpt.model_checkpoint_path)
    # BUGFIX: explicit None checks. `if self.x1 and self.x2` raises
    # "truth value of an array is ambiguous" when the datasets are numpy
    # arrays, and would also skip initialization for empty-but-present data.
    if self.x1 is not None and self.x2 is not None:
        self.sess.run(obn('initializerx1'),
                      feed_dict={tbn('datasetx1ph:0'): self.x1})
        self.sess.run(obn('initializerx2'),
                      feed_dict={tbn('datasetx2ph:0'): self.x2})
    self.iteration = 0
    print("Model restored from {}".format(restore_folder))
def get_layer(self, xb1, xb2, name=None):
    """Evaluate the tensor named ``name`` (fetched as '<name>:0') for the
    given input batches.

    :param xb1: batch of inputs for domain 1.
    :param xb2: batch of inputs for domain 2.
    :param name: graph name of the tensor to fetch.
    :return: the evaluated tensor value.
    """
    tensor = tbn('{}:0'.format(name))
    feed = {
        tbn('xb1:0'): xb1,
        tbn('xb2:0'): xb2,
        # Inference mode for this evaluation.
        tbn('is_training:0'): False,
    }
    # Leftover debug prints removed: they dumped the tensor shape and the
    # entire activation array to stdout on every call.
    layer = self.sess.run(tensor, feed_dict=feed)
    return layer
def init_session(self, limit_gpu_fraction=.4, no_gpu=False):
    """Create the TensorFlow session and, unless restoring from a
    checkpoint, initialize the dataset iterators.

    :param limit_gpu_fraction: fraction of GPU memory this process may
        claim; a falsy value means no limit.
    :param no_gpu: when True, create a CPU-only session.
    """
    if no_gpu:
        # Hide all GPUs from this session.
        self.sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
    elif limit_gpu_fraction:
        gpu_opts = tf.GPUOptions(
            per_process_gpu_memory_fraction=limit_gpu_fraction)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts))
    else:
        self.sess = tf.Session()
    # A restored model re-initializes its iterators in _restore instead.
    if not self.args.restore_folder:
        self.sess.run(obn('initializerx1'),
                      feed_dict={tbn('datasetx1ph:0'): self.x1})
        self.sess.run(obn('initializerx2'),
                      feed_dict={tbn('datasetx2ph:0'): self.x2})