def train(self, xb1, xb2):
    """Take one training step using a batch from each of the two domains.

    Args:
        xb1: batch of samples from domain 1, fed to placeholder ``xb1:0``.
        xb2: batch of samples from domain 2, fed to placeholder ``xb2:0``.

    Side effects: increments ``self.iteration`` and runs one generator
    update followed by one discriminator update on the same feed.
    """
    self.iteration += 1

    feed_dict = {
        tbn('xb1:0'): xb1,
        tbn('xb2:0'): xb2,
        tbn('lr:0'): self.learning_rate,
        tbn('is_training:0'): True,
    }

    # Generator step first, then discriminator step, both on the same batch.
    self.sess.run([obn('train_op_G')], feed_dict=feed_dict)
    self.sess.run([obn('train_op_D')], feed_dict=feed_dict)
def train(self, x1=None, x2=None):
    """Take one training step: discriminator update, then generator update.

    Args:
        x1: optional batch for placeholder ``xb1:0``. When given, ``x2`` is
            fed alongside it; when ``None``, the graph's internal input
            pipeline is assumed to supply the data.
        x2: optional batch for placeholder ``xb2:0`` (used only when ``x1``
            is provided).

    Side effect: increments ``self.iteration``.
    """
    self.iteration += 1

    feed_dict = {
        tbn('lr:0'): self.args.learning_rate,
        tbn('is_training:0'): True,
    }
    # NOTE(review): x2 is only fed when x1 is present — presumably batches
    # always arrive in pairs; confirm against callers.
    if x1 is not None:
        feed_dict[tbn('xb1:0')] = x1
        feed_dict[tbn('xb2:0')] = x2

    self.sess.run([obn('train_op_D')], feed_dict=feed_dict)
    self.sess.run([obn('train_op_G')], feed_dict=feed_dict)
def _restore(self, restore_folder, limit_gpu_fraction, no_gpu=False):
    """Restore the graph and weights from the latest checkpoint in a folder.

    Args:
        restore_folder: directory containing a TF checkpoint (and its
            ``.meta`` graph definition).
        limit_gpu_fraction: per-process GPU memory fraction passed through
            to ``init_session``.
        no_gpu: if True, the restored session runs CPU-only.

    Raises:
        ValueError: if no checkpoint can be found in ``restore_folder``.

    Side effects: resets the default graph, opens a new session, restores
    variables, re-initializes the dataset iterators when training data is
    attached, and resets ``self.iteration`` to 0.
    """
    tf.reset_default_graph()
    self.init_session(limit_gpu_fraction, no_gpu)

    ckpt = tf.train.get_checkpoint_state(restore_folder)
    # Fail with a clear message instead of an AttributeError on ckpt=None.
    if ckpt is None or not ckpt.model_checkpoint_path:
        raise ValueError("No checkpoint found in {}".format(restore_folder))

    self.saver = tf.train.import_meta_graph('{}.meta'.format(
        ckpt.model_checkpoint_path))
    self.saver.restore(self.sess, ckpt.model_checkpoint_path)

    # BUG FIX: `if self.x1 and self.x2` raises "truth value of an array is
    # ambiguous" for numpy arrays; test against None explicitly.
    if self.x1 is not None and self.x2 is not None:
        self.sess.run(obn('initializerx1'),
                      feed_dict={tbn('datasetx1ph:0'): self.x1})
        self.sess.run(obn('initializerx2'),
                      feed_dict={tbn('datasetx2ph:0'): self.x2})

    self.iteration = 0
    print("Model restored from {}".format(restore_folder))
def init_session(self, limit_gpu_fraction=.4, no_gpu=False):
    """Create ``self.sess`` with the requested GPU configuration.

    Args:
        limit_gpu_fraction: per-process GPU memory fraction; a falsy value
            (e.g. 0) yields a default, unconstrained session.
        no_gpu: if True, force a CPU-only session (takes precedence).

    When not restoring from a checkpoint, also runs the dataset iterator
    initializers with ``self.x1`` / ``self.x2`` as the feed.
    """
    if no_gpu:
        # Hide all GPUs from this process.
        self.sess = tf.Session(
            config=tf.ConfigProto(device_count={'GPU': 0}))
    elif limit_gpu_fraction:
        # Cap how much GPU memory this process may grab.
        opts = tf.GPUOptions(
            per_process_gpu_memory_fraction=limit_gpu_fraction)
        self.sess = tf.Session(config=tf.ConfigProto(gpu_options=opts))
    else:
        self.sess = tf.Session()

    if not self.args.restore_folder:
        # Fresh run: prime the dataset iterators with the in-memory data.
        self.sess.run(obn('initializerx1'),
                      feed_dict={tbn('datasetx1ph:0'): self.x1})
        self.sess.run(obn('initializerx2'),
                      feed_dict={tbn('datasetx2ph:0'): self.x2})