def run(self):
    if self.method == 'train':
        self.train()
    elif self.method == 'build':
        self.gan = hg.GAN(config=self.gan_config, inputs=self.create_input(blank=True))
        if not self.gan.load(self.save_file):
            raise ValidationException("Could not load model: " + self.save_file)
        self.build()
    elif self.method == 'new':
        self.new()
    elif self.method == 'sample':
        self.gan = hg.GAN(config=self.gan_config, inputs=self.create_input(blank=False))
        if not self.gan.load(self.save_file):
            print("Initializing new model")
        self.sample_forever()
def setup_gan(config, inputs, args):
    if "encode" in config:
        print("CHARGAN")
        gan = CharGAN(config, inputs=inputs)
    else:
        gan = hg.GAN(config, inputs=inputs)
    gan.load(save_file)  # save_file is a module-level global in the original script
    return gan
def train(config, args):
    title = "[hypergan] 2d-test " + config_filename
    GlobalViewer.set_options(enabled=args.viewer, title=title, viewer_size=1)
    print("ARGS", args)
    gan = hg.GAN(config, inputs=Custom2DInputDistribution({"batch_size": args.batch_size}))
    gan.name = config_filename
    if gan.config.use_latent:
        accuracy_x_to_g = lambda: distribution_accuracy(gan.inputs.next(1), gan.generator(gan.latent.next()))
        accuracy_g_to_x = lambda: distribution_accuracy(gan.generator(gan.latent.next()), gan.inputs.next(1))
    else:
        accuracy_x_to_g = lambda: distribution_accuracy(gan.inputs.next(1), gan.generator(gan.inputs.next()))
        accuracy_g_to_x = lambda: distribution_accuracy(gan.generator(gan.inputs.next()), gan.inputs.next(1))
    sampler = Custom2DSampler(gan)
    gan.selected_sampler = sampler
    samples = 0
    steps = args.steps
    sample_file = "samples/" + config_filename + "/000000.png"
    os.makedirs(os.path.expanduser(os.path.dirname(sample_file)), exist_ok=True)
    sampler.sample(sample_file, args.save_samples)
    metrics = [accuracy_x_to_g, accuracy_g_to_x]
    sum_metrics = [0 for metric in metrics]
    broken = False
    for i in range(steps):
        if broken:
            break
        gan.step()
        if args.viewer and i % args.sample_every == 0:
            samples += 1
            print("Sampling " + str(samples))
            sample_file = "samples/" + config_filename + "/%06d.png" % samples
            sampler.sample(sample_file, args.save_samples)
        if i % 100 == 0:
            for k, metric in enumerate(metrics):
                _metric = metric().cpu().detach().numpy()
                sum_metrics[k] += _metric
                if not np.isfinite(_metric):
                    broken = True
                    break
    return sum_metrics
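# For context: the snippet above treats distribution_accuracy as a distance between two
# batches of 2D points. The real helper is defined alongside these examples; the version
# below is only a plausible PyTorch sketch of such a metric, assuming both arguments are
# [batch, 2] tensors. The name is suffixed _sketch to mark it as hypothetical.
import torch

def distribution_accuracy_sketch(a, b):
    # Measure each point of `a` against its nearest neighbor in `b` and average;
    # small values mean the two point clouds overlap closely.
    diffs = (a.unsqueeze(1) - b.unsqueeze(0)).abs().sum(dim=2)  # [len(a), len(b)]
    return diffs.min(dim=1).values.mean()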
def test_config(self):
    with self.test_session():
        config = {
            'd_learn_rate': 1e-3,
            'g_learn_rate': 1e-3,
            'd_trainer': 'rmsprop',
            'g_trainer': 'adam'
        }
        gan = hg.GAN()
        trainer = AlternatingTrainer(gan, config)
        self.assertEqual(trainer.config.d_learn_rate, 1e-3)
def train(config, args):
    save_file = "save/chargan/model.ckpt"
    with tf.device(args.device):
        text_input = TextInput(config, args.batch_size, one_hot=one_hot)  # one_hot is a module-level global
        gan = hg.GAN(config, inputs=text_input)
        gan.create()
        if args.action != 'search' and os.path.isfile(save_file + ".meta"):
            gan.load(save_file)
        with gan.session.as_default():
            text_input.table.init.run()
        tf.train.start_queue_runners(sess=gan.session)
        s = [int(g) for g in gan.generator.sample.get_shape()]
        x_0 = gan.session.run(gan.inputs.x)
        z_0 = gan.session.run(gan.encoder.z)
        ax_sum = 0
        ag_sum = 0
        diversity = 0.00001
        dlog = 0
        last_i = 0
        samples = 0
        vocabulary = text_input.get_vocabulary()
        for i in range(args.steps):
            gan.step()
            if args.action == 'train' and i % args.save_every == 0 and i > 0:
                print("saving " + save_file)
                gan.save(save_file)
            if i % args.sample_every == 0:
                g, x_val = gan.session.run([gan.generator.sample, gan.inputs.x], {gan.encoder.z: z_0})
                bs = np.shape(x_val)[0]
                samples += 1
                print("X: " + text_input.sample_output(x_val[0]))
                print("G:")
                for j, g0 in enumerate(g):
                    if j > 4:
                        break
                    print(text_input.sample_output(g0))
        if args.config is None:
            with open("sequence-results-10k.csv", "a") as myfile:
                myfile.write(config_name + "," + str(ax_sum) + "," + str(ag_sum) + "," +
                             str(ax_sum + ag_sum) + "," + str(ax_sum * ag_sum) + "," + str(dlog) + "," +
                             str(diversity) + "," + str(ax_sum * ag_sum * (1 / diversity)) + "," +
                             str(last_i) + "\n")
        tf.reset_default_graph()
        gan.session.close()
def setup_gan(config, inputs, args):
    gan = hg.GAN(config, inputs=inputs, name=args.config)
    if os.path.isfile(save_file + ".meta"):
        gan.load(save_file)
    tf.train.start_queue_runners(sess=gan.session)
    config_name = args.config
    GlobalViewer.title = "[hypergan] colorizer " + config_name
    GlobalViewer.enabled = args.viewer
    return gan
def train(config, args):
    title = "[hypergan] 2d-test " + args.config
    GlobalViewer.title = title
    GlobalViewer.enabled = args.viewer
    with tf.device(args.device):
        config.generator['end_features'] = 2
        config.generator["class"] = "class:__main__.Custom2DGenerator"  # TODO
        config.discriminator["class"] = "class:__main__.Custom2DDiscriminator"  # TODO
        gan = hg.GAN(config, inputs=Custom2DInputDistribution(args))
        gan.name = config_name
        accuracy_x_to_g = distribution_accuracy(gan.inputs.x, gan.generator.sample)
        accuracy_g_to_x = distribution_accuracy(gan.generator.sample, gan.inputs.x)
        sampler = Custom2DSampler(gan)
        tf.train.start_queue_runners(sess=gan.session)
        samples = 0
        steps = args.steps
        sampler.sample("samples/000000.png", args.save_samples)
        metrics = [accuracy_x_to_g, accuracy_g_to_x]
        sum_metrics = [0 for metric in metrics]
        for i in range(steps):
            gan.step()
            if args.viewer and i % args.sample_every == 0:
                samples += 1
                print("Sampling " + str(samples), args.save_samples)
                sample_file = "samples/%06d.png" % samples
                sampler.sample(sample_file, args.save_samples)
            if i > steps * 9.0 / 10:
                for k, metric in enumerate(gan.session.run(metrics)):
                    sum_metrics[k] += metric
            if i % 300 == 0:
                for k, metric in enumerate(gan.metrics().keys()):
                    metric_value = gan.session.run(gan.metrics()[metric])
                    print("--", metric, metric_value)
                    if math.isnan(metric_value) or math.isinf(metric_value):
                        print("Breaking due to invalid metric")
                        return None
        tf.reset_default_graph()
        gan.session.close()
        return sum_metrics
def setup_gan(config, inputs, args):
    gan = hg.GAN(config, inputs=inputs)
    gan.create()
    if args.action != 'search' and os.path.isfile(save_file + ".meta"):
        gan.load(save_file)
    with tf.device(args.device):
        with gan.session.as_default():
            inputs.table.init.run()
    tf.train.start_queue_runners(sess=gan.session)
    return gan
def setup_gan(config, inputs, args):
    gan = hg.GAN(config=config, inputs=inputs)
    gan.create()
    if args.action != 'search' and os.path.isfile(save_file + ".meta"):
        gan.load(save_file)
    tf.train.start_queue_runners(sess=gan.session)
    config_name = args.config
    title = "[hypergan] autoencode " + config_name
    GlobalViewer.title = title
    GlobalViewer.enabled = args.viewer
    return gan
def test_create(self):
    with self.test_session():
        remove_d_config = hg.Configuration.default()
        remove_d_config['discriminator'] = None
        remove_d_config['loss'] = None
        remove_d_config['trainer'] = None
        gan = hg.GAN(config=remove_d_config, inputs=MockInput())
        # `config` here is the discriminator config defined at module scope in the original test
        discriminator = AutoencoderDiscriminator(gan, config)
        gan.encoder = gan.create_component(gan.config.encoder)
        gan.encoder.create()
        gan.generator = gan.create_component(gan.config.generator)
        gan.generator.create()
        net = discriminator.create()
        gan.create()
        self.assertEqual(int(net.get_shape()[1]), 32)
def mock_gan(batch_size=1, y=1, config=None):
    mock_config = config or hc.Config({
        "latent": {
            "class": "function:hypergan.distributions.uniform_distribution.UniformDistribution",
            "max": 1,
            "min": -1,
            "projections": ["function:hypergan.distributions.uniform_distribution.identity"],
            "z": 128
        },
        "generator": {
            "class": "class:hypergan.discriminators.configurable_discriminator.ConfigurableDiscriminator",
            "defaults": {
                "activation": "tanh",
                "initializer": "he_normal"
            },
            "layers": ["linear 32*32*1 activation=null"]
        },
        "discriminator": {
            "class": "class:hypergan.discriminators.configurable_discriminator.ConfigurableDiscriminator",
            "defaults": {
                "activation": "tanh",
                "initializer": "he_normal"
            },
            "layers": ["linear 1 activation=null"]
        },
        "loss": {
            "class": "function:hypergan.losses.ragan_loss.RaganLoss",
            "reduce": "reduce_mean"
        },
        "trainer": {
            "class": "function:hypergan.trainers.alternating_trainer.AlternatingTrainer",
            "optimizer": {
                "class": "function:tensorflow.python.training.adam.AdamOptimizer",
                "learn_rate": 1e-4
            }
        }
    })
    return hg.GAN(config=mock_config, inputs=MockInput(batch_size=batch_size, y=y))
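# A minimal usage sketch for mock_gan, written in the tf.test.TestCase style the other
# tests in this collection use. This particular test is illustrative, not from the
# source; it only checks that the mocked config wires up the expected components.
class MockGanTest(tf.test.TestCase):
    def test_mock_gan_has_components(self):
        with self.test_session():
            gan = mock_gan(batch_size=2)
            # The mock config above defines latent, generator, discriminator,
            # loss, and trainer sections, so they should appear on gan.config.
            self.assertIsNotNone(gan.config.generator)
            self.assertIsNotNone(gan.config.discriminator)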
def setup_gan(config, inputs, args):
    gan = hg.GAN(config, inputs=inputs)
    gan.create()
    if args.action != 'search' and os.path.isfile(save_file + ".meta"):
        gan.load(save_file)
    tf.train.start_queue_runners(sess=gan.session)
    GlobalViewer.enable()
    config_name = args.config
    title = "[hypergan] colorizer " + config_name
    GlobalViewer.window.set_title(title)
    return gan
def train(config, args):
    if args.viewer:
        title = "[hypergan] 2d-test " + args.config
        GlobalViewer.title = title
        GlobalViewer.enabled = args.viewer
    with tf.device(args.device):
        config.generator['end_features'] = 2
        gan = hg.GAN(config, inputs=Custom2DInputDistribution(args))
        gan.discriminator = Custom2DDiscriminator(gan, config.discriminator)
        gan.generator = Custom2DGenerator(gan, config.generator)
        gan.encoder = gan.create_component(gan.config.encoder)
        gan.encoder.create()
        gan.generator.create()
        gan.discriminator.create()
        gan.create()
        accuracy_x_to_g = batch_accuracy(gan.inputs.x, gan.generator.sample)
        accuracy_g_to_x = batch_accuracy(gan.generator.sample, gan.inputs.x)
        sampler = Custom2DSampler(gan)
        tf.train.start_queue_runners(sess=gan.session)
        samples = 0
        steps = args.steps
        sampler.sample("samples/000000.png", args.save_samples)
        metrics = [accuracy_x_to_g, accuracy_g_to_x]
        sum_metrics = [0 for metric in metrics]
        for i in range(steps):
            gan.step()
            if args.viewer and i % args.sample_every == 0:
                samples += 1
                print("Sampling " + str(samples), args.save_samples)
                sample_file = "samples/%06d.png" % samples
                sampler.sample(sample_file, args.save_samples)
            if i > steps * 9.0 / 10:
                for k, metric in enumerate(gan.session.run(metrics)):
                    sum_metrics[k] += metric
        tf.reset_default_graph()
        gan.session.close()
        return sum_metrics
def train(self):
    i = 0
    if self.args.ipython:
        # Put stdin into non-blocking mode so check_stdin() can poll for commands.
        import fcntl
        fd = sys.stdin.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

    self.gan = hg.GAN(config=self.gan_config, inputs=self.create_input(),
                      device=self.args.parameter_server_device)
    self.gan.cli = self  # TODO remove this link
    self.gan.inputs.next()
    self.lazy_create()
    self.trainable_gan = hg.TrainableGAN(self.gan,
                                         save_file=self.save_file,
                                         devices=self.devices,
                                         backend_name=self.args.backend)

    if self.trainable_gan.load():
        print("Model loaded")
    else:
        print("Initializing new model")

    self.trainable_gan.sample(self.sampler, self.sample_path)

    while (self.steps < self.total_steps or self.total_steps == -1) and not self.gan.destroy:
        self.step()

        if self.should_sample:
            self.should_sample = False
            self.sample(False)

        if (self.args.save_every is not None and
                self.args.save_every != -1 and
                self.args.save_every > 0 and
                self.steps % self.args.save_every == 0):
            print(" |= Saving network")
            self.trainable_gan.save()
            self.create_path(self.advSavePath + 'advSave.txt')
            if os.path.isfile(self.advSavePath + 'advSave.txt'):
                with open(self.advSavePath + 'advSave.txt', 'w') as the_file:
                    the_file.write(str(self.samples) + "\n")

        if self.args.ipython:
            self.check_stdin()

    print("Done training model. Saving")
    self.trainable_gan.save()
    print("============================")
    print("HyperGAN model trained")
    print("============================")
def train(config, args):
    title = "[hypergan] 2d-test " + args.config
    GlobalViewer.title = title
    GlobalViewer.enabled = args.viewer
    with tf.device(args.device):
        config.generator["class"] = "class:__main__.Sequence2DGenerator"
        config.discriminator["class"] = "class:__main__.Sequence2DDiscriminator"
        gan = hg.GAN(config, inputs=Sequence2DInputDistribution(args))
        sampler = Sequence2DSampler(gan)
        tf.train.start_queue_runners(sess=gan.session)
        samples = 0
        steps = args.steps
        sampler.sample("samples/000000.png", args.save_samples)
        #metrics = [accuracy_x_to_g, accuracy_g_to_x]
        #sum_metrics = [0 for metric in metrics]
        for i in range(steps):
            gan.step({gan.inputs.current_step: i})
            if args.viewer and i % args.sample_every == 0:
                samples += 1
                print("Sampling " + str(samples), args.save_samples)
                sample_file = "samples/%06d.png" % samples
                sampler.sample(sample_file, args.save_samples)
            #if i > steps * 9.0 / 10:
            #    for k, metric in enumerate(gan.session.run(metrics)):
            #        sum_metrics[k] += metric
            #if i % 300 == 0:
            #    for k, metric in enumerate(gan.metrics.keys()):
            #        metric_value = gan.session.run(gan.metrics[metric])
            #        print("--", metric, metric_value)
            #        if math.isnan(metric_value) or math.isinf(metric_value):
            #            print("Breaking due to invalid metric")
            #            return None
        tf.reset_default_graph()
        gan.session.close()
        return {}  # sum_metrics
def optimize_g(g, d, config, initial_graph):
    config['generator'] = g
    config['discriminators'] = [d]
    x, y, f, num_labels, examples_per_epoch = image_loader.labelled_image_tensors_from_directory(
        args.directory,
        config['batch_size'],
        channels=channels,
        format=args.format,
        crop=args.crop,
        width=width,
        height=height)
    initial_graph = {
        'x': x,
        'y': y,
        'f': f,
        'num_labels': num_labels,
        'examples_per_epoch': examples_per_epoch
    }
    gan = hg.GAN(config, initial_graph)
    return gan
def test_validate(self):
    with self.assertRaises(ValidationException):
        PyramidDiscriminator(hg.GAN(), {})
def test_config(self):
    with self.test_session():
        loss = SoftmaxLoss(hg.GAN(), loss_config)
        self.assertTrue(loss.config.test)
def setup_gan(config, inputs, args):
    gan = hg.GAN(config, inputs=inputs)
    gan.load(save_file)
    return gan
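# Several setup_gan variants in this collection read `args` and `save_file` from module
# scope. A hypothetical driver showing that convention (these names and defaults are
# assumptions for illustration, not taken from the source):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, default='default')
parser.add_argument('--batch_size', type=int, default=32)
args = parser.parse_args()
save_file = "save/" + args.config + "/model.ckpt"
# gan = setup_gan(config, inputs, args)  # config and inputs are built elsewhere in each example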
def test_config(self):
    with self.test_session():
        loss = LeastSquaresLoss(hg.GAN(), loss_config)
        self.assertTrue(loss.config.test)
def train():
    selector = hg.config.selector(args)
    config_name = "2d-measure-accuracy-" + str(uuid.uuid4())
    config = selector.random_config()
    config_filename = os.path.expanduser('~/.hypergan/configs/' + config_name + '.json')

    trainers = []
    rms_opts = {
        'g_momentum': [0, 0.1, 0.01, 1e-6, 1e-5, 1e-1, 0.9, 0.999, 0.5],
        'd_momentum': [0, 0.1, 0.01, 1e-6, 1e-5, 1e-1, 0.9, 0.999, 0.5],
        'd_decay': [0.8, 0.9, 0.99, 0.999, 0.995, 0.9999, 1],
        'g_decay': [0.8, 0.9, 0.99, 0.999, 0.995, 0.9999, 1],
        'clipped_gradients': [False, 1e-2],
        'clipped_d_weights': [False, 1e-2],
        'd_learn_rate': [1e-3, 1e-4, 5e-4, 1e-6, 4e-4, 5e-5],
        'g_learn_rate': [1e-3, 1e-4, 5e-4, 1e-6, 4e-4, 5e-5]
    }
    stable_rms_opts = {
        "clipped_d_weights": 0.01,
        "clipped_gradients": False,
        "d_decay": 0.995,
        "d_momentum": 1e-05,
        "d_learn_rate": 0.001,
        "g_decay": 0.995,
        "g_momentum": 1e-06,
        "g_learn_rate": 0.0005,
    }
    trainers.append(hg.trainers.rmsprop_trainer.config(**rms_opts))

    adam_opts = {
        'd_learn_rate': [1e-3, 1e-4, 5e-4, 1e-2, 1e-6],
        'g_learn_rate': [1e-3, 1e-4, 5e-4, 1e-2, 1e-6],
        'd_beta1': [0.9, 0.99, 0.999, 0.1, 0.01, 0.2, 1e-8],
        'd_beta2': [0.9, 0.99, 0.999, 0.1, 0.01, 0.2, 1e-8],
        'g_beta1': [0.9, 0.99, 0.999, 0.1, 0.01, 0.2, 1e-8],
        'g_beta2': [0.9, 0.99, 0.999, 0.1, 0.01, 0.2, 1e-8],
        'd_epsilon': [1e-8, 1, 0.1, 0.5],
        'g_epsilon': [1e-8, 1, 0.1, 0.5],
        'd_clipped_weights': [False, 0.01],
        'clipped_gradients': [False, 0.01]
    }
    trainers.append(hg.trainers.adam_trainer.config(**adam_opts))

    sgd_opts = {
        'd_learn_rate': [1e-3, 1e-4, 5e-4, 1e-2, 1e-6],
        'g_learn_rate': [1e-3, 1e-4, 5e-4, 1e-2, 1e-6],
        'd_clipped_weights': [False, 0.01],
        'clipped_gradients': [False, 0.01]
    }
    trainers.append(hg.trainers.sgd_trainer.config(**sgd_opts))

    encoders = []
    projections = []
    projections.append([hg.encoders.uniform_encoder.modal,
                        hg.encoders.uniform_encoder.identity])
    projections.append([hg.encoders.uniform_encoder.modal,
                        hg.encoders.uniform_encoder.sphere,
                        hg.encoders.uniform_encoder.identity])
    projections.append([hg.encoders.uniform_encoder.binary,
                        hg.encoders.uniform_encoder.sphere])
    projections.append([hg.encoders.uniform_encoder.sphere,
                        hg.encoders.uniform_encoder.identity])
    projections.append([hg.encoders.uniform_encoder.modal,
                        hg.encoders.uniform_encoder.sphere])
    projections.append([hg.encoders.uniform_encoder.sphere,
                        hg.encoders.uniform_encoder.identity,
                        hg.encoders.uniform_encoder.gaussian])
    encoder_opts = {
        'z': [16],
        'modes': [2, 4, 8, 16],
        'projections': projections
    }
    stable_encoder_opts = {
        "max": 1,
        "min": -1,
        "modes": 8,
        "projections": [[
            "function:hypergan.encoders.uniform_encoder.modal",
            "function:hypergan.encoders.uniform_encoder.sphere",
            "function:hypergan.encoders.uniform_encoder.identity"
        ]],
        "z": 16
    }

    losses = []
    lamb_loss_opts = {
        'reverse': [True, False],
        'reduce': [tf.reduce_mean, hg.losses.wgan_loss.linear_projection,
                   tf.reduce_sum, tf.reduce_logsumexp],
        'labels': [[-1, 1, 0], [0, 1, 1], [0, -1, -1], [1, -1, 0], [0, -1, 1],
                   [0, 1, -1], [0, 0.5, -0.5], [0.5, -0.5, 0], [0.5, 0, -0.5]],
        'alpha': [0, 1e-3, 1e-2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.999],
        'beta': [0, 1e-3, 1e-2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.999]
    }
    lsgan_loss_opts = {
        'reduce': [tf.reduce_mean, hg.losses.wgan_loss.linear_projection,
                   tf.reduce_sum, tf.reduce_logsumexp],
        'labels': [[-1, 1, 0], [0, 1, 1], [0, -1, -1], [1, -1, 0], [0, -1, 1],
                   [0, 1, -1], [0, 0.5, -0.5], [0.5, -0.5, 0], [0.5, 0, -0.5]]
    }
    stable_loss_opts = {
        "alpha": 0.5,
        "beta": [0.5, 0.8],
        "discriminator": None,
        "label_smooth": 0.26111111111111107,
        "labels": [[0, -1, -1]],
        "reduce": "function:tensorflow.python.ops.math_ops.reduce_mean",
        "reverse": True
    }
    #losses.append([hg.losses.wgan_loss.config(**loss_opts)])
    losses.append([hg.losses.lamb_gan_loss.config(**lamb_loss_opts)])
    #losses.append([hg.losses.lamb_gan_loss.config(**stable_loss_opts)])
    #losses.append([hg.losses.lamb_gan_loss.config(**stable_loss_opts)])
    losses.append([hg.losses.lsgan_loss.config(**lsgan_loss_opts)])
    #encoders.append([hg.encoders.uniform_encoder.config(**encoder_opts)])
    encoders.append([hg.encoders.uniform_encoder.config(**stable_encoder_opts)])

    custom_config = {
        'model': args.config,
        'batch_size': args.batch_size,
        'trainer': trainers,
        'generator': custom_generator_config(),
        'discriminators': [[custom_discriminator_config()]],
        'losses': losses,
        'encoders': encoders
    }
    custom_config_selector = hc.Selector()
    for key, value in custom_config.items():
        custom_config_selector.set(key, value)
        print("Set ", key, value)
    custom_config_selection = custom_config_selector.random_config()
    for key, value in custom_config_selection.items():
        config[key] = value

    config['dtype'] = tf.float32
    config = hg.config.lookup_functions(config)

    def circle(x):
        spherenet = tf.square(x)
        spherenet = tf.reduce_sum(spherenet, 1)
        lam = tf.sqrt(spherenet)
        return x / tf.reshape(lam, [int(lam.get_shape()[0]), 1])

    def modes(x):
        return tf.round(x * 2) / 2.0

    if args.distribution == 'circle':
        x = tf.random_normal([args.batch_size, 2])
        x = circle(x)
    elif args.distribution == 'modes':
        x = tf.random_uniform([args.batch_size, 2], -1, 1)
        x = modes(x)
    elif args.distribution == 'sin':
        x = tf.random_uniform((1, args.batch_size), -10.5, 10.5)
        x = tf.transpose(x)
        r_data = tf.random_normal((args.batch_size, 1), mean=0, stddev=0.1)
        xy = tf.sin(0.75 * x) * 7.0 + x * 0.5 + r_data * 1.0
        x = tf.concat([xy, x], 1) / 16.0
    elif args.distribution == 'arch':
        offset1 = tf.random_uniform((1, args.batch_size), -10, 10)
        xa = tf.random_uniform((1, 1), 1, 4)
        xb = tf.random_uniform((1, 1), 1, 4)
        x1 = tf.random_uniform((1, args.batch_size), -1, 1)
        xcos = tf.cos(x1 * np.pi + offset1) * xa
        xsin = tf.sin(x1 * np.pi + offset1) * xb
        x = tf.transpose(tf.concat([xcos, xsin], 0)) / 16.0

    initial_graph = {
        'x': x,
        'num_labels': 1,
    }

    print("Starting training for: " + config_filename)
    selector.save(config_filename, config)

    with tf.device(args.device):
        gan = hg.GAN(config, initial_graph)
        accuracy_x_to_g = batch_accuracy(gan.graph.x, gan.graph.g[0])
        accuracy_g_to_x = batch_accuracy(gan.graph.g[0], gan.graph.x)
        s = [int(g) for g in gan.graph.g[0].get_shape()]
        slice1 = tf.slice(gan.graph.g[0], [0, 0], [s[0] // 2, -1])
        slice2 = tf.slice(gan.graph.g[0], [s[0] // 2, 0], [s[0] // 2, -1])
        accuracy_g_to_g = batch_accuracy(slice1, slice2)
        x_0 = gan.sess.run(gan.graph.x)
        z_0 = gan.sess.run(gan.graph.z[0])
        gan.initialize_graph()

        ax_sum = 0
        ag_sum = 0
        diversity = 0.00001
        dlog = 0
        last_i = 0

        tf.train.start_queue_runners(sess=gan.sess)
        for i in range(500000):
            d_loss, g_loss = gan.train()
            if np.abs(d_loss) > 100 or np.abs(g_loss) > 100:
                ax_sum = ag_sum = 100000.00
                break
            if i % 1000 == 0 and i != 0:
                ax, ag, agg, dl = gan.sess.run(
                    [accuracy_x_to_g, accuracy_g_to_x, accuracy_g_to_g, gan.graph.d_log],
                    {gan.graph.x: x_0, gan.graph.z[0]: z_0})
                print("ERROR", ax, ag)
                if np.abs(ax) > 50.0 or np.abs(ag) > 50.0:
                    ax_sum = ag_sum = 100000.00
                    break
            #if i % 10000 == 0 and i != 0:
            #    g_vars = [var for var in tf.trainable_variables() if 'g_' in var.name]
            #    init = tf.initialize_variables(g_vars)
            #    gan.sess.run(init)
            if i > 490000:
                ax, ag, agg, dl = gan.sess.run(
                    [accuracy_x_to_g, accuracy_g_to_x, accuracy_g_to_g, gan.graph.d_log],
                    {gan.graph.x: x_0, gan.graph.z[0]: z_0})
                diversity += agg
                ax_sum += ax
                ag_sum += ag
                dlog = dl

    with open("results.csv", "a") as myfile:
        myfile.write(config_name + "," + str(ax_sum) + "," + str(ag_sum) + "," +
                     str(ax_sum + ag_sum) + "," + str(ax_sum * ag_sum) + "," +
                     str(dlog) + "," + str(diversity) + "," +
                     str(ax_sum * ag_sum * (1 / diversity)) + "," + str(last_i) + "\n")
    tf.reset_default_graph()
    gan.sess.close()
def setup_gan(config, inputs, args):
    gan = hg.GAN(config, inputs=inputs, batch_size=args.batch_size)
    # TODO: Our X dimensions don't always match the G. This causes gradient_penalty to fail.
    gan.inputs.gradient_penalty_label = gan.inputs.feed_y
    gan.create()
    return gan
def setup_gan(config, inputs, args):
    gan = hg.GAN(config, inputs=inputs, batch_size=args.batch_size)
    return gan
config['x_dims'] = [height, width]
config['channels'] = channels
config['model'] = 'colorizer'
config = hg.config.lookup_functions(config)
initial_graph = {
    'x': x,
    'y': y,
    'f': f,
    'num_labels': num_labels,
    'examples_per_epoch': examples_per_epoch
}
with tf.device(args.device):
    gan = hg.GAN(config, initial_graph, graph_type='generator', device=args.device)
    save_file = os.path.expanduser("~/.hypergan/saves/colorizer.ckpt")
    gan.load_or_initialize_graph(save_file)
    tf.train.start_queue_runners(sess=gan.sess)
    build_file = os.path.expanduser("~/.hypergan/builds/colorizer/generator.ckpt")
    saver = tf.train.Saver()
    saver.save(gan.sess, build_file)
    print("Saved generator to ", build_file)
tf.reset_default_graph()
gan.sess.close()  # was `self.sess.close()` in the source, but no `self` is in scope here
def test_cli(self):
    with self.test_session():
        gan = hg.GAN()
        args = {}
        cli = hg.CLI(gan, args)
        self.assertEqual(cli.gan, gan)
def mock_gan(batch_size=1, y=1):
    return hg.GAN(inputs=MockInput(batch_size=batch_size, y=y))
    height=height)  # tail of an image-loader call that is truncated in the source snippet
config['y_dims'] = num_labels
config['x_dims'] = [height, width]
config['channels'] = channels
config = hg.config.lookup_functions(config)
initial_graph = {
    'x': x,
    'y': y,
    'f': f,
    'num_labels': num_labels,
    'examples_per_epoch': examples_per_epoch
}
gan = hg.GAN(config, initial_graph)
save_file = os.path.expanduser("~/.hypergan/saves/super-resolution.ckpt")
gan.load_or_initialize_graph(save_file)
tf.train.start_queue_runners(sess=gan.sess)
for i in range(1000000):
    d_loss, g_loss = gan.train()
    if i % args.save_every == 0 and i > 0:
        print("Saving " + save_file)
        gan.save(save_file)
    if i % args.sample_every == 0 and i > 0:
        print("Sampling " + str(i))
        sample_file = "samples/" + str(i) + ".png"
import tensorflow as tf
import hyperchamber as hc
import numpy as np
from hypergan.gan_component import ValidationException
from hypergan.ops import TensorflowOps
from hypergan.gan_component import GANComponent
from hypergan.multi_component import MultiComponent
import hypergan as hg
from unittest.mock import MagicMock

gan = hg.GAN()
component = GANComponent(gan=gan, config={'test': True})

class GanComponentTest(tf.test.TestCase):
    def test_config(self):
        with self.test_session():
            self.assertEqual(component.config.test, True)

    def test_validate(self):
        with self.test_session():
            self.assertEqual(component.validate(), [])

    def test_gan(self):
        with self.test_session():
            self.assertEqual(component.gan, gan)

    def test_ops(self):
        with self.test_session():
            self.assertEqual(type(component.ops), TensorflowOps)
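# The test module above is presumably run under TensorFlow's test runner; a typical
# closer for tf.test.TestCase modules (an assumption, not shown in the source) would be:
if __name__ == "__main__":
    tf.test.main()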