def config(
        z_projection_depth=512,
        activation=generator_prelu,
        final_activation=tf.nn.tanh,
        depth_reduction=2,
        layer_filter=None,
        layer_regularizer=batch_norm_1,
        block=[standard_block],
        resize_image_type=1,
        sigmoid_gate=False,
        create_method=None):
    selector = hc.Selector()
    if create_method is None:
        selector.set('create', create)
    else:
        selector.set('create', create_method)

    selector.set("z_projection_depth", z_projection_depth)  # Used in the first layer - the linear projection of z
    selector.set("activation", activation)  # activation function used inside the generator
    selector.set("final_activation", final_activation)  # Last layer of G.  Should match the range of your input - typically -1 to 1
    selector.set("depth_reduction", depth_reduction)  # Divides our depth by this amount every time we go up in size
    selector.set('layer_filter', layer_filter)  # add information to G
    selector.set('layer_regularizer', layer_regularizer)
    selector.set('block', block)
    selector.set('resize_image_type', resize_image_type)
    selector.set('sigmoid_gate', sigmoid_gate)
    return selector.random_config()
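# Usage sketch: drawing one generator config.  The module path is an
# assumption; list-valued arguments are sampled from by random_config(),
# while scalar arguments pass through unchanged.
from hypergan.generators import resize_conv_generator

g = resize_conv_generator.config(depth_reduction=4)
print(g.z_projection_depth, g.depth_reduction)  # e.g. 512 4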
def discriminator(self):
    discriminator_opts = {
        "activation": ['relu', 'lrelu', 'tanh', 'selu', 'prelu', 'crelu'],
        "final_activation": [None],
        "block_repeat_count": [1, 2, 3],
        "block": [
            hg.discriminators.common.repeating_block,
            hg.discriminators.common.standard_block,
            hg.discriminators.common.strided_block
        ],
        "depth_increase": [32],
        "extra_layers": [0, 1, 2, 3],
        "extra_layers_reduction": [1, 2, 4],
        "fc_layer_size": [300, 400, 500],
        "fc_layers": [0, 1],
        "first_conv_size": [32],
        "layers": [3, 4, 5, 6],
        "initial_depth": [32],
        "initializer": ['xavier'],
        "layer_regularizer": [None, 'layer_norm'],
        "noise": [False, 1e-2],
        "progressive_enhancement": [False, True],
        "orthogonal_gain": list(np.linspace(0.1, 2, num=10000)),
        "random_stddev": list(np.linspace(0.0, 0.1, num=10000)),
        "distance": ['l1_distance', 'l2_distance'],
        "class": [
            hg.discriminators.pyramid_discriminator.PyramidDiscriminator
            # hg.discriminators.autoencoder_discriminator.AutoencoderDiscriminator
        ]
    }
    return hc.Selector(discriminator_opts).random_config()
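# Usage sketch: drawing a few discriminator configs during random search.
# The class name RandomSearch and its overrides-dict constructor are
# assumptions modeled on the __init__ shown further down.
search = RandomSearch({})
for trial in range(3):
    d = search.discriminator()
    print(trial, d.layers, d["class"])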
def search(config, inputs, args):
    metrics = train(config, inputs, args)
    config_filename = "colorizer-" + str(uuid.uuid4()) + '.json'
    hc.Selector().save(config_filename, config)
    with open(args.search_output, "a") as myfile:
        myfile.write(config_filename + "," + ",".join([str(x) for x in metrics]) + "\n")
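# Sketch of reading the results file back: each line holds the saved config
# filename followed by comma-separated metric values (assumed numeric).
with open("search.csv") as f:  # stands in for args.search_output
    for line in f:
        name, *metric_values = line.strip().split(",")
        print(name, [float(v) for v in metric_values])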
def config(
        d_learn_rate=1e-3,
        d_epsilon=1e-8,
        d_beta1=0.9,
        d_beta2=0.999,
        g_learn_rate=1e-3,
        g_epsilon=1e-8,
        g_beta1=0.9,
        g_beta2=0.999,
        d_clipped_weights=False,
        clipped_gradients=False):
    selector = hc.Selector()
    selector.set('create', create)
    selector.set('run', run)
    selector.set('d_learn_rate', d_learn_rate)
    selector.set('d_epsilon', d_epsilon)
    selector.set('d_beta1', d_beta1)
    selector.set('d_beta2', d_beta2)
    selector.set('g_learn_rate', g_learn_rate)
    selector.set('g_epsilon', g_epsilon)
    selector.set('g_beta1', g_beta1)
    selector.set('g_beta2', g_beta2)
    selector.set('clipped_gradients', clipped_gradients)
    selector.set('d_clipped_weights', d_clipped_weights)
    return selector.random_config()
def config(resize=None, layers=None, dense_layers=2, dense_size=16, batch_norm=layer_norm_1):
    selector = hc.Selector()
    selector.set("activation", [lrelu])  # or prelu("d_")
    selector.set('regularizer', [batch_norm])

    if layers is None:
        layers = [4]
    selector.set("layers", layers)  # Layers in D
    selector.set("dense.layers", dense_layers)  # Fully connected layers in D
    selector.set("dense.size", dense_size)  # Size of fully connected layers

    selector.set('add_noise', [False])  # add noise to input
    selector.set('noise_stddev', [1e-1])  # the amount of noise to add - always centered at 0
    selector.set('regularizers', [[minibatch_regularizer.get_features]])  # these regularizers get applied at the end of D
    selector.set('resize', [resize])
    selector.set('create', discriminator)
    return selector.random_config()
def selector(args):
    selector = hc.Selector()
    selector.set('dtype', tf.float32)  # The data type to use in our GAN.  Only float32 is supported at the moment

    # Z encoder configuration
    selector.set('encoders', [[uniform_encoder.config()]])

    # Generator configuration
    selector.set("generator", [resize_conv_generator.config()])

    # Trainer configuration
    selector.set("trainer", adam_trainer.config())

    # Discriminator configuration
    discriminators = []
    for i in range(1):
        discriminators.append(pyramid_discriminator.config(layers=5))
    selector.set("discriminators", [discriminators])

    # Loss configuration
    losses = []
    for i in range(1):
        losses.append(wgan_loss.config())
    selector.set("losses", [losses])

    return selector
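# Sketch: drawing one concrete GAN description from the selector built above.
# args is not used by selector() itself, so None stands in here.
gan_config = selector(None).random_config()
print(gan_config.generator)        # the sampled generator config
print(gan_config.discriminators)   # a list holding one pyramid discriminator config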
def load(configuration, verbose=True):
    config_file = Configuration.find(configuration, verbose=verbose)
    if config_file is None:
        print("[hypergan] Could not find config named:", configuration, "checked paths", Configuration.paths)
        return None
    if verbose:
        print("[hypergan] Loading config", config_file)
    return hc.Selector().load(config_file)
def config():
    selector = hc.Selector()
    selector.set("reduce", [tf.reduce_mean])  # reduce_sum and reduce_logexp also work
    selector.set('create', create)
    selector.set('batch_norm', layer_norm_1)
    return selector.random_config()
def search(config, args):
    metrics = train(config, args)
    config_filename = "classification-" + str(uuid.uuid4()) + '.json'
    hc.Selector().save(config_filename, config)
    with open(args.search_output, "a") as myfile:
        print("Writing result")
        myfile.write(config_filename + "," + ",".join([str(x) for x in metrics]) + "\n")
def config():
    selector = hc.Selector()
    selector.set("reduce", [linear_projection])  # tf.reduce_mean, reduce_sum and reduce_logexp also work
    selector.set("label_smooth", list(np.linspace(0.15, 0.35, num=100)))
    selector.set('discriminator', None)
    selector.set('create', create)
    return selector.random_config()
def load(configuration, verbose=True, use_toml=False, prepackaged=False):
    config_file = Configuration.find(configuration, verbose=verbose, prepackaged=prepackaged)
    if config_file is None:
        print("[hypergan] Could not find config named:", configuration, "checked paths", Configuration.paths)
        return None
    if verbose:
        print("[hypergan] Loading config", config_file)
    return hc.Selector().load(config_file, load_toml=use_toml)
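# Usage sketch, assuming this load() lives on hypergan's Configuration class
# (name resolution handled by Configuration.find):
config = Configuration.load("default", verbose=True)
if config is None:
    print("no such configuration")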
def fc_discriminator(self):
    opts = {
        "activation": ["selu", "lrelu", "relu"],
        "layer_regularizer": [None, "layer_norm"],
        "linear_type": [None, "cosine"],
        "features": [1, 10, 100, 200, 512],
        "class": "class:hypergan.discriminators.fully_connected_discriminator.FullyConnectedDiscriminator"
    }
    return hc.Selector(opts).random_config()
def encoder(self):
    projections = []
    projections.append([hg.encoders.uniform_distribution.identity])
    encoder_opts = {
        'z': 1,
        'min': -1,
        'max': 1,
        "projections": projections,
        'class': hg.encoders.uniform_distribution.UniformDistribution
    }
    return hc.Selector(encoder_opts).random_config()
def config(
        reduce=linear_projection,
        discriminator=None,
        labels=[[0, -1, -1]]  # a,b,c in the paper
):
    selector = hc.Selector()
    selector.set("reduce", reduce)
    selector.set('discriminator', discriminator)
    selector.set('create', create)
    selector.set('labels', labels)
    return selector.random_config()
def search(config, inputs, args):
    config_name = "alignment-" + str(uuid.uuid4()).split("-")[0]
    config_filename = config_name + '.json'
    print("Saving config to ", config_filename)
    hc.Selector().save(config_filename, config)
    metrics = train(config, inputs, args)
    with open(args.search_output, "a") as myfile:
        accuracies = ["%.2f" % metric for metric in (metrics["accuracy"] or [])]
        diversities = ["%.2f" % metric for metric in (metrics["diversity"] or [])]
        myfile.write(config_name + "," + ",".join(accuracies) + "," + ",".join(diversities) + "\n")
def config(reduce=tf.reduce_mean, reverse=False, discriminator=None):
    selector = hc.Selector()
    selector.set("reduce", reduce)
    selector.set('reverse', reverse)
    selector.set('discriminator', discriminator)
    selector.set('create', create)
    return selector.random_config()
def var_loss(self):
    loss_opts = {
        'class': [VralLoss],
        "target_mean": [-1, -0.5, 0, 0.5, 1],
        "fake_mean": [-1, -0.5, 0, 0.5, 1],
        'reduce': ['reduce_mean', 'reduce_sum', 'reduce_logsumexp'],
        'type': ['log_rr', 'log_rf', 'log_fr', 'log_ff', 'log_all'],
        'value_function': ['square', 'log', 'original'],
        'g_loss': ['l2', 'fr_l2', 'rr_l2'],
        "r_discriminator": self.fc_discriminator()
    }
    # The real and fake discriminators share a single sampled config
    loss_opts["f_discriminator"] = loss_opts["r_discriminator"]
    return hc.Selector(loss_opts).random_config()
def config(d_learn_rate=1e-3, g_learn_rate=1e-3, d_clipped_weights=False, clipped_gradients=False):
    selector = hc.Selector()
    selector.set('create', create)
    selector.set('run', run)
    selector.set('d_learn_rate', d_learn_rate)
    selector.set('g_learn_rate', g_learn_rate)
    selector.set('clipped_gradients', clipped_gradients)
    selector.set('d_clipped_weights', d_clipped_weights)
    return selector.random_config()
def config(z=[16, 32, 64], min=-1, max=1, projections=[[identity, modal, sphere]], modes=4):
    selector = hc.Selector()
    selector.set('create', create)
    selector.set('z', z)
    selector.set('min', min)
    selector.set('max', max)
    selector.set('projections', projections)
    selector.set('modes', modes)
    return selector.random_config()
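# Sketch: list-valued arguments above are choice sets, so random_config()
# draws one element per key (hyperchamber behavior assumed), while scalars
# such as modes pass through unchanged.
encoder_config = config()
assert encoder_config.z in [16, 32, 64]
assert encoder_config.modes == 4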
def config(
        reduce=wgan_loss.linear_projection,
        reverse=False,
        discriminator=None,
        label_smooth=list(np.linspace(0.15, 0.35, num=10)),
        alpha=0.001,
        beta=0.2,
        labels=[[0.5, 0, -0.5]]):
    selector = hc.Selector()
    selector.set("reduce", reduce)
    selector.set('reverse', reverse)
    selector.set('discriminator', discriminator)
    selector.set("label_smooth", label_smooth)
    selector.set('create', create)
    selector.set('alpha', alpha)
    selector.set('beta', beta)
    selector.set('labels', labels)
    return selector.random_config()
def config(resize=None, layers=5):
    selector = hc.Selector()
    selector.set("final_activation", [tf.nn.tanh])  # or prelu("d_")
    selector.set("activation", [lrelu])  # or prelu("d_")
    selector.set('regularizer', [layer_norm_1])

    selector.set("layers", layers)  # Layers in D
    selector.set("depth_increase", [2])  # Size increase of D's features on each layer
    selector.set('add_noise', [False])  # add noise to input
    selector.set('layer_filter', [None])  # add information to D
    selector.set('layer_filter.progressive_enhancement_enabled', True)
    selector.set('noise_stddev', [1e-1])  # the amount of noise to add - always centered at 0
    selector.set('resize', [resize])
    selector.set('create', discriminator)
    return selector.random_config()
def __init__(self, overrides):
    self.options = {
        'g_encoder': self.discriminator(),
        'z_discriminator': self.discriminator(),
        'discriminator': self.discriminator(),
        'generator': self.generator(),
        'trainer': self.trainer(),
        'loss': self.loss(),
        'encoder': self.encoder()
    }

    alpha_options = {
        'g_encoder_layers': [2, 3, 4, 5],
        'z_discriminator_layers': [0, 1, 2],
        'z_discriminator_extra_layers': [0, 1, 2],
        'z_discriminator_extra_layers_reduction': [1, 2],
        'cycloss_lambda': [0.1, 0.3, 0.2],
        'concat_linear': [64, 128, 256],
        'concat_linear_filters': [32, 64, 128, 256],
        'skip_linear': [False, True],
        'd_layer_filter': [True, False],
        'g_layer_filter': [True, False],
        'encode_layer_filter': [True, False]
    }
    alpha_config = hc.Selector(alpha_options).random_config()

    self.options['g_encoder']['layers'] = alpha_config.g_encoder_layers
    self.options['z_discriminator']['layers'] = alpha_config.z_discriminator_layers
    self.options['z_discriminator']['extra_layers'] = alpha_config.z_discriminator_extra_layers
    self.options['z_discriminator']['extra_layers_reduction'] = alpha_config.z_discriminator_extra_layers_reduction
    self.options['cycloss_lambda'] = alpha_config.cycloss_lambda
    self.options['generator']['concat_linear'] = alpha_config.concat_linear
    self.options['generator']['concat_linear_filters'] = alpha_config.concat_linear_filters
    self.options['generator']['skip_linear'] = alpha_config.skip_linear
    self.options["class"] = "class:hypergan.gans.alpha_gan.AlphaGAN"
    self.options['d_layer_filter'] = alpha_config.d_layer_filter
    self.options['g_layer_filter'] = alpha_config.g_layer_filter

    # Caller-supplied overrides are merged last, so they take precedence
    self.options = {**self.options, **overrides}
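# Sketch: because overrides are merged last, caller-supplied values win over
# the sampled ones.  The class name AlphaGANRandomSearch is an assumption
# for illustration.
search = AlphaGANRandomSearch({"cycloss_lambda": 10.0})
print(search.options["cycloss_lambda"])  # 10.0, not one of the sampled values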
def loss(self):
    loss_opts = {
        'reverse': [True, False],
        'reduce': ['reduce_mean', 'reduce_sum', 'reduce_logsumexp'],
        'gradient_penalty': False,
        'labels': [[0, 1, 1]],
        'alpha': self.range(),
        'beta': self.range(),
        'gamma': self.range(),
        'label_smooth': self.range(),
        'use_k': [False, True],
        'initial_k': self.range(),
        'k_lambda': self.range(.001),
        'type': ['wgan', 'lsgan', 'softmax'],
        'minibatch': [False],
        'class': [LeastSquaresLoss]
    }
    return hc.Selector(loss_opts).random_config()
def encoder(self):
    projections = []
    projections.append([hg.encoders.uniform_encoder.identity])
    projections.append([hg.encoders.uniform_encoder.sphere])
    projections.append([hg.encoders.uniform_encoder.binary])
    projections.append([hg.encoders.uniform_encoder.modal])
    projections.append([
        hg.encoders.uniform_encoder.modal,
        hg.encoders.uniform_encoder.identity
    ])
    projections.append([
        hg.encoders.uniform_encoder.modal,
        hg.encoders.uniform_encoder.sphere,
        hg.encoders.uniform_encoder.identity
    ])
    projections.append([
        hg.encoders.uniform_encoder.binary,
        hg.encoders.uniform_encoder.sphere
    ])
    projections.append([
        hg.encoders.uniform_encoder.sphere,
        hg.encoders.uniform_encoder.identity
    ])
    projections.append([
        hg.encoders.uniform_encoder.modal,
        hg.encoders.uniform_encoder.sphere
    ])
    projections.append([
        hg.encoders.uniform_encoder.sphere,
        hg.encoders.uniform_encoder.identity,
        hg.encoders.uniform_encoder.gaussian
    ])
    encoder_opts = {
        'z': list(np.arange(0, 100) * 2),
        'modes': list(np.arange(2, 24)),
        'projections': projections,
        'min': -1,
        'max': 1,
        'class': hg.encoders.uniform_encoder.UniformEncoder
    }
    return hc.Selector(encoder_opts).random_config()
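# Sketch: sampling a few encoder configs to see which projection combinations
# come up.  `search` stands in for an instance of this search class; each
# sampled config carries one of the combinations from the list above.
for _ in range(3):
    enc = search.encoder()
    print(enc.z, enc.modes, [p.__name__ for p in enc.projections])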
def loss_instance(self):
    loss_opts = {
        'class': [FDivergenceLoss, StandardLoss, LeastSquaresLoss, WassersteinLoss],
        "type": [
            "kl", "js", "gan", "reverse_kl", "pearson",
            "squared_hellinger", "total_variation"
        ],
        "labels": [[-1, 1, 1]],
        'reduce': ['reduce_mean']  # 'reduce_sum' and 'reduce_logsumexp' also work
    }
    choice = hc.Selector(loss_opts).random_config()
    # Randomly reuse the sampled divergence type as the regularizer and/or G loss type
    if random.choice([True, False]):
        choice["regularizer"] = choice["type"]
    if random.choice([True, False]):
        choice["g_loss_type"] = choice["type"]
    return choice
def config():
    selector = hc.Selector()
    selector.set('create', create)
    selector.set('run', run)
    selector.set('d_learn_rate', 1e-3)
    selector.set('discriminator_epsilon', 1e-8)
    selector.set('discriminator_beta1', 0.9)
    selector.set('discriminator_beta2', 0.999)
    selector.set('g_learn_rate', 1e-3)
    selector.set('generator_epsilon', 1e-8)
    selector.set('generator_beta1', 0.9)
    selector.set('generator_beta2', 0.999)
    selector.set('capped', False)
    selector.set('clipped_discriminator', False)
    return selector.random_config()
def generator(self):
    generator_opts = {
        "activation": ['relu', 'lrelu', 'tanh', 'selu', 'prelu', 'crelu'],
        "final_depth": [32],
        "depth_increase": [32],
        "initializer": [None, 'random'],
        "random_stddev": list(np.linspace(0.0, 0.1, num=10000)),
        "final_activation": ['lrelu', 'tanh'],
        "block_repeat_count": [1, 2, 3],
        "block": [
            hg.generators.common.standard_block,
            hg.generators.common.inception_block,
            hg.generators.common.dense_block,
            hg.generators.common.repeating_block
        ],
        "orthogonal_initializer_gain": list(np.linspace(0.1, 2, num=100)),
        "class": [hg.generators.resize_conv_generator.ResizeConvGenerator]
    }
    return hc.Selector(generator_opts).random_config()
def config(
        g_momentum=0.01,
        d_momentum=0.00001,
        g_decay=0.999,
        d_decay=0.995,
        d_learn_rate=0.0005,
        g_learn_rate=0.0004,
        clipped_gradients=False,
        clipped_d_weights=0.01):
    selector = hc.Selector()
    selector.set('create', create)
    selector.set('run', run)
    selector.set('g_momentum', g_momentum)
    selector.set('d_momentum', d_momentum)
    selector.set('g_decay', g_decay)
    selector.set('d_decay', d_decay)
    selector.set('clipped_gradients', clipped_gradients)
    selector.set("d_learn_rate", d_learn_rate)
    selector.set("g_learn_rate", g_learn_rate)
    selector.set("clipped_d_weights", clipped_d_weights)
    return selector.random_config()
def config(
        activation=lrelu,
        depth_increase=2,
        final_activation=None,
        first_conv_size=16,
        first_strided_conv_size=64,
        layer_regularizer=layer_norm_1,
        layers=5,
        resize=None,
        noise=None,
        layer_filter=None,
        progressive_enhancement=True,
        fc_layers=0,
        fc_layer_size=1024,
        strided=False,
        create=None):
    selector = hc.Selector()
    selector.set("activation", activation)  # or prelu("d_")
    selector.set("depth_increase", depth_increase)  # Size increase of D's features on each layer
    selector.set("final_activation", final_activation)
    selector.set("first_conv_size", first_conv_size)
    selector.set("first_strided_conv_size", first_strided_conv_size)
    selector.set("layers", layers)  # Layers in D

    if create is None:
        selector.set('create', discriminator)
    else:
        selector.set('create', create)

    selector.set('fc_layer_size', fc_layer_size)  # Size of fully connected layers
    selector.set('fc_layers', fc_layers)
    selector.set('layer_filter', layer_filter)  # add information to D
    selector.set('layer_regularizer', layer_regularizer)
    selector.set('noise', noise)  # add noise to input
    selector.set('progressive_enhancement', progressive_enhancement)
    selector.set('resize', resize)
    selector.set('strided', strided)  # TODO: True does not work
    return selector.random_config()
def trainer(self):
    tftrainers = [
        # tf.train.AdadeltaOptimizer,
        # tf.train.AdagradOptimizer,
        # tf.train.GradientDescentOptimizer,
        tf.train.AdamOptimizer,
        # tf.train.MomentumOptimizer,
        tf.train.RMSPropOptimizer
    ]

    selector = hc.Selector({
        'learn_rate': [1e-2, 1e-3, 1e-4, 5e-3, 5e-4],
        'beta1': self.range(0.8, 0.9999),
        'beta2': self.range(0.9, 0.9999),
        'epsilon': self.range(1e-8, 0.1),
        'momentum': [0, 0.01, 0.1],
        'decay': self.range(0.8, 0.9999),
        'rho': self.range(),
        'initial_accumulator_value': self.range(),
        'clipped_gradients': False,
        'trainer': tftrainers,
        'class': [
            # hg.trainers.proportional_control_trainer.create,
            # hg.trainers.alternating_trainer.AlternatingTrainer
            hg.trainers.consensus_trainer.ConsensusTrainer
        ]
    })

    config = selector.random_config()
    return config
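# Sketch: one sampled trainer config pairs a tf.train optimizer class with
# sampled hyperparameters.  `search` stands in for an instance of this class;
# keys an optimizer does not use (e.g. momentum for Adam) are presumably
# ignored downstream.
t = search.trainer()
print(t.trainer.__name__, t.learn_rate, t.beta1, t.beta2)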