def get_default_models(self):
    """Weight-decay study: dim-wise masked L1 VAE with a sweep over L2 strength.

    Masks are full (no zero entries) and frozen; only the L2 penalty varies.
    """
    config_weight_decay = h.zipit([
        h.fixed("model.name", "weight_decay_vae"),
        h.fixed("model.model", "@dim_wise_mask_l1_vae()"),
        h.fixed("vae.beta", self.beta),
        h.fixed("dim_wise_l1_vae.scale_per_layer", self.scale_per_layer),
        h.fixed("dim_wise_l1_vae.dim", self.dim),
        h.fixed("dim_wise_l1_vae.all_layers", self.all_layers),
        h.fixed("conv_encoder.all_layers", self.all_layers),
        # Make the masks full (no zero-entries).
        # NOTE(review): a fixed value wrapped in h.discrete(...) — sibling
        # sweeps pass raw values to h.fixed; confirm the wrapping is intended.
        h.fixed("conv_encoder.perc_sparse", h.discrete(0)),
        # NOTE(review): original comment said "allow to modify the entries",
        # but the value is False (not trainable) — comment looked stale.
        h.fixed("masked_layer.mask_trainable", False),
        h.sweep("dim_wise_mask_l1_vae.lmbd_l2",
                h.discrete(self.lmbd_l2_range)),
        # NOTE(review): same h.discrete-around-a-scalar pattern as above.
        h.fixed("dim_wise_l1_vae.lmbd_l1",
                h.discrete(self.lmbd_l1_range[0])),
    ])
    return h.chainit([config_weight_decay])
def get_default_models():
    """Supervised VAE sweep: 3 betas x 3 omegas = 9 models.

    The previous docstring ("6 model * 6 hyperparameters=36 models") was a
    stale copy-paste from the default study; this function builds a single
    supervised-VAE config over a 3x3 grid.
    """
    model_name = h.fixed("model.name", "supervised_vae")
    model_fn = h.fixed("model.model", "@supervised_vae()")
    betas = h.sweep("supervised_vae.beta", h.discrete([0., 1., 4.]))
    omegas = h.sweep("supervised_vae.omega", h.discrete([0.0, 1.0, 10.0]))
    params_product = h.product([betas, omegas])
    config_supervised_vae = h.zipit([model_name, params_product, model_fn])
    return h.chainit([config_supervised_vae])
def get_default_models():
    """Balanced beta-VAE sweep: 8 beta_sizes x 8 beta_variances = 64 models.

    The previous docstring ("6 model * 6 hyperparameters=36 models") was a
    stale copy-paste from the default study; the actual grid is 8x8.
    """
    model_name = h.fixed("model.name", "balanced_beta_vae")
    model_fn = h.fixed("model.model", "@balanced_beta_vae()")
    beta_sizes = h.sweep("balanced_beta_vae.beta_size",
                         h.discrete([0.0, 0.1, 0.5, 1., 2., 4., 8., 16.]))
    beta_variances = h.sweep("balanced_beta_vae.beta_variance",
                             h.discrete([0.0, 0.1, 0.5, 1., 2., 4., 8., 16.]))
    betas_product = h.product([beta_sizes, beta_variances])
    config_balanced_beta_vae = h.zipit([model_name, betas_product, model_fn])
    return h.chainit([config_balanced_beta_vae])
def get_default_models():
    """Augmented-variance VAE sweep: 8 mean_var x 8 variance weights = 64 models.

    Fixes two copy-paste leftovers from the balanced-beta sweep: the stale
    "36 models" docstring and the misnamed local `config_balanced_beta_vae`.
    """
    model_name = h.fixed("model.name", "augmented_variance_vae")
    model_fn = h.fixed("model.model", "@augmented_variance_vae()")
    mean_var_weights = h.sweep("augmented_variance_vae.mean_var_weight",
                               h.discrete([0.0, 0.1, 0.5, 1., 2., 4., 8., 16.]))
    variance_weights = h.sweep("augmented_variance_vae.variance_weight",
                               h.discrete([0.0, 0.1, 0.5, 1., 2., 4., 8., 16.]))
    weights_product = h.product([mean_var_weights, variance_weights])
    config_augmented_variance_vae = h.zipit(
        [model_name, weights_product, model_fn])
    return h.chainit([config_augmented_variance_vae])
def get_default_models(self):
    """Softmax-encoder VAE: sweep the softmax temperature over 6 log-spaced values."""
    temperatures = h.sweep(
        "conv_encoder.softmax_temperature",
        h.discrete(np.logspace(-4., 2., 6, endpoint=False)),
    )
    config_masked = h.zipit([
        h.fixed("model.name", "softmax_vae"),
        h.fixed("model.model", "@vae()"),
        h.fixed("vae.beta", self.beta),
        h.fixed("conv_encoder.softmax_layers", True),
        h.fixed("conv_encoder.scale_temperature", self.scale_temperature),
        temperatures,
        h.fixed("conv_encoder.all_layers", self.all_layers),
    ])
    return h.chainit([config_masked])
def get_default_models(self):
    """Variational-dropout VAE: sweep the VD KLD weight over 6 log-spaced values."""
    kld_weights = h.sweep(
        "vd_vae.lmbd_kld_vd",
        h.discrete([*np.logspace(-5, 1, 6, endpoint=False)]),
    )
    config_vdm = h.zipit([
        h.fixed("model.name", "vd_vae"),
        h.fixed("model.model", "@vd_vae()"),
        h.fixed("vae.beta", self.beta),
        h.fixed("conv_encoder.all_layers", self.all_layers),
        h.fixed("conv_encoder.vd_layers", True),
        h.fixed("vd_vae.vd_threshold", 3.),
        h.fixed("vd_vae.scale_per_layer", self.scale_per_layer),
        h.fixed("vd_vae.anneal_kld_from", self.anneal_kld_from),
        h.fixed("vd_vae.anneal_kld_for", self.anneal_kld_for),
        kld_weights,
    ])
    return h.chainit([config_vdm])
def get_default_models():
    """Beta-VAE sweep over 6 beta values (6 models).

    The previous docstring ("6 model * 6 hyperparameters=36 models") was a
    stale copy-paste from the default study; only beta is swept here.
    """
    model_name = h.fixed("model.name", "beta_vae")
    model_fn = h.fixed("model.model", "@vae()")
    betas = h.sweep("vae.beta", h.discrete([0., 0.5, 1., 2., 4., 8.]))
    params_product = h.product([betas])
    config_vae = h.zipit([model_name, params_product, model_fn])
    return h.chainit([config_vae])
def get_default_models():
    """Our default set of models (6 model * 6 hyperparameters=36 models)."""
    # BetaVAE: sweep the KL weight.
    config_beta_vae = h.zipit([
        h.fixed("model.name", "beta_vae"),
        h.sweep("vae.beta", h.discrete([1., 2., 4., 6., 8., 16.])),
        h.fixed("model.model", "@vae()"),
    ])
    # AnnealedVAE: sweep the capacity target c_max.
    config_annealed_beta_vae = h.zipit([
        h.fixed("model.name", "annealed_vae"),
        h.sweep("annealed_vae.c_max",
                h.discrete([5., 10., 25., 50., 75., 100.])),
        h.fixed("annealed_vae.iteration_threshold", 100000),
        h.fixed("annealed_vae.gamma", 1000),
        h.fixed("model.model", "@annealed_vae()"),
    ])
    # FactorVAE: sweep gamma; needs a discriminator function.
    config_factor_vae = h.zipit([
        h.fixed("model.name", "factor_vae"),
        h.sweep("factor_vae.gamma",
                h.discrete([10., 20., 30., 40., 50., 100.])),
        h.fixed("model.model", "@factor_vae()"),
        h.fixed("discriminator.discriminator_fn", "@fc_discriminator"),
    ])
    # DIP-VAE-I: sweep lambda_od with d-factor 10.
    config_dip_vae_i = h.zipit([
        h.fixed("model.name", "dip_vae_i"),
        h.fixed("model.model", "@dip_vae()"),
        h.sweep("dip_vae.lambda_od",
                h.discrete([1., 2., 5., 10., 20., 50.])),
        h.fixed("dip_vae.lambda_d_factor", 10.),
        h.fixed("dip_vae.dip_type", "i"),
    ])
    # DIP-VAE-II: same lambda_od sweep with d-factor 1.
    config_dip_vae_ii = h.zipit([
        h.fixed("model.name", "dip_vae_ii"),
        h.fixed("model.model", "@dip_vae()"),
        h.sweep("dip_vae.lambda_od",
                h.discrete([1., 2., 5., 10., 20., 50.])),
        h.fixed("dip_vae.lambda_d_factor", 1.),
        h.fixed("dip_vae.dip_type", "ii"),
    ])
    # BetaTCVAE: sweep the total-correlation weight.
    config_beta_tc_vae = h.zipit([
        h.fixed("model.name", "beta_tc_vae"),
        h.fixed("model.model", "@beta_tc_vae()"),
        h.sweep("beta_tc_vae.beta", h.discrete([1., 2., 4., 6., 8., 10.])),
    ])
    return h.chainit([
        config_beta_vae, config_factor_vae, config_dip_vae_i,
        config_dip_vae_ii, config_beta_tc_vae, config_annealed_beta_vae,
    ])
def get_default_models(self):
    """Small-VAE study: sweep the fraction of encoder units kept."""
    kept_fractions = h.sweep(
        "conv_encoder.perc_units",
        h.discrete(1 - np.linspace(.1, 1, 6, endpoint=False)),
    )
    config_vae = h.zipit([
        h.fixed("model.name", "small_vae"),
        h.fixed("vae.beta", self.beta),
        kept_fractions,
        h.fixed("model.model", "@vae()"),
    ])
    return h.chainit([config_vae])
def get_default_models(self):
    """Proximal VAE study: sweep the proximal penalty strength."""
    prox_strengths = h.sweep("proximal_vae.lmbd_prox",
                             h.discrete(self.lmbd_prox_range))
    config_proximal = h.zipit([
        h.fixed("model.name", "proximal_vae"),
        h.fixed("model.model", "@proximal_vae()"),
        h.fixed("vae.beta", self.beta),
        h.fixed("proximal_vae.all_layers", self.all_layers),
        prox_strengths,
    ])
    return h.chainit([config_proximal])
def get_default_models(self):
    """WAE study: sweep beta with fixed scale.

    Adaptive scaling is enabled exactly when code normalization is off
    (the two flags are set to complementary values of self.code_norm).
    """
    config_vae = h.zipit([
        h.fixed("model.name", "wae"),
        h.fixed("model.model", "@wae()"),
        h.fixed("wae.scale", 1 / 8),
        h.fixed("wae.adaptive", not self.code_norm),
        h.fixed("conv_encoder.code_normalization", self.code_norm),
        h.sweep("vae.beta", h.discrete([*self._beta_range])),
    ])
    return h.chainit([config_vae])
def get_default_models(self):
    """Masked VAE study: sweep the encoder sparsity fraction over 6 values."""
    sparsity_levels = h.sweep(
        "conv_encoder.perc_sparse",
        h.discrete([*np.linspace(.1, 1, 6, endpoint=False)]),
    )
    config_masked = h.zipit([
        h.fixed("model.name", "masked_vae"),
        h.fixed("model.model", "@vae()"),
        h.fixed("vae.beta", self.beta),
        sparsity_levels,
        h.fixed("conv_encoder.all_layers", self.all_layers),
    ])
    return h.chainit([config_masked])
def get_default_models(self):
    """Dim-wise L1 VAE study: sweep the L1 strength over 8 log-spaced values."""
    l1_strengths = h.sweep(
        "dim_wise_l1_vae.lmbd_l1",
        h.discrete([*np.logspace(-5, -3, 8)]),
    )
    config_dim_wise_l1 = h.zipit([
        # Model name encodes the regularized dimension.
        h.fixed("model.name", f"dim_wise_l1_{self.dim}_vae"),
        h.fixed("model.model", "@dim_wise_l1_vae()"),
        h.fixed("vae.beta", self.beta),
        h.fixed("dim_wise_l1_vae.scale_per_layer", self.scale_per_layer),
        l1_strengths,
        h.fixed("dim_wise_l1_vae.dim", self.dim),
        h.fixed("dim_wise_l1_vae.all_layers", self.all_layers),
    ])
    return h.chainit([config_dim_wise_l1])
def get_num_latent(sweep):
    """Build a sweep of 'encoder.num_latent' over the given values."""
    latent_values = h.discrete(sweep)
    return h.sweep("encoder.num_latent", latent_values)