def _default_hparams(self):
    """Return the default hyperparameters for this module.

    Returns:
        ParamDict with the data-dimension and network defaults; callers may
        override individual entries downstream.
    """
    # All groups are distinct keys, so a single literal is equivalent to the
    # grouped updates. The "Misc params" group is currently empty.
    return ParamDict({
        # Data dimensions
        'batch_size': -1,       # -1 presumably means "set by the data loader" — TODO confirm
        # Network params
        'normalization': 'batch',   # normalization layer type used in the network
        # Misc params: (none yet)
    })
def _default_hparams(self):
    """Return this model's default hyperparameters layered over the parent's.

    Builds the model-specific defaults (decoding behavior, network sizes,
    learned-prior settings, loss weights) and overwrites the corresponding
    entries in the parent class's default ParamDict.

    Returns:
        ParamDict combining parent defaults with this model's overrides.
    """
    # General / decoding behavior
    general = {
        'use_convs': False,
        'device': None,
        'n_rollout_steps': 10,          # number of decoding steps
        'cond_decode': False,           # if True, conditions decoder on prior inputs
    }
    # Network size
    sizes = {
        'state_dim': 1,                 # dimensionality of the state space
        'action_dim': 1,                # dimensionality of the action space
        'nz_enc': 32,                   # number of dimensions in encoder-latent space
        'nz_vae': 10,                   # number of dimensions in vae-latent space
        'nz_mid': 32,                   # number of dimensions for internal feature spaces
        'nz_mid_lstm': 128,             # size of middle LSTM layers
        'n_lstm_layers': 1,             # number of LSTM layers
        'n_processing_layers': 3,       # number of layers in MLPs
    }
    # Learned prior
    prior = {
        'n_prior_nets': 1,              # number of prior networks in ensemble
        'num_prior_net_layers': 6,      # number of layers of the learned prior MLP
        'nz_mid_prior': 128,            # dimensionality of internal feature spaces for prior net
        'nll_prior_train': True,        # if True, trains learned prior by maximizing NLL
        'learned_prior_type': 'gauss',  # distribution type for learned prior, ['gauss', 'gmm', 'flow']
        'n_gmm_prior_components': 5,    # number of Gaussian components for GMM learned prior
    }
    # Loss weights
    losses = {
        'reconstruction_mse_weight': 1.,    # weight of MSE reconstruction loss
        'kl_div_weight': 1.,                # weight of KL divergence loss
    }

    # All groups use distinct keys, so merging them once is equivalent to the
    # sequence of update() calls.
    overrides = ParamDict({**general, **sizes, **prior, **losses})

    # Layer our defaults on top of the parent's defaults.
    params = super()._default_hparams()
    params.overwrite(overrides)
    return params