Example #1
def hparams_config():
    return {
        'kernel': CategoricalParam(choices=['linear', 'poly', 'rbf', 'sigmoid']),
        'C': RealParam(min=0.1, max=5),
        'epsilon': RealParam()
    }
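The *Param classes used throughout these examples come from a hyperparameter-search helper library whose source is not shown here. As a rough, self-contained sketch of how such descriptors might behave under random search, the stand-ins below give each one a sample() method; every class body is an assumption for illustration, not the actual library API.

import random

class ConstantParam:
    """Fixed value: every draw returns the same thing."""
    def __init__(self, value):
        self.value = value
    def sample(self):
        return self.value

class RealParam:
    """Uniform float in [min, max]; defaults to the unit interval."""
    def __init__(self, min=0.0, max=1.0):
        self.min, self.max = min, max
    def sample(self):
        return random.uniform(self.min, self.max)

class LogRealParam:
    """Float drawn uniformly on a log10 scale (here 1e-5 to 1e0),
    a common choice for learning rates and weight decay."""
    def __init__(self, min=-5, max=0):
        self.min, self.max = min, max
    def sample(self):
        return 10 ** random.uniform(self.min, self.max)

class DiscreteParam:
    """Uniform integer in [min, max]; an optional samplable `size`
    turns the draw into a list (see 'lin_dims' in Example #5)."""
    def __init__(self, min=0, max=1, size=None):
        self.min, self.max, self.size = min, max, size
    def sample(self):
        if self.size is not None:
            n = self.size.sample() if hasattr(self.size, 'sample') else self.size
            return [random.randint(self.min, self.max) for _ in range(n)]
        return random.randint(self.min, self.max)

class CategoricalParam:
    """Uniform choice from a fixed list of options."""
    def __init__(self, choices):
        self.choices = choices
    def sample(self):
        return random.choice(self.choices)

class DictParam:
    """Nested sub-space: sampling recurses into each entry."""
    def __init__(self, params):
        self.params = params
    def sample(self):
        return {k: v.sample() for k, v in self.params.items()}

def sample_hparams(config):
    """Draw one concrete assignment from a config dict of descriptors."""
    return {k: v.sample() for k, v in config.items()}

A single draw from Example #1's space would then look like:

hparams = sample_hparams(hparams_config())
# e.g. {'kernel': 'rbf', 'C': 3.17, 'epsilon': 0.42}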
Example #2
def get_hparam_config(args):
    return {'d_model': ConstantParam(1500),
            'dropout': RealParam(min=0.),
            'monte_carlo_N': ConstantParam(5),
            'use_monte_carlo_sim': ConstantParam(True),
            'no_mc_fill_val': ConstantParam(0.0),
            'gamma': ConstantParam(0.97),
            'episodes_to_train': DiscreteParam(min=5, max=20),
            'gae_lambda': RealParam(0.9, max=0.999),
            'ppo_eps': ConstantParam(0.2),
            'ppo_batch': ConstantParam(1),
            'ppo_epochs': DiscreteParam(2, max=10),
            'entropy_beta': LogRealParam(),
            'use_true_reward': ConstantParam(args.use_true_reward),
            'baseline_reward': ConstantParam(args.baseline_reward),
            'reward_params': DictParam({'num_layers': DiscreteParam(min=1, max=4),
                                        'd_model': DiscreteParam(min=128, max=1024),
                                        'unit_type': ConstantParam('lstm'),
                                        'demo_batch_size': CategoricalParam([64, 128, 256]),
                                        'irl_alg_num_iter': DiscreteParam(2, max=10),
                                        'use_attention': ConstantParam(False),
                                        'bidirectional': ConstantParam(True),
                                        'dropout': RealParam(),
                                        'use_validity_flag': ConstantParam(not args.no_smiles_validity_flag),
                                        'optimizer': CategoricalParam(
                                            choices=['sgd', 'adam', 'adadelta', 'adagrad', 'adamax', 'rmsprop']),
                                        'optimizer__global__weight_decay': LogRealParam(),
                                        'optimizer__global__lr': LogRealParam()}),
            'agent_params': DictParam({'unit_type': ConstantParam('gru'),
                                       'num_layers': ConstantParam(2),
                                       'stack_width': ConstantParam(1500),
                                       'stack_depth': ConstantParam(200),
                                       'optimizer': ConstantParam('adadelta'),
                                       'optimizer__global__weight_decay': LogRealParam(),
                                       'optimizer__global__lr': LogRealParam()}),
            'critic_params': DictParam({'num_layers': ConstantParam(2),
                                        'd_model': ConstantParam(256),
                                        'unit_type': ConstantParam('lstm'),
                                        'optimizer': ConstantParam('adadelta'),
                                        'optimizer__global__weight_decay': LogRealParam(),
                                        'optimizer__global__lr': LogRealParam()}),
            'expert_model_params': DictParam({'model_dir': ConstantParam('./model_dir/expert_rnn_bin'),
                                              'd_model': ConstantParam(128),
                                              'rnn_num_layers': ConstantParam(2),
                                              'dropout': ConstantParam(0.8),
                                              'is_bidirectional': ConstantParam(True),
                                              'unit_type': ConstantParam('lstm')})
            }
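Example #2 shows two conventions worth noting: DictParam nests a whole sub-space under one key (reward_params, agent_params, critic_params, expert_model_params), and double-underscore keys such as optimizer__global__weight_decay apparently encode a routing path for optimizer options. The parsing logic is not part of these examples; one plausible reading, as a hypothetical sketch, is:

def split_optimizer_keys(hparams):
    """Hypothetical: separate 'optimizer__global__<option>' entries from a
    sampled dict into kwargs for the optimizer constructor."""
    opt_kwargs, rest = {}, {}
    for key, val in hparams.items():
        if key.startswith('optimizer__global__'):
            opt_kwargs[key.rsplit('__', 1)[-1]] = val  # e.g. 'weight_decay'
        else:
            rest[key] = val
    return rest, opt_kwargs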
Example #3
def get_hparam_config(args):
    config = {
        'unit_type': CategoricalParam(choices=['gru', 'lstm']),
        'num_layers': DiscreteParam(min=1, max=10),
        'd_model': DiscreteParam(min=32, max=1024),
        'stack_width': DiscreteParam(min=10, max=128),
        'stack_depth': DiscreteParam(min=10, max=64),
        'dropout': RealParam(min=0.0, max=0.3),
        'batch_size': CategoricalParam(choices=[32, 64, 128]),

        # optimizer params
        'optimizer': CategoricalParam(choices=['sgd', 'adam', 'adadelta', 'adagrad', 'adamax', 'rmsprop']),
        'optimizer__global__weight_decay': LogRealParam(),
        'optimizer__global__lr': LogRealParam(),
    }
    return config
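The learning rate and weight decay here are searched with LogRealParam rather than RealParam: quantities spanning several orders of magnitude are better explored uniformly in log space, so values near 1e-4 are drawn as often as values near 1e-1. With the stand-in class sketched after Example #1:

lr = LogRealParam().sample()  # 10 ** uniform(-5, 0); bounds are assumed
# draws spread evenly across decades, e.g. 3e-5, 2e-4, 7e-2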
Example #4
def hparams_config():
    return {'batch': CategoricalParam(choices=[32, 64, 128]),
            'd_model': DiscreteParam(min=32, max=256),
            'rnn_num_layers': DiscreteParam(min=1, max=3),
            'dropout': RealParam(min=0., max=0.8),
            'is_bidirectional': CategoricalParam(choices=[True, False]),
            'unit_type': CategoricalParam(choices=['gru', 'lstm']),
            'optimizer': CategoricalParam(choices=['sgd', 'adam', 'adadelta', 'adagrad', 'adamax', 'rmsprop']),
            'optimizer__global__weight_decay': LogRealParam(),
            'optimizer__global__lr': LogRealParam()}
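None of these examples show the loop that consumes a config function. A minimal random-search driver, reusing sample_hparams from the sketch after Example #1 (objective and n_trials are placeholders, not part of the original code):

def random_search(config_fn, objective, n_trials=20):
    """Sample n_trials assignments and keep the best under `objective`."""
    best_hp, best_score = None, float('-inf')
    for _ in range(n_trials):
        hp = sample_hparams(config_fn())
        score = objective(hp)  # e.g. validation accuracy of a trained model
        if score > best_score:
            best_hp, best_score = hp, score
    return best_hp, best_score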
Example #5
def get_hparam_config(args):
    config = {
        'attn_heads': CategoricalParam([1, 2, 4, 8]),
        'attn_layers': DiscreteParam(min=1, max=4),
        'lin_dims': DiscreteParam(min=64, max=2048, size=DiscreteParam(min=1, max=3)),
        'd_model': CategoricalParam(choices=[128, 256, 512, 1024]),
        'd_hidden': DiscreteParam(min=10, max=64),
        'stack_width': DiscreteParam(min=10, max=64),
        'stack_depth': DiscreteParam(min=10, max=64),
        'd_ff': DiscreteParam(min=128, max=2048),
        'd_ss': DiscreteParam(min=128, max=2048),
        'dropout': RealParam(min=0.0, max=0.5),
        'batch_size': CategoricalParam(choices=[32, 64, 128]),

        # optimizer params
        'optimizer': CategoricalParam(choices=['sgd', 'adam', 'adadelta', 'adagrad', 'adamax', 'rmsprop']),
        'optimizer__global__weight_decay': LogRealParam(),
        'optimizer__global__lr': LogRealParam(),
    }
    return config
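'lin_dims' above is the one descriptor whose value is a list: size=DiscreteParam(min=1, max=3) means the length of the list is itself sampled, so the number of linear layers is searched jointly with their widths. Under the stand-in DiscreteParam sketched after Example #1, a draw might be:

dims = DiscreteParam(min=64, max=2048, size=DiscreteParam(min=1, max=3)).sample()
# e.g. [512, 1873] -- two layers, widths sampled independently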
Example #6
def get_hparam_config(args):
    return {
        'd_model': ConstantParam(1500),
        'dropout': RealParam(min=0.),
        'monte_carlo_N': ConstantParam(5),
        'use_monte_carlo_sim': ConstantParam(True),
        'no_mc_fill_val': ConstantParam(0.0),
        'gamma': ConstantParam(0.97),
        'episodes_to_train': DiscreteParam(min=5, max=20),
        'reinforce_max_norm': ConstantParam(None),
        'lr_decay_gamma': RealParam(),
        'lr_decay_step_size': DiscreteParam(min=100, max=1000),
        'xent_lambda': ConstantParam(0.0),
        'use_true_reward': ConstantParam(args.use_true_reward),
        'bias_mode': ConstantParam(args.bias_mode),
        'reward_params': DictParam({'num_layers': ConstantParam(2),
                                    'd_model': ConstantParam(256),
                                    'unit_type': ConstantParam('lstm'),
                                    'demo_batch_size': ConstantParam(32),
                                    'irl_alg_num_iter': ConstantParam(5),
                                    'use_attention': ConstantParam(args.use_attention),
                                    'use_validity_flag': ConstantParam(not args.no_smiles_validity_flag),
                                    'bidirectional': ConstantParam(True),
                                    'optimizer': ConstantParam('adadelta'),
                                    'optimizer__global__weight_decay': ConstantParam(0.0),
                                    'optimizer__global__lr': ConstantParam(0.001)}),
        'agent_params': DictParam({'unit_type': ConstantParam('gru'),
                                   'num_layers': ConstantParam(2),
                                   'stack_width': ConstantParam(1500),
                                   'stack_depth': ConstantParam(200),
                                   'optimizer': ConstantParam('adadelta'),
                                   'optimizer__global__weight_decay': LogRealParam(),
                                   'optimizer__global__lr': LogRealParam()}),
        'expert_model_dir': ConstantParam('./model_dir/expert_xgb_reg')
    }
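Examples #2 and #6 close over args, so sampling them needs the parsed command-line namespace. The fields below are hypothetical stand-ins inferred from the attributes the config reads, not the real script's CLI; with the stand-in classes from earlier, a draw recurses through each DictParam:

import argparse

# Hypothetical namespace; only the attributes Example #6's config references.
args = argparse.Namespace(use_true_reward=False, bias_mode='neutral',
                          use_attention=False, no_smiles_validity_flag=False)
hp = sample_hparams(get_hparam_config(args))
print(hp['agent_params']['optimizer__global__lr'])  # e.g. 0.00031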