    def __init__(self, num_epochs, var, f_lr_mult=5.0, enable_cuda=False):
        AbstractMethod.__init__(self)
        self.g = None
        self.f = None
        self.dev_f_collection = None

        # Candidate g and f network architectures considered during model selection.
        g_models = [
            MLPModel(input_dim=9, layer_widths=[20, 3],
                     activation=nn.LeakyReLU, var=var).double(),
        ]
        f_models = [
            MLPModel(input_dim=9, layer_widths=[20],
                     activation=nn.LeakyReLU, var=var).double(),
        ]
        # Move candidate networks to the GPU only when requested and available.
        if torch.cuda.is_available() and enable_cuda:
            for i, g in enumerate(g_models):
                g_models[i] = g.cuda()
            for i, f in enumerate(f_models):
                f_models[i] = f.cuda()

        g_learning_rates = [0.0005, 0.0002, 0.001]
        game_objectives = [
            OptimalMomentObjective(),
        ]
        # g_learning_rates = [0.0005]
        # game_objectives = [OptimalMomentObjective(lambda_1=0.5)]
        # One learning setup per (g learning rate, game objective) pair; the f
        # optimizer reuses g's learning rate scaled by f_lr_mult.
        learning_setups = []
        for g_lr in g_learning_rates:
            for game_objective in game_objectives:
                learning_setup = {
                    "g_optimizer_factory": OptimizerFactory(
                        # OAdam, lr=float(g_lr), betas=(0.5, 0.9), weight_decay=1e-1),
                        OAdam, lr=float(g_lr), betas=(0.5, 0.9)),
                    "f_optimizer_factory": OptimizerFactory(
                        # OAdam, lr=float(g_lr), betas=(0.5, 0.9), weight_decay=1e-1),
                        OAdam, lr=f_lr_mult*float(g_lr), betas=(0.5, 0.9)),
                    "game_objective": game_objective
                }
                learning_setups.append(learning_setup)

        # Default optimizers and evaluation routines used by the model-selection
        # procedure.
        default_g_opt_factory = OptimizerFactory(
            Adam, lr=0.001, betas=(0.5, 0.9))
        default_f_opt_factory = OptimizerFactory(
            Adam, lr=0.005, betas=(0.5, 0.9))
        g_simple_model_eval = SGDSimpleModelEval()
        f_simple_model_eval = SGDSimpleModelEval()
        learning_eval = FHistoryLearningEvalSGDNoStop(
            num_epochs=num_epochs, eval_freq=20, print_freq=100, batch_size=1024)
        self.model_selection = FHistoryModelSelectionV3(
            g_model_list=g_models,
            f_model_list=f_models,
            learning_args_list=learning_setups,
            default_g_optimizer_factory=default_g_opt_factory,
            default_f_optimizer_factory=default_f_opt_factory,
            g_simple_model_eval=g_simple_model_eval,
            f_simple_model_eval=f_simple_model_eval,
            learning_eval=learning_eval,
            psi_eval_max_no_progress=20, psi_eval_burn_in=50)
        self.default_g_opt_factory = default_g_opt_factory
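
# A minimal, self-contained sketch of the hyperparameter-grid pattern used in the
# __init__ above: every (g learning rate, game objective) pair becomes one learning
# setup, with the f learning rate tied to the g learning rate by a fixed multiplier.
# build_learning_setups is a hypothetical stand-in for illustration, not part of the
# original codebase.
from itertools import product

def build_learning_setups(g_learning_rates, game_objectives, f_lr_mult=5.0):
    """Cross the g learning rates with the game objectives into per-run configs."""
    setups = []
    for g_lr, objective in product(g_learning_rates, game_objectives):
        setups.append({
            "g_lr": float(g_lr),
            "f_lr": f_lr_mult * float(g_lr),  # f is optimized with a larger step size
            "game_objective": objective,
        })
    return setups

# Example: 3 learning rates x 1 objective -> 3 learning setups.
assert len(build_learning_setups([0.0005, 0.0002, 0.001], ["optimal_moment"])) == 3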
Example #2
    def __init__(self, enable_cuda=False):
        AbstractMethod.__init__(self)
        self.g = None
        self.f = None
        self.dev_f_collection = None

        # Candidate g and f networks; alternative CNN architectures for f are
        # left commented out.
        g_models = [
            MLPModel(input_dim=1, layer_widths=[200, 200],
                     activation=nn.LeakyReLU).double(),
        ]
        f_models = [
            DefaultCNN(cuda=enable_cuda),
            # OtherCNN(cuda=enable_cuda),
            # OtherCNNV2(cuda=enable_cuda),
            # OtherCNNV3(cuda=enable_cuda),
        ]
        if enable_cuda:
            for g in g_models:
                # nn.Module.cuda() moves parameters in place, so no reassignment
                # is needed; the f models take the cuda flag in their constructor.
                g.cuda()

        g_learning_rates = [0.0001, 0.00005, 0.00002]
        # g_learning_rates = [0.00001]
        game_objective = OptimalMomentObjective()
        # g_learning_rates = [0.0005]
        # game_objectives = [OptimalMomentObjective(lambda_1=0.5)]
        # One learning setup per g learning rate; f's learning rate is 5x the
        # g learning rate.
        learning_setups = []
        for g_lr in g_learning_rates:
            learning_setup = {
                "g_optimizer_factory": OptimizerFactory(
                    OAdam, lr=g_lr, betas=(0.5, 0.9)),
                "f_optimizer_factory": OptimizerFactory(
                    OAdam, lr=5.0*g_lr, betas=(0.5, 0.9)),
                "game_objective": game_objective
            }
            learning_setups.append(learning_setup)

        default_g_opt_factory = OptimizerFactory(
            Adam, lr=0.001, betas=(0.5, 0.9))
        default_f_opt_factory = OptimizerFactory(
            Adam, lr=0.0001, betas=(0.5, 0.9))
        g_simple_model_eval = GradientDecentSimpleModelEval(
            max_num_iter=4000, max_no_progress=10, eval_freq=100)
        f_simple_model_eval = SGDSimpleModelEval(
            max_num_epoch=50, max_no_progress=10, batch_size=512, eval_freq=1)
        learning_eval = FHistoryLearningEvalSGDNoStop(
            num_epochs=60, eval_freq=1, batch_size=1024)
        self.model_selection = FHistoryModelSelectionV3(
            g_model_list=g_models,
            f_model_list=f_models,
            learning_args_list=learning_setups,
            default_g_optimizer_factory=default_g_opt_factory,
            default_f_optimizer_factory=default_f_opt_factory,
            g_simple_model_eval=g_simple_model_eval,
            f_simple_model_eval=f_simple_model_eval,
            learning_eval=learning_eval,
            psi_eval_max_no_progress=10, psi_eval_burn_in=30,
        )
        self.default_g_opt_factory = default_g_opt_factory
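
# A hedged sketch of the optimizer-factory pattern both constructors rely on: bind
# the optimizer class and its hyperparameters now, and build the optimizer later,
# once a concrete model has been chosen. SimpleOptimizerFactory is a hypothetical
# stand-in, not the repository's OptimizerFactory implementation.
import torch.nn as nn
from torch.optim import Adam

class SimpleOptimizerFactory:
    def __init__(self, optimizer_cls, **optimizer_kwargs):
        self.optimizer_cls = optimizer_cls
        self.optimizer_kwargs = optimizer_kwargs

    def __call__(self, model):
        # Construct a fresh optimizer over the given model's parameters.
        return self.optimizer_cls(model.parameters(), **self.optimizer_kwargs)

# The same factory can be reused across candidate g or f models.
factory = SimpleOptimizerFactory(Adam, lr=0.001, betas=(0.5, 0.9))
optimizer = factory(nn.Linear(9, 1))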