Example #1
    def addParameterUpdateOps(self, model):
        if self.optimizer not in OPTIMIZER_DICT:
            raise Exception(
                "Optimizer {} unknown. Valid choices are {}".format(
                    self.optimizer, ", ".join(OPTIMIZER_DICT.keys())))
        optimizer_rule = OPTIMIZER_DICT[self.optimizer]

        if optimizer_rule == GRAD_OPTIMIZER.SGD:
            build_sgd(
                model,
                self.learning_rate,
                gamma=self.lr_decay,
                policy=self.lr_policy,
                stepsize=1,
            )
        elif optimizer_rule == GRAD_OPTIMIZER.ADAGRAD:
            build_adagrad(model, self.learning_rate)
        elif optimizer_rule == GRAD_OPTIMIZER.ADAM:
            build_adam(model, self.learning_rate)
        elif optimizer_rule == GRAD_OPTIMIZER.FTRL:
            # build_ftrl's second positional parameter is `engine`, so route
            # the learning rate through FTRL's alpha keyword instead.
            build_ftrl(model, alpha=self.learning_rate)
        else:
            print("Unrecognized in caffe2 setting, using default SGD",
                  optimizer_rule)
            build_sgd(model, self.learning_rate)
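
Both this snippet and Example #3 below reference OPTIMIZER_DICT and GRAD_OPTIMIZER without defining them. Here is a minimal sketch of the plumbing they assume, using the stock caffe2.python.optimizer builders; the enum values and dict layout are illustrative assumptions, not code from any particular repository:

from enum import Enum

from caffe2.python.optimizer import (
    build_adagrad,
    build_adam,
    build_ftrl,
    build_sgd,
)


class GRAD_OPTIMIZER(Enum):
    SGD = 1
    ADAGRAD = 2
    ADAM = 3
    FTRL = 4


# Map the user-facing optimizer name ("SGD", "ADAGRAD", ...) to its rule.
OPTIMIZER_DICT = {rule.name: rule for rule in GRAD_OPTIMIZER}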
Example #2
def build_optimizer(self, model):
    build_ftrl(model,
               engine=None,
               alpha=1.0,
               beta=0.1,
               lambda1=0.0,
               lambda2=0.0)
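
Note that build_ftrl has no positional learning-rate parameter: in stock caffe2 its second argument is engine (default "SIMD"), and the FTRL learning rate is the alpha keyword. Passing engine=None here opts out of the SIMD-fused Ftrl operators, so the snippet also runs on builds that lack them.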
Example #3
def AddParameterUpdateOps(
    model, optimizer_input="SGD", base_learning_rate=0.01, *args, **kwargs
):
    if optimizer_input not in OPTIMIZER_DICT:
        raise Exception(
            "Optimizer {} unknown. Valid choices are {}"
            .format(optimizer_input, ', '.join(OPTIMIZER_DICT.keys()))
        )
    optimizer_rule = OPTIMIZER_DICT[optimizer_input]

    if optimizer_rule == GRAD_OPTIMIZER.SGD:
        build_sgd(
            model,
            base_learning_rate,
            gamma=kwargs['gamma'],
            policy=kwargs['policy'],
            stepsize=1
        )
    elif optimizer_rule == GRAD_OPTIMIZER.ADAGRAD:
        build_adagrad(model, base_learning_rate)
    elif optimizer_rule == GRAD_OPTIMIZER.ADAM:
        build_adam(model, base_learning_rate)
    elif optimizer_rule == GRAD_OPTIMIZER.FTRL:
        # As in Example #1, pass the learning rate via alpha rather than the
        # positional `engine` slot of build_ftrl.
        build_ftrl(model, alpha=base_learning_rate)
    else:
        print(
            "Optimizer rule {} not recognized for caffe2; "
            "falling back to default SGD".format(optimizer_rule)
        )
        build_sgd(model, base_learning_rate)
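
For context, a hypothetical call site for this variant might look like the sketch below; gamma and policy must be supplied, because the SGD branch indexes kwargs['gamma'] and kwargs['policy'] without defaults:

from caffe2.python import model_helper

model = model_helper.ModelHelper(name="train_net")
# ... build the forward net and add gradient operators here ...
AddParameterUpdateOps(
    model,
    optimizer_input="SGD",
    base_learning_rate=0.01,
    gamma=0.999,    # lr decay factor, required by this variant
    policy="step",  # lr schedule name, required by this variant
)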
Example #4
def build_optimizer(self, model):
    self._skip_gpu = True
    return build_ftrl(model,
                      engine=None,
                      alpha=1.0,
                      beta=0.1,
                      lambda1=0.0,
                      lambda2=0.0)
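
Example #5 below differs from Example #4 only in forwarding **kwargs to build_ftrl, which lets a caller override any of the pinned FTRL hyperparameters. The _skip_gpu flag in both suggests they come from a test harness that skips GPU runs for FTRL.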
Example #5
def build_optimizer(self, model, **kwargs):
    self._skip_gpu = True
    return build_ftrl(
        model,
        engine=None,
        alpha=1.0,
        beta=0.1,
        lambda1=0.0,
        lambda2=0.0,
        **kwargs
    )
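
Putting it together, here is an end-to-end sketch of how a builder like Example #5's gets exercised; the toy model, blob names, and shapes are illustrative assumptions:

import numpy as np
from caffe2.python import brew, model_helper, workspace
from caffe2.python.optimizer import build_ftrl

# Tiny classifier: 4 features in, 3 classes out.
model = model_helper.ModelHelper(name="ftrl_demo")
fc = brew.fc(model, "data", "fc", dim_in=4, dim_out=3)
softmax, loss = model.net.SoftmaxWithLoss([fc, "label"], ["softmax", "loss"])
model.AddGradientOperators([loss])

# Same call as Example #5, minus the test-harness plumbing.
build_ftrl(model, engine=None, alpha=1.0, beta=0.1, lambda1=0.0, lambda2=0.0)

workspace.FeedBlob("data", np.random.rand(8, 4).astype(np.float32))
workspace.FeedBlob("label", np.random.randint(3, size=8).astype(np.int32))
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
workspace.RunNet(model.net)  # one forward/backward pass plus an FTRL update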