Example #1
    def setOptimizingParams(self,
                            damping_factor_obj: float = 1.0,
                            damping_update_factor_obj: float = 2 / 3,
                            damping_factor_probe: float = 1.0,
                            damping_update_factor_probe: float = 2 / 3):

        with self.graph.as_default():
            # Levenberg-Marquardt (LMA) optimizer for the object variable.
            self._optparams.obj_optimizer = LMA(
                input_var=self._tf_obj,
                predictions_fn=self._training_predictions_as_obj_fn,
                loss_fn=self._training_loss_fn,
                damping_factor=damping_factor_obj,
                damping_expansion=damping_update_factor_obj,
                name='obj_opt')
            self._optparams.obj_minimize_op = self._optparams.obj_optimizer.minimize()

            if self._probe_recons:
                # Optional second LMA optimizer for the probe variable.
                self._optparams.probe_optimizer = LMA(
                    input_var=self._tf_probe,
                    predictions_fn=self._training_predictions_as_probe_fn,
                    loss_fn=self._training_loss_fn,
                    damping_factor=damping_factor_probe,
                    damping_expansion=damping_update_factor_probe,
                    name='probe_opt')
                self._optparams.probe_minimize_op = self._optparams.probe_optimizer.minimize()

            self._optparams.training_loss_tensor = self._optparams.obj_optimizer._loss_t
        self._optimizers_defined = True
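
The `damping_factor` and `damping_update_factor` arguments appear to follow the usual Levenberg-Marquardt convention: the damping term is scaled down (here by 2/3) when a step reduces the loss and scaled back up when a step is rejected. Below is a minimal, self-contained NumPy sketch of that schedule; it is a generic illustration of the technique, not the LMA class's implementation, and `residual_fn`/`jac_fn` are hypothetical stand-ins.

import numpy as np

def lm_step(jac, residuals, damping):
    # Solve the damped normal equations (J^T J + damping * I) dx = -J^T r.
    jtj = jac.T @ jac
    rhs = -jac.T @ residuals
    return np.linalg.solve(jtj + damping * np.eye(jtj.shape[0]), rhs)

def lm_fit(residual_fn, jac_fn, x, damping=1.0, update_factor=2 / 3, n_iter=50):
    loss = 0.5 * np.sum(residual_fn(x) ** 2)
    for _ in range(n_iter):
        dx = lm_step(jac_fn(x), residual_fn(x), damping)
        new_loss = 0.5 * np.sum(residual_fn(x + dx) ** 2)
        if new_loss < loss:
            # Accepted step: relax the damping toward pure Gauss-Newton.
            x, loss = x + dx, new_loss
            damping *= update_factor
        else:
            # Rejected step: increase damping toward gradient descent.
            damping /= update_factor
    return x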
Example #2
    def setOptimizingParams(self):
        # Poisson-type losses come with an analytical per-element Hessian;
        # other losses fall back to the optimizer's default approximation.
        if self._loss_type in ["poisson", "poisson_surrogate"]:
            loss_hessian_fn = self._training_loss_hessian_fn
        else:
            loss_hessian_fn = None

        size = self._batch_train_mods.shape.as_list()[0]

        with self.graph.as_default():
            self._optparams.obj_optimizer = LMA(input_var=self._tf_obj,
                                                predictions_fn=self._training_predictions_as_obj_fn,
                                                loss_fn=self._training_loss_fn,
                                                name='obj_opt',
                                                diag_hessian_fn=loss_hessian_fn,
                                                assert_tolerances=False)
            self._optparams.obj_minimize_op = self._optparams.obj_optimizer.minimize()

            if self._probe_recons:
                self._optparams.probe_optimizer = LMA(input_var=self._tf_probe,
                                                      predictions_fn=self._training_predictions_as_probe_fn,
                                                      loss_fn=self._training_loss_fn,
                                                      name='probe_opt',
                                                      diag_hessian_fn=loss_hessian_fn,
                                                      assert_tolerances=False)
                self._optparams.probe_minimize_op = self._optparams.probe_optimizer.minimize()

            self._optparams.training_loss_tensor = self._optparams.obj_optimizer._loss_t
        self._optimizers_defined = True
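
For the Poisson-type losses, the `diag_hessian_fn` hook is populated with an analytical per-element Hessian. The exact form of `_training_loss_hessian_fn` is not shown in these examples; the following is a hedged sketch of what such a function could compute for a standard Poisson negative log-likelihood, with illustrative names throughout.

import tensorflow as tf

def poisson_nll(predictions, observations, eps=1e-8):
    # Elementwise Poisson NLL up to a data-dependent constant:
    # L = sum(pred - obs * log(pred)).
    return tf.reduce_sum(predictions - observations * tf.math.log(predictions + eps))

def poisson_diag_hessian(predictions, observations, eps=1e-8):
    # Exact diagonal of the elementwise Hessian: d^2L/dpred^2 = obs / pred^2.
    return observations / (predictions + eps) ** 2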
Example #3
    def setOptimizingParams(self,
                            cg_tol: float = 1e-5,
                            max_cg_iter: int = 10,
                            grad_norm_reg_pow: int = 0):
        if self._loss_type in ["poisson", "poisson_surrogate"]:
            loss_hessian_fn = self._training_loss_hessian_fn
            squared_loss = False
        else:
            loss_hessian_fn = None
            squared_loss = True

        size = self._batch_train_mods.shape.as_list()[0]

        with self.graph.as_default():
            self._optparams.optimizer = LMA(
                input_var=self._tf_var,
                predictions_fn=self._training_predictions_fn,
                loss_fn=self._training_loss_fn,
                name='opt',
                diag_hessian_fn=loss_hessian_fn,
                squared_loss=squared_loss,
                max_cg_iter=max_cg_iter,
                min_cg_tol=cg_tol,
                grad_norm_reg_pow=grad_norm_reg_pow)
            self._optparams.minimize_op = self._optparams.optimizer.minimize()

            self._optparams.training_loss_tensor = self._optparams.optimizer._loss_t
        self._optimizers_defined = True
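
The `max_cg_iter` and `min_cg_tol` arguments suggest that the damped Gauss-Newton system is solved with a truncated conjugate-gradient loop rather than a direct solve. A generic NumPy sketch of such a truncated CG solver follows; the LMA class's internal loop may differ in detail.

import numpy as np

def truncated_cg(matvec, b, max_iter=10, tol=1e-5):
    # Approximately solve A x = b, with A supplied implicitly via matvec.
    x = np.zeros_like(b)
    r = b - matvec(x)
    p = r.copy()
    rs = r @ r
    for _ in range(max_iter):
        if np.sqrt(rs) < tol:
            break  # residual already below tolerance
        ap = matvec(p)
        alpha = rs / (p @ ap)
        x += alpha * p
        r -= alpha * ap
        rs_new = r @ r
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x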
Example #4
from typing import Callable

import tensorflow as tf

# `Optimizer` is the project's own base class (import path not shown here).
class LMAOptimizer(Optimizer):
    def __init__(self,
                 input_var: tf.Variable,
                 predictions_fn: Callable,
                 loss_fn: Callable,
                 diag_hessian_fn: Callable = None,
                 max_cg_iter: int = 100,
                 initial_update_delay: int = 0,
                 update_frequency: int = 1,
                 **extra_init_kwargs: int):
        super().__init__(initial_update_delay, update_frequency)
        print('Extra initial parameters:', extra_init_kwargs)
        self._optimizer = LMA(input_var=input_var,
                              predictions_fn=predictions_fn,
                              loss_fn=loss_fn,
                              diag_hessian_fn=diag_hessian_fn,
                              max_cg_iter=max_cg_iter,
                              **extra_init_kwargs)

    def setupMinimizeOp(self):
        self._minimize_op = self._optimizer.minimize()

    @property
    def minimize_op(self):
        return self._minimize_op
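
The `Optimizer` base class that consumes `initial_update_delay` and `update_frequency` is not shown in these examples. A plausible, purely illustrative sketch of how such gating could work (the names and semantics below are assumptions, not the project's actual base class):

class Optimizer:
    def __init__(self, initial_update_delay: int = 0, update_frequency: int = 1):
        self._initial_update_delay = initial_update_delay
        self._update_frequency = update_frequency

    def shouldUpdate(self, global_step: int) -> bool:
        # Skip updates during the warm-up delay, then fire every Nth step.
        if global_step < self._initial_update_delay:
            return False
        return (global_step - self._initial_update_delay) % self._update_frequency == 0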