def optimizer_setting(params, parameter_list=None):
    ls = params["learning_strategy"]
    if ls["name"] == "piecewise_decay":
        if "total_images" not in params:
            total_images = 1281167
        else:
            total_images = params["total_images"]
        batch_size = ls["batch_size"]
        step = int(total_images / batch_size + 1)

        bd = [step * e for e in ls["epochs"]]
        base_lr = params["lr"]
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
        if fluid._non_static_mode():
            optimizer = fluid.optimizer.SGD(learning_rate=0.01,
                                            parameter_list=parameter_list)
        else:
            optimizer = fluid.optimizer.SGD(learning_rate=0.01)
        # TODO(minqiyang): Add learning rate scheduler support to dygraph mode
        #  optimizer = fluid.optimizer.Momentum(
        #  learning_rate=params["lr"],
        #  learning_rate=fluid.layers.piecewise_decay(
        #  boundaries=bd, values=lr),
        #  momentum=0.9,
        #  regularization=fluid.regularizer.L2Decay(1e-4))

    return optimizer
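
A minimal usage sketch for the snippet above, assuming the legacy fluid dygraph API is available; the `train_params` values and the stand-in `fluid.dygraph.Linear` layer are illustrative assumptions, not part of the original example:

import paddle.fluid as fluid  # also needed by the snippet above

train_params = {
    "lr": 0.1,
    "total_images": 1281167,
    "learning_strategy": {
        "name": "piecewise_decay",
        "batch_size": 256,
        "epochs": [30, 60, 90],
    },
}

with fluid.dygraph.guard():
    model = fluid.dygraph.Linear(8, 2)  # stand-in layer, assumed for illustration
    opt = optimizer_setting(train_params, parameter_list=model.parameters())
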
def optimizer_setting(params, parameter_list=None):
    ls = params["learning_strategy"]
    if "total_images" not in params:
        total_images = 6149
    else:
        total_images = params["total_images"]

    batch_size = ls["batch_size"]
    step = int(math.ceil(float(total_images) / batch_size))
    bd = [step * e for e in ls["epochs"]]
    lr = params["lr"]
    num_epochs = params["num_epochs"]
    # `momentum_rate` and `l2_decay` are not defined in this excerpt; the values
    # below are assumed stand-ins so the snippet runs as written.
    momentum_rate = 0.9
    l2_decay = 1e-4
    if fluid._non_static_mode():
        optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.cosine_decay(learning_rate=lr,
                                                    step_each_epoch=step,
                                                    epochs=num_epochs),
            momentum=momentum_rate,
            regularization=fluid.regularizer.L2Decay(l2_decay),
            parameter_list=parameter_list)
    else:
        optimizer = fluid.optimizer.Momentum(
            learning_rate=fluid.layers.cosine_decay(learning_rate=lr,
                                                    step_each_epoch=step,
                                                    epochs=num_epochs),
            momentum=momentum_rate,
            regularization=fluid.regularizer.L2Decay(l2_decay))

    return optimizer
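
A hedged usage sketch for the cosine-decay variant above; the dictionary values are illustrative assumptions only, and the imports cover names the snippet itself relies on:

import math  # needed by the snippet above

import paddle.fluid as fluid  # also needed by the snippet above

params = {
    "lr": 0.0125,
    "num_epochs": 120,
    "total_images": 6149,
    "learning_strategy": {"name": "cosine_decay", "batch_size": 32, "epochs": [30, 60, 90]},
}

with fluid.dygraph.guard():
    layer = fluid.dygraph.Linear(16, 4)  # stand-in layer, assumed for illustration
    opt = optimizer_setting(params, parameter_list=layer.parameters())
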
 def check_type(op_str, x, y, binary_op):
     # `unit_test` and `type_str_map` are free variables supplied by the enclosing
     # test scope in the original source.
     op = getattr(paddle, op_str)
     error_type = ValueError
     if isinstance(x, np.ndarray):
         x = paddle.to_tensor(x)
         y = paddle.to_tensor(y)
         error_type = BaseException
     if binary_op:
         if type_str_map['x'] != type_str_map['y']:
             unit_test.assertRaises(error_type, op, x=x, y=y)
         if not fluid._non_static_mode():
             error_type = TypeError
             unit_test.assertRaises(error_type, op, x=x, y=y, out=1)
     else:
         if not fluid._non_static_mode():
             error_type = TypeError
             unit_test.assertRaises(error_type, op, x=x, out=1)
def calc_gradients(outputs, inputs, no_grad_set):
    if fluid._non_static_mode():
        return fluid.dygraph.grad(outputs=outputs,
                                  inputs=inputs,
                                  no_grad_vars=no_grad_set,
                                  create_graph=True)
    else:
        return fluid.gradients(targets=outputs,
                               inputs=inputs,
                               no_grad_set=no_grad_set)
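
A short, hedged sketch of calling the helper above in dynamic mode; the tensors and the quadratic function are illustrative assumptions:

import numpy as np
import paddle
import paddle.fluid as fluid  # also needed by the snippet above

paddle.disable_static()  # ensure fluid._non_static_mode() is True
x = paddle.to_tensor(np.random.rand(2, 3).astype("float32"), stop_gradient=False)
y = x * x
# Returns d(y)/d(x) via fluid.dygraph.grad with create_graph=True,
# so higher-order gradients remain possible.
(dx,) = calc_gradients(outputs=[y], inputs=[x], no_grad_set=None)
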
 def forward(self, input):
     if fluid._non_static_mode():
         out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
                                          'epsilon', self.epsilon)
         return out
     else:
         return fluid.layers.instance_norm(
             input,
             epsilon=self.epsilon,
             param_attr=fluid.ParamAttr(self.scale.name),
             bias_attr=fluid.ParamAttr(self.bias.name))
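
For comparison only (not part of the snippet above), the public 2.x layer equivalent of both branches is paddle.nn.InstanceNorm2D; a minimal sketch with assumed shapes:

import paddle

inst_norm = paddle.nn.InstanceNorm2D(num_features=3, epsilon=1e-5)
x = paddle.rand([4, 3, 32, 32])  # NCHW input, assumed for illustration
out = inst_norm(x)
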
def build_optimizer(layer, cfg, loss=None):
    learning_rate = 1e-3
    beta1 = 0.5
    beta2 = 0.999
    if fluid._non_static_mode():
        return fluid.optimizer.Adam(learning_rate=learning_rate,
                                    beta1=beta1,
                                    beta2=beta2,
                                    parameter_list=layer.parameters())
    else:
        optimizer = fluid.optimizer.Adam(learning_rate=learning_rate,
                                         beta1=beta1,
                                         beta2=beta2)

        optimizer.minimize(loss, parameter_list=layer.parameters())
        return optimizer
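
A hedged usage sketch for build_optimizer in dynamic mode; the stand-in layer is an assumption, and `cfg` is unused by the snippet above:

import paddle.fluid as fluid  # also needed by the snippet above

with fluid.dygraph.guard():
    net = fluid.dygraph.Linear(10, 1)  # stand-in layer, assumed for illustration
    adam = build_optimizer(net, cfg={})  # dygraph branch: Adam over net.parameters()
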
Example #7
def optimizer_setting(params, parameter_list=None):
    ls = params["learning_strategy"]
    if ls["name"] == "piecewise_decay":
        if "total_images" not in params:
            total_images = 6149
        else:
            total_images = params["total_images"]
        # TODO(Yancey1989): using lr decay if it is ready.
        #batch_size = ls["batch_size"]
        #step = int(total_images / batch_size + 1)

        #bd = [step * e for e in ls["epochs"]]
        #base_lr = params["lr"]
        #lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
        if fluid._non_static_mode():
            optimizer = fluid.optimizer.SGD(learning_rate=0.01,
                                            parameter_list=parameter_list)
        else:
            optimizer = fluid.optimizer.SGD(learning_rate=0.01)

    return optimizer
Example #8
 def test_func2(self):
     # After test_func1 has run, if the fluid.dygraph.guard() in test_func1 exited cleanly,
     # fluid._non_static_mode() should be False.
     self.assertEqual(fluid._non_static_mode(), False)
 def __impl__(*args, **kwargs):
     if fluid._non_static_mode():
         return func(*args, **kwargs)
     else:
         with fluid.dygraph.guard():
             return func(*args, **kwargs)
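
A sketch of the decorator that such an __impl__ wrapper typically belongs to; the outer name dygraph_guard and the sample function are assumptions for illustration, assuming the legacy fluid API is importable:

import functools

import paddle.fluid as fluid


def dygraph_guard(func):
    @functools.wraps(func)
    def __impl__(*args, **kwargs):
        # Run directly if already in dynamic mode; otherwise open a dygraph guard first.
        if fluid._non_static_mode():
            return func(*args, **kwargs)
        with fluid.dygraph.guard():
            return func(*args, **kwargs)

    return __impl__


@dygraph_guard
def scale(x):
    return x * 2
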
 def get_tracer_mode(self):
     assert fluid._non_static_mode(), "Dygraph mode must be enabled"
Example #11
    def __call__(self, *args, **kwargs):
        """
        Supports to call the returned instance with input `args` and `kwargs` directly.

        Args:
            *args(tuple): tuple of all input arguments from original decorated function.
            **kwargs(dict): dict of all input keyward arguments from original decorated function. 

        Return:
            Outputs of decorated function.
        """

        # 1. call dygraph function directly if not enable `declarative`
        if not self._program_trans.enable_to_static:
            # NOTE(liym27):
            # `warnings.warn` is preferred here over `logging_utils.warn` because, by default,
            # warnings.warn(message) shows a given message **only once**, while
            # StaticFunction.__call__ may run many times, so the warning should be
            # displayed only once.
            logging_utils.warn(
                "The decorator '@paddle.jit.to_static' does NOT work when setting ProgramTranslator.enable to False. "
                "We will just return dygraph output. If you would like to get static graph output, please call API "
                "ProgramTranslator.enable(True)")
            return self._call_dygraph_function(*args, **kwargs)

        if not _non_static_mode():
            raise RuntimeError(
                "Failed to run the callable object {} decorated by '@paddle.jit.to_static', "
                "because it is NOT in dynamic mode. Please disable the static mode to enter dynamic mode with the "
                "following API: paddle.disable_static().".format(
                    self.dygraph_function))

        # 2. trace ops from dygraph layers and cache the generated program.
        args, kwargs = self._function_spec.unified_args_and_kwargs(
            args, kwargs)

        try:
            concrete_program, partial_program_layer = self.get_concrete_program(
                *args, **kwargs, is_train=self._is_train_mode())

            # 3. synchronize self.training attribute.
            if isinstance(self._class_instance, layers.Layer):
                partial_program_layer.training = self._class_instance.training
            else:
                partial_program_layer.training = self._training

            partial_program_layer._cuda_graph_capture_mode = self._cuda_graph_capture_mode
            partial_program_layer._cuda_graph_pool_id = self._cuda_graph_pool_id

            # 4. return outputs.
            try:
                return partial_program_layer(args)
            except Exception as e:
                if not hasattr(e, error.ERROR_DATA):
                    # runtime error
                    error.attach_error_data(e, in_runtime=True)
                    raise
        except Exception as e:
            error_data = getattr(e, error.ERROR_DATA, None)
            if error_data:
                error_data.raise_new_exception()
            else:
                logging_utils.warn(
                    "Please file an issue at 'https://github.com/PaddlePaddle/Paddle/issues'"
                    " if you can't handle this {} yourself.".format(type(e)))
                raise e
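
A hedged usage sketch of the call path above: decorating a plain function with @paddle.jit.to_static and calling it routes through StaticFunction.__call__; the function below is illustrative only:

import paddle


@paddle.jit.to_static
def add_one(x):
    return x + 1


x = paddle.ones([2, 2])
y = add_one(x)  # the first call traces and caches a concrete program; later calls reuse it
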