Example #1
    def _run(self, to_static):
        prog_trans = ProgramTranslator()
        prog_trans.enable(to_static)

        result = self.dygraph_func(self.input)

        return result.numpy()
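For context, `_run` helpers like the one above are typically driven by a small harness that compares the dygraph and static outputs. A minimal sketch, assuming a test-case instance exposing `_run` as above (the `check_consistency` name is ours):

import numpy as np

def check_consistency(test_case):
    # Run the same function eagerly and through dygraph-to-static,
    # then assert the two outputs match numerically.
    dygraph_out = test_case._run(to_static=False)
    static_out = test_case._run(to_static=True)
    np.testing.assert_allclose(static_out, dygraph_out, rtol=1e-5)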
Example #2
    def train(self, to_static=False):
        prog_trans = ProgramTranslator()
        prog_trans.enable(to_static)
        with fluid.dygraph.guard(PLACE):
            net = NetWithDictPop()
            ret = net(z=0, x=self.x, y=True)
            return ret.numpy()
Example #3
    def _run(self, mode, to_static):
        prog_trans = ProgramTranslator()
        prog_trans.enable(to_static)

        net = self.Net(mode)
        ret = net(self.x, self.y)
        return ret.numpy()
Example #4
    def train(self, to_static=False):
        prog_trans = ProgramTranslator()
        prog_trans.enable(to_static)
        with fluid.dygraph.guard(PLACE):
            net = MainNetWithDict(batch_size=self.batch_size)
            ret = net(self.x)
            return ret.numpy()
Example #5
    def test_export_deploy_model(self):
        for dynamic in [True, False]:
            if dynamic:
                fluid.enable_dygraph()  # paddle.disable_static() in the 2.x API
            prog_translator = ProgramTranslator()
            if not dynamic:
                prog_translator.enable(False)
            net = LeNetDeclarative()
            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
            model = Model(net, inputs)
            model.prepare()
            save_dir = tempfile.mkdtemp()
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            tensor_img = np.array(np.random.random((1, 1, 28, 28)),
                                  dtype=np.float32)
            ori_results = model.test_batch(tensor_img)
            model.save(save_dir, training=False)
            if dynamic:
                fluid.disable_dygraph()

            if fluid.is_compiled_with_cuda():
                place = fluid.CUDAPlace(0)
            else:
                place = fluid.CPUPlace()
            new_scope = fluid.Scope()
            with fluid.scope_guard(new_scope):
                exe = fluid.Executor(place)
                inference_program, feed_target_names, fetch_targets = (
                    fluid.io.load_inference_model(dirname=save_dir,
                                                  executor=exe))
                results = exe.run(inference_program,
                                  feed={feed_target_names[0]: tensor_img},
                                  fetch_list=fetch_targets)
                np.testing.assert_allclose(results,
                                           ori_results,
                                           rtol=1e-5,
                                           atol=1e-7)
                shutil.rmtree(save_dir)
Example #6
    def __impl__(*args, **kwargs):
        program_translator = ProgramTranslator()
        if not program_translator.enable_declarative:
            logger.info(
                "The decorator 'declarative' doesn't work when "
                "ProgramTranslator.enable is set to False. We will just "
                "return the dygraph output.")
            return dygraph_func(*args, **kwargs)
        return program_translator.get_output(dygraph_func, *args, **kwargs)
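For reference, `__impl__` above is the inner wrapper of the `declarative` decorator, exposed as `paddle.jit.to_static` in Paddle 2.x. A minimal usage sketch (the `add_one` function is ours):

import paddle
from paddle.jit import to_static

@to_static
def add_one(x):
    return x + 1

x = paddle.ones([2, 2])
y = add_one(x)  # executes through the translated static graph when enabled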
Example #7
    def _run(self, to_static=False):
        prog_trans = ProgramTranslator()
        prog_trans.enable(to_static)

        with fluid.dygraph.guard(place):
            net = self.Net()
            x_v = fluid.dygraph.to_variable(self.x)
            ret = net(x_v)
            return ret.numpy()
Example #8
    def __impl__(*args, **kwargs):
        program_translator = ProgramTranslator()
        if in_dygraph_mode() or not program_translator.enable_declarative:
            logger.info(
                "The decorator 'dygraph_to_static_func' doesn't work in "
                "dygraph mode or when ProgramTranslator.enable is set to "
                "False. We will just return the dygraph output.")
            return dygraph_func(*args, **kwargs)
        static_func = program_translator.get_func(dygraph_func)
        return static_func(*args, **kwargs)
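Unlike 'declarative' in Example #6, `dygraph_to_static_func` targets static mode: it builds ops into the current default program rather than wrapping eager calls. A hedged sketch under the legacy fluid API (the `double` function is ours, and the import path is an assumption based on older Paddle releases):

import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.jit import dygraph_to_static_func  # legacy API

paddle.enable_static()

@dygraph_to_static_func
def double(x):
    return x * 2

x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = double(x)  # appends the corresponding ops to the default main program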
Example #9
    def __impl__(*args, **kwargs):
        if in_dygraph_mode():
            warnings.warn(
                "The decorator 'dygraph_to_static_program' doesn't work in "
                "dygraph mode. We will just return the dygraph output. Use "
                "the decorator in static mode if you would like to translate "
                "to a static graph.")
            return dygraph_func(*args, **kwargs)
        program_translator = ProgramTranslator()
        return program_translator.get_program(dygraph_func, *args, **kwargs)
Example #10
    def test_ast_to_func(self):
        ProgramTranslator().enable(True)
        with self.assertRaises(TypeError):
            static_func = paddle.jit.to_static(self.dyfunc)
            out = static_func(self.x)
        # Why do we need to set `_in_declarative_mode_` here?
        # In Dy2St we use `with _switch_declarative_mode_guard_()` to indicate
        # that the code block is under @to_static, but in this UT an exception
        # is thrown during Dy2St, leaving `_in_declarative_mode_` with a wrong
        # value. So we need to set `_in_declarative_mode_` to False manually.
        paddle.fluid.dygraph.base._in_declarative_mode_ = False
        ProgramTranslator().enable(False)
Example #11
    def get_dy2stat_out(self):
        ProgramTranslator().enable(True)
        static_func = paddle.jit.to_static(self.dyfunc)
        out = static_func(self.x)
        ProgramTranslator().enable(False)
        return out
Example #12
    def __impl__(*args, **kwargs):
        program_translator = ProgramTranslator()
        return program_translator.get_code(dygraph_func)
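A short usage sketch for the snippet above: `get_code` returns the transformed static-graph source of a dygraph function as a string, which is handy for debugging the AST transformation (the `my_func` name is ours):

from paddle.jit import ProgramTranslator

def my_func(x):
    return x * 2

code = ProgramTranslator().get_code(my_func)
print(code)  # prints the source produced by the dygraph-to-static pass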
Example #13
def get_program(layer, input_spec, output_spec, **configs):
    paddle.jit.set_verbosity(0)
    prog_translator = ProgramTranslator()
    if not prog_translator.enable_to_static:
        raise RuntimeError(
            "The Paddle2onnx doesn't work when setting ProgramTranslator.enable to False."
        )

    if not isinstance(layer, Layer):
        raise TypeError(
            "The input of paddle2onnx should be 'Layer', but received input type is %s."
            % type(layer))

    if isinstance(layer, paddle.DataParallel):
        inner_layer = layer._layers
    else:
        inner_layer = layer

    # avoid changing the user-given input_spec
    inner_input_spec = None
    if input_spec is not None:
        for attr_func in dir(inner_layer):
            static_func = getattr(inner_layer, attr_func, None)
            if isinstance(static_func,
                          StaticFunction) and 'forward' != attr_func:
                raise ValueError(
                    "If there are static functions other than 'forward' that need to be saved, the input 'input_spec' should be None, but the received type of 'input_spec' is %s."
                    % type(input_spec))

        if not isinstance(input_spec, (list, tuple)):
            raise TypeError(
                "The input input_spec should be a 'list' or 'tuple', but the received type is %s."
                % type(input_spec))
        inner_input_spec = []
        for var in flatten(input_spec):
            if isinstance(var, paddle.static.InputSpec):
                inner_input_spec.append(var)
            elif isinstance(var, (core.VarBase, core.eager.Tensor, Variable)):
                inner_input_spec.append(
                    paddle.static.InputSpec.from_tensor(var))
            else:
                # NOTE(Aurelius84): Support non-Tensor type in `input_spec`.
                inner_input_spec.append(var)

    extra_var_info = dict()
    functions = dir(inner_layer)
    for attr_func in functions:
        static_func = getattr(inner_layer, attr_func, None)
        if isinstance(static_func, StaticFunction):
            concrete_program = static_func.concrete_program_specify_input_spec(
                inner_input_spec)
        elif 'forward' == attr_func:
            # Transform here as in jit.save; if input_spec is incomplete,
            # declarative will throw an error. inner_input_spec is a
            # list[InputSpec]; it should be packed with the same structure
            # as the original input_spec.
            if inner_input_spec:
                inner_input_spec = pack_sequence_as(input_spec,
                                                    inner_input_spec)
            static_forward = declarative(inner_layer.forward,
                                         input_spec=inner_input_spec)
            concrete_program = static_forward.concrete_program
            # The input_spec has already been consumed by declarative, which
            # is equivalent to @declarative with input_spec plus jit.save
            # without input_spec; clear it to avoid a needless warning.
            inner_input_spec = None
        else:
            continue

        input_var_names = _get_input_var_names(concrete_program.inputs,
                                               inner_input_spec)

        # NOTE(chenweihang): [ Get output variables ]
        # The rule is like [ Get input variables name ]. For output vars,
        # we only support VarBase specs; in fact, we only need the var
        # names of the outputs, and we don't recommend using output_spec.
        output_vars = _get_output_vars(concrete_program.outputs, output_spec)

    feeded_var_names = input_var_names
    target_vars = output_vars
    main_program = concrete_program.main_program.clone()
    export_for_deployment = True

    if isinstance(feeded_var_names, six.string_types):
        feeded_var_names = [feeded_var_names]
    elif export_for_deployment:
        if len(feeded_var_names) > 0:
            # TODO(paddle-dev): polish these code blocks
            if not (bool(feeded_var_names) and all(
                    isinstance(name, six.string_types)
                    for name in feeded_var_names)):
                raise ValueError("'feeded_var_names' should be a list of str.")

    if isinstance(target_vars, Variable):
        target_vars = [target_vars]
    elif export_for_deployment:
        if not (bool(target_vars)
                and all(isinstance(var, Variable) for var in target_vars)):
            raise ValueError("'target_vars' should be a list of Variable.")

    main_program = _get_valid_program(main_program)

    # remind user to set auc_states to zeros if the program contains auc op
    all_ops = main_program.global_block().ops
    for op in all_ops:
        # clear device of Op
        device_attr_name = core.op_proto_and_checker_maker.kOpDeviceAttrName()
        op._set_attr(device_attr_name, "")
        if op.type == 'auc':
            warnings.warn(
                "please ensure that you have set the auc states to zeros before saving inference model"
            )
            break

    with program_guard(main_program):
        uniq_target_vars = []
        for i, var in enumerate(target_vars):
            uniq_target_vars.append(var)
        target_vars = uniq_target_vars
    target_var_name_list = [var.name for var in target_vars]

    origin_program = main_program.clone()

    main_program = main_program.clone()
    global_block = main_program.global_block()
    need_to_remove_op_index = []
    for i, op in enumerate(global_block.ops):
        op.desc.set_is_target(False)
        if op.type == "feed" or op.type == "fetch":
            need_to_remove_op_index.append(i)

    for index in need_to_remove_op_index[::-1]:
        global_block._remove_op(index)

    main_program.desc.flush()

    main_program = main_program._prune_with_input(
        feeded_var_names=feeded_var_names, targets=target_vars)
    main_program = main_program._inference_optimize(prune_read_op=True)
    fetch_var_names = [v.name for v in target_vars]

    for target_v in target_vars:
        if not main_program.global_block().has_var(target_v.name):
            main_program.global_block().create_var(
                name=target_v.name,
                shape=target_v.shape,
                dtype=target_v.dtype,
                persistable=target_v.persistable)

    prepend_feed_ops(main_program, feeded_var_names)
    append_fetch_ops(main_program, fetch_var_names)

    main_program.desc._set_version()
    paddle.fluid.core.save_op_version_info(main_program.desc)

    main_program._copy_dist_param_info_from(origin_program)

    return main_program, feeded_var_names, target_vars
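A hedged usage sketch for `get_program` above, assuming the function is importable in scope (the `Linear` layer and the spec are ours):

import paddle
from paddle.static import InputSpec

layer = paddle.nn.Linear(4, 2)
spec = [InputSpec(shape=[None, 4], dtype='float32', name='x')]
program, feed_names, fetch_vars = get_program(layer, spec, output_spec=None)
print(feed_names)  # e.g. ['x']
print(program)     # the pruned inference Program ready for export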