def _trace(layer,
           inputs,
           feed_prefix='feed_',
           fetch_prefix='fetch_',
           tmp_prefix='t_'):
    """Trace a dygraph ``layer`` once and build a static program from it.

    Args:
        layer (Layer): dygraph layer to trace; must be a ``Layer`` instance.
        inputs: a single variable or a list/tuple of variables fed to ``layer``.
        feed_prefix (str): name prefix used for generated feed variables.
        fetch_prefix (str): name prefix used for generated fetch variables.
        tmp_prefix (str): name prefix used for generated temporary variables.

    Returns:
        tuple: ``(original_outputs, program, feed_names, fetch_names,
        parameters)`` where ``original_outputs`` is whatever ``layer``
        returned and ``program`` is the traced static program.
    """
    assert isinstance(layer, Layer)

    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]

    tracer = _dygraph_tracer()._get_program_desc_tracer()

    var_list = extract_vars(inputs)

    with program_desc_tracing_guard(True):
        original_outputs = layer(*inputs)
        if not isinstance(original_outputs, (list, tuple)):
            outputs = [original_outputs]
        else:
            outputs = original_outputs
        # Plain shallow copy; the previous identity comprehension
        # ([var for var in outputs]) was an unnecessary idiom.
        out_vars = list(outputs)

        program_desc, feed_names, fetch_names, parameters = tracer.create_program_desc(
            var_list, feed_prefix, out_vars, fetch_prefix, tmp_prefix)
        tracer.reset()

    # Leave tracing mode before materializing the Program object.
    with _dygraph_guard(None):
        program = create_program_from_desc(program_desc)

    return original_outputs, program, feed_names, fetch_names, parameters
def guard(place=None):
    """Create a dygraph context for the enclosed code to run in.

    Args:
        place: place to execute on. When ``None``, ``CUDAPlace(0)`` is
            chosen if paddle was compiled with CUDA, otherwise ``CPUPlace``.
    """
    main_prog = framework.Program()
    startup_prog = framework.Program()
    desc_tracer = Tracer(main_prog.current_block().desc)

    if place is None:
        place = core.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else core.CPUPlace()

    with framework.program_guard(main_prog, startup_prog):
        with framework.unique_name.guard():
            with framework._dygraph_guard(desc_tracer):
                with framework._dygraph_place_guard(place):
                    yield
def dygraph2program(layer,
                    inputs,
                    feed_prefix='feed_',
                    fetch_prefix='fetch_',
                    tmp_prefix='t_',
                    extract_inputs_fn=None,
                    extract_outputs_fn=None,
                    dtypes=None):
    """Convert a dygraph ``layer`` to a static ``Program`` by tracing one call.

    Args:
        layer (Layer): dygraph layer to convert; must be a ``Layer`` instance.
        inputs: one of a shape, a list of shapes, or concrete input values.
            Shapes are turned into tensors via ``_create_tensors``.
        feed_prefix (str): name prefix for generated feed variables.
        fetch_prefix (str): name prefix for generated fetch variables.
        tmp_prefix (str): name prefix for generated temporary variables.
        extract_inputs_fn (callable, optional): extracts input variables from
            ``inputs``; defaults to ``extract_vars``.
        extract_outputs_fn (callable, optional): extracts output variables
            from the layer's outputs; defaults to ``extract_vars``.
        dtypes: dtypes used when creating tensors from shapes.

    Returns:
        Program: the traced static program.
    """
    # NOTE: the previous revision left a debug `print(type(layer))` here;
    # the type is now reported in the assert message instead.
    assert isinstance(
        layer, Layer), "layer should be an instance of Layer, but got {}".format(
            type(layer))
    extract_inputs_fn = extract_inputs_fn if extract_inputs_fn is not None else extract_vars
    extract_outputs_fn = extract_outputs_fn if extract_outputs_fn is not None else extract_vars

    # Eager mode uses a separate dygraph-to-program path.
    if os.environ.get("FLAGS_enable_eager_mode") == "1":
        return _dy2prog(layer, inputs, feed_prefix, fetch_prefix, tmp_prefix,
                        extract_inputs_fn, extract_outputs_fn, dtypes)

    tracer = _dygraph_tracer()._get_program_desc_tracer()

    with program_desc_tracing_guard(True):
        if _is_shape(inputs):
            shapes = [inputs]
            inputs = _create_tensors(shapes, dtypes=dtypes)
            input_var_list = inputs
        elif _is_shapes(inputs):
            inputs = _create_tensors(inputs, dtypes=dtypes)
            input_var_list = inputs
        else:
            inputs = to_variables(inputs)
            input_var_list = extract_inputs_fn(inputs)
        original_outputs = layer(*inputs)
        # 'original_outputs' may be a dict, so convert it to a list of
        # variables; 'extract_vars' must not create new variables here.
        out_var_list = extract_outputs_fn(original_outputs)
        program_desc, feed_names, fetch_names, parameters = tracer.create_program_desc(
            input_var_list, feed_prefix, out_var_list, fetch_prefix, tmp_prefix)
        tracer.reset()

    # Build the Program object outside of tracing mode.
    with _dygraph_guard(None):
        program = Program()
        program.desc = program_desc
        program.blocks = [Block(program, 0)]
        program._sync_with_cpp()
    return program
def guard(place=None):
    """
    :api_attr: imperative

    This context will create a dygraph context for dygraph to run, using python
    ``with`` statement.

    Parameters:
        place(fluid.CPUPlace| fluid.CUDAPlace|str, optional): Place to execute
            dygraph. If None, the running place will be determined according
            to the way of paddle compilation. If ``place`` is string, It can
            be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the index of
            the GPUs or XPUs. Default: None

    return:
        None

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard():
            inp = np.ones([3, 1024], dtype='float32')
            t = fluid.dygraph.base.to_variable(inp)
            linear1 = fluid.Linear(1024, 4, bias_attr=False)
            linear2 = fluid.Linear(4, 4)
            ret = linear1(t)
            dy_ret = linear2(ret)

    """
    train = framework.Program()
    startup = framework.Program()
    tracer = Tracer()
    # Removed the unused local `VarBase = core.VarBase` (dead code).

    if place is not None:
        expected_place = _get_paddle_place(place)
    else:
        expected_place = framework._current_expected_place()

    with framework.program_guard(train, startup):
        with framework.unique_name.guard():
            with framework._dygraph_guard(tracer):
                with framework._dygraph_place_guard(expected_place):
                    yield
def guard(place=None):
    """
    :api_attr: imperative

    This context will create a dygraph context for dygraph to run, using python
    ``with`` statement.

    Parameters:
        place(fluid.CPUPlace or fluid.CUDAPlace, optional): Place to execute
            dygraph. If None, the running place will be determined according
            to the way of paddle compilation. Default: None

    return:
        None

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard():
            inp = np.ones([3, 1024], dtype='float32')
            t = fluid.dygraph.base.to_variable(inp)
            linear1 = fluid.Linear(1024, 4, bias_attr=False)
            linear2 = fluid.Linear(4, 4)
            ret = linear1(t)
            dy_ret = linear2(ret)

    """
    train = framework.Program()
    startup = framework.Program()
    tracer = Tracer()
    # Removed the unused local `VarBase = core.VarBase` (dead code).

    if place is None:
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
    tracer._expected_place = place

    with framework.program_guard(train, startup):
        with framework.unique_name.guard():
            with framework._dygraph_guard(tracer):
                with framework._dygraph_place_guard(place):
                    yield
def guard(place=None):
    """
    This context will create a dygraph context for dygraph to run

    Args:
        place(fluid.CPUPlace|fluid.CUDAPlace|None): Place to run

    return:
        None

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard():
            inp = np.ones([3, 32, 32], dtype='float32')
            t = fluid.dygraph.base.to_variable(inp)
            fc1 = fluid.FC('fc1', size=4, bias_attr=False, num_flatten_dims=1)
            fc2 = fluid.FC('fc2', size=4)
            ret = fc1(t)
            dy_ret = fc2(ret)

    """
    main_prog = framework.Program()
    startup_prog = framework.Program()
    dygraph_tracer = Tracer()

    # Pick a default place based on how paddle was compiled.
    if place is None:
        place = core.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else core.CPUPlace()

    with framework.program_guard(main_prog, startup_prog), \
            framework.unique_name.guard(), \
            framework._dygraph_guard(dygraph_tracer), \
            framework._dygraph_place_guard(place):
        yield
def __impl__(*args, **kwargs):
    """Invoke the wrapped ``func`` with dygraph tracing disabled."""
    with framework._dygraph_guard(None):
        result = func(*args, **kwargs)
    return result
def is_equal_program(prog1, prog2):
    """Return whether ``prog1`` and ``prog2`` are equal, comparing them
    outside of dygraph mode."""
    with _dygraph_guard(None):
        equal = _is_equal_program(prog1, prog2)
    return equal