Example #1
def get_new_shape_tensor(list_shape):
    """Convert a list of Python ints and Variables into a list of
    1-element int32 shape tensors."""
    new_shape_tensor = []
    for dim in list_shape:
        if isinstance(dim, Variable):
            dim.stop_gradient = True
            new_shape_tensor.append(dim)
        else:
            assert isinstance(dim, int)
            temp_out = _helper.create_variable_for_type_inference('int32')
            fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
            new_shape_tensor.append(temp_out)
    return new_shape_tensor
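
A minimal usage sketch (an illustration under assumptions, not from the original source): it presumes a PaddlePaddle 1.x static-graph program in which `Variable`, `fill_constant`, and a module-level `_helper` are available, as the helper above expects.

import paddle.fluid as fluid
from paddle.fluid.framework import Variable
from paddle.fluid.layers import fill_constant
from paddle.fluid.layer_helper import LayerHelper

_helper = LayerHelper('shape_helper')  # assumed module-level helper

dim1 = fluid.data(name='dim1', shape=[1], dtype='int32')  # dynamic dimension
# Mixing Python ints and Variables yields a homogeneous list of
# 1-element int32 tensors, usable as a dynamic target shape.
tensors = get_new_shape_tensor([3, dim1, 12])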
Example #2
def interpolate(input,
                out_shape=None,
                scale=None,
                name=None,
                resample='BILINEAR',
                actual_shape=None,
                align_corners=True,
                align_mode=1,
                data_format='NCHW'):
    """
	:alias_main: paddle.nn.functional.interpolate
	:alias: paddle.nn.functional.interpolate,paddle.nn.functional.common.interpolate

    This op resizes a batch of images.
    The input must be a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
    or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
    and the resizing only applies to the spatial dimensions (depth, height and width).
    **Warning:** the parameter :attr:`actual_shape` will be deprecated in the
    future; use :attr:`out_shape` instead.
    Supporting resample methods:
        'BILINEAR' : Bilinear interpolation
        'TRILINEAR' : Trilinear interpolation
        'NEAREST' : Nearest neighbor interpolation
        'BICUBIC' : Bicubic interpolation

    Nearest neighbor interpolation performs nearest neighbor sampling in both
    the 3rd dimension (the height direction) and the 4th dimension (the width
    direction) of the input tensor.

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
    again in the other direction.

    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
    The linear interpolation is performed on three directions.
    Align_corners and align_mode are optional parameters; together they select
    the calculation method of the interpolation.

    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.

    Example:

    .. code-block:: text

        For scale:

            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)

            else:

              scale_factor = float(in_size/out_size)


        Nearest neighbor interpolation:

          if:
              align_corners = False
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})
          else:
              align_corners = True
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})
        Bilinear interpolation:
          if:
              align_corners = False , align_mode = 0

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:

              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Bicubic interpolation:

          if:
              align_corners = False

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:

              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:

              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0

              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:

              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:

              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

    For details of nearest neighbor interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
    For details of bilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bilinear_interpolation.
    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation.
    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation
    Parameters:
        input (Variable): 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
        out_shape(list|tuple|Variable|None): Output shape of the image resize
             layer; the shape is (out_h, out_w) when the input is a 4-D Tensor and
             (out_d, out_h, out_w) when the input is a 5-D Tensor. Default: None. If
             a list, each element can be an integer or a Tensor Variable of shape [1].
             If a Tensor Variable, it should be 1-D.
        scale(float|Variable|None): The multiplier for the input height or width. At
             least one of :attr:`out_shape` or :attr:`scale` must be set.
             And :attr:`out_shape` has a higher priority than :attr:`scale`.
             Default: None.
        name(str|None): A name for this layer(optional). If set None, the layer
                        will be named automatically.
        resample(str): The resample method. It supports 'BILINEAR', 'TRILINEAR' ,
                       'BICUBIC' and 'NEAREST' currently. Default: 'BILINEAR'
        actual_shape(Variable): An optional input to specify the output shape
                                dynamically. If provided, the image is resized
                                according to this shape rather than
                                :attr:`out_shape` and :attr:`scale`; that is,
                                actual_shape has the highest priority. It is
                                recommended to use :attr:`out_shape` if you
                                want to specify the output shape dynamically,
                                because :attr:`actual_shape` will be
                                deprecated. When using actual_shape to specify
                                the output shape, one of :attr:`out_shape` and
                                :attr:`scale` should also be set, otherwise
                                errors will occur during graph construction.
                                Default: None
        align_corners(bool) :  An optional bool. If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
                               corner pixels.
                               Default: True
        align_mode(int)  :  An optional flag for bilinear interpolation. Can be 0
                            for src_idx = scale*(dst_index+0.5)-0.5, or 1 for
                            src_idx = scale*dst_index. Default: 1.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`, `"NCDHW"`,
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCDHW"`, the data is stored
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
    Returns:
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
    Raises:
        TypeError: out_shape should be a list or tuple or Variable.
        TypeError: actual_shape should either be Variable or None.
        ValueError: The 'resample' of image_resize can only be 'BILINEAR',
                    'TRILINEAR', 'BICUBIC', or 'NEAREST' currently.
        ValueError: 'BILINEAR', 'BICUBIC' and 'NEAREST' only support 4-D tensors.
        ValueError: 'TRILINEAR' only supports 5-D tensors.
        ValueError: One of out_shape and scale must not be None.
        ValueError: out_shape length should be 2 for input 4-D tensor.
        ValueError: out_shape length should be 3 for input 5-D tensor.
        ValueError: scale should be greater than zero.
        TypeError: align_corners should be a bool value
        ValueError: align_mode can only be '0' or '1'
        ValueError: data_format can only be 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
    Examples:
        .. code-block:: python

            # declarative mode
            import paddle
            import paddle.fluid as fluid
            import numpy as np
            input = fluid.data(name="input", shape=[None,3,6,10])
            #1
            output = paddle.nn.functional.interpolate(input=input, out_shape=[12,12])
            #2
            #x = np.array([2]).astype("int32")
            #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
            #fluid.layers.assign(input=x, output=dim1)
            #output = paddle.nn.functional.interpolate(input=input, out_shape=[12,dim1])
            #3
            #x = np.array([3,12]).astype("int32")
            #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
            #fluid.layers.assign(input=x, output=shape_tensor)
            #output = paddle.nn.functional.interpolate(input=input, out_shape=shape_tensor)
            #4
            #x = np.array([0.5]).astype("float32")
            #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
            #fluid.layers.assign(x, scale_tensor)
            #output = paddle.nn.functional.interpolate(input=input, scale=scale_tensor)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,3,6,10).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            print(output_data[0].shape)
            #1
            # (2, 3, 12, 12)
            #2
            # (2, 3, 12, 2)
            #3
            # (2, 3, 3, 12)
            #4
            # (2, 3, 3, 5)

            # imperative mode
            import paddle.fluid.dygraph as dg
            with dg.guard(place) as g:
                input = dg.to_variable(input_data)
                output = paddle.nn.functional.interpolate(input=input, out_shape=[12,12])
                print(output.shape)
                # [2L, 3L, 12L, 12L]
    """
    resample_methods = {
        'LINEAR': 'linear',
        'BILINEAR': 'bilinear',
        'TRILINEAR': 'trilinear',
        'NEAREST': 'nearest',
        'BICUBIC': 'bicubic',
    }
    if resample not in resample_methods:
        raise ValueError(
            "The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR', "
            "'TRILINEAR', 'BICUBIC' or 'NEAREST' currently.")
    resample_type = resample_methods[resample]

    if resample in ['LINEAR'] and len(input.shape) != 3:
        raise ValueError("'LINEAR' only supports 3-D tensors.")

    if resample in ['BILINEAR', 'NEAREST', 'BICUBIC'] and len(input.shape) != 4:
        raise ValueError(
            "'BILINEAR', 'BICUBIC' and 'NEAREST' only support 4-D tensors.")
    if resample == 'TRILINEAR' and len(input.shape) != 5:
        raise ValueError("'TRILINEAR' only supports 5-D tensors.")

    if not isinstance(align_corners, bool):
        raise TypeError("Attr align_corners should be a bool value")
    if align_mode != 0 and align_mode != 1:
        raise ValueError("align_mode can only be 0 or 1")

    if out_shape is None and scale is None:
        raise ValueError("One of out_shape and scale must not be None.")
    helper = LayerHelper('{}_interp'.format(resample_type), **locals())
    dtype = helper.input_dtype()

    if len(input.shape) == 3 and data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCHW` or `NHWC` supported for 3-D input.")
    elif len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCHW` or `NHWC` supported for 4-D input.")
    elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCDHW` or `NDHWC` supported for 5-D input.")

    def _is_list_or_turple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

    if data_format == 'NCHW' or data_format == 'NCDHW':
        data_layout = 'NCHW'
    if data_format == 'NHWC' or data_format == 'NDHWC':
        data_layout = 'NHWC'

    inputs = {"X": input}
    attrs = {
        "out_d": -1,
        "out_h": -1,
        "out_w": -1,
        "interp_method": resample_type,
        "align_corners": align_corners,
        "align_mode": align_mode,
        "data_layout": data_layout
    }

    if out_shape is not None:
        if isinstance(out_shape, Variable):
            out_shape.stop_gradient = True
            inputs['OutSize'] = out_shape
        else:
            if not (_is_list_or_turple_(out_shape)):
                raise TypeError(
                    "out_shape should be a list or tuple or Variable.")
            # Validate the shape
            contain_var = False
            for dim_idx, dim_size in enumerate(out_shape):
                if isinstance(dim_size, Variable):
                    contain_var = True
                    continue
                assert dim_size > 0, (
                    "Each dimension size given in out_shape must be greater than 0."
                )

            if contain_var:
                new_size_tensor = []
                size_list = []
                for dim in out_shape:
                    if isinstance(dim, Variable):
                        dim.stop_gradient = True
                        new_size_tensor.append(dim)
                        size_list.append(-1)
                    else:
                        assert (isinstance(dim, int))
                        temp_out = helper.create_variable_for_type_inference(
                            'int32')
                        fill_constant(
                            [1], 'int32', dim, force_cpu=True, out=temp_out)
                        new_size_tensor.append(temp_out)
                        size_list.append(dim)
                inputs['SizeTensor'] = new_size_tensor

            if len(input.shape) == 3:
                if len(out_shape) != 1:
                    raise ValueError(
                        "out_shape length should be 2 for input 3-D tensor")
                if contain_var:
                    attrs['out_w'] = size_list[0]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_w'] = out_shape[0]
            if len(input.shape) == 4:
                if len(out_shape) != 2:
                    raise ValueError("out_shape length should be 2 for "
                                     "input 4-D tensor.")
                if contain_var:
                    attrs['out_h'] = size_list[0]
                    attrs['out_w'] = size_list[1]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_h'] = out_shape[0]
                    attrs['out_w'] = out_shape[1]
            if len(input.shape) == 5:
                if len(out_shape) != 3:
                    raise ValueError("out_shape length should be 3 for "
                                     "input 5-D tensor.")
                if contain_var:
                    attrs['out_d'] = size_list[0]
                    attrs['out_h'] = size_list[1]
                    attrs['out_w'] = size_list[2]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_d'] = out_shape[0]
                    attrs['out_h'] = out_shape[1]
                    attrs['out_w'] = out_shape[2]

    else:
        if isinstance(scale, Variable):
            scale.stop_gradient = True
            inputs["Scale"] = scale
        elif isinstance(scale, float) or isinstance(scale, int):
            if scale <= 0:
                raise ValueError("Attr(scale) should be greater than zero.")
            attrs['scale'] = float(scale)
        else:
            raise TypeError(
                "Attr(scale)'s type should be float, int or Variable.")

    if isinstance(actual_shape, Variable):
        warnings.warn(
            "actual_shape will be deprecated, it is recommended to use "
            "out_shape instead of actual_shape to specify output shape dynamically."
        )
        actual_shape.stop_gradient = True
        inputs["OutSize"] = actual_shape
    elif actual_shape is not None:
        raise TypeError("actual_shape should either be Variable or None.")

    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='{}_interp'.format(resample_type),
        inputs=inputs,
        outputs={"Out": out},
        attrs=attrs)
    return out
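
The output-size rules in the docstring can be checked numerically. Below is a minimal pure-Python sketch of the nearest-neighbor rule only; `expected_hw` is a hypothetical helper for illustration, not part of the Paddle API.

import math

def expected_hw(in_size, scale, align_corners):
    # Mirrors the docstring: floor() when align_corners=False,
    # round() when align_corners=True.
    if align_corners:
        return int(round(in_size * scale))
    return int(math.floor(in_size * scale))

print(expected_hw(6, 2.0, align_corners=False))   # 12
print(expected_hw(10, 0.35, align_corners=False)) # 3
print(expected_hw(10, 0.35, align_corners=True))  # 4 (round(3.5))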
Example #3
def decode_with_grammar(decoder, inits, decode_vocab, max_step_num, **kwargs):
    """A modification of paddle.fluid.layers.dynamic_decode(...).
    Dynamic decoding performs :code:`decoder.step()` repeatedly until the returned
    Tensor indicating finished status contains all True values or the number of
    decoding steps reaches :attr:`max_step_num`.
    :code:`decoder.initialize()` would be called once before the decoding loop.
    If the `decoder` has implemented `finalize` method, :code:`decoder.finalize()`
    would be called once after the decoding loop.

    Args:
        decoder(Decoder): An instance of `Decoder`.
        inits(tuple): Argument passed to `decoder.initialize`. 
        decode_vocab(DecoderDynamicVocab): namedtuple(table table_len column column_len value value_len)
        max_step_num(int): The maximum number of steps.
        **kwargs: Additional keyword arguments. Arguments passed to `decoder.step`. 

    Returns:
        tuple: A tuple( :code:`(final_outputs, final_states)` ) including the final \
            outputs and states, both of which are Tensors or nested structures of Tensors. \
            `final_outputs` has the same structure and data types as \
            :code:`decoder.output_dtype` , and each Tensor in `final_outputs` \
            is the stacking of all decoding steps' outputs, which might be revised \
            by :code:`decoder.finalize` . `final_states` is the counterpart, at the \
            last time step, of the initial states returned by :code:`decoder.initialize` , \
            thus it has the same structure, and its tensors have the same shapes \
            and data types.
    """
    step_cnt = tensor.fill_constant(shape=[1], dtype="int64", value=1)
    max_step_num_tensor = tensor.fill_constant(shape=[1],
                                               dtype="int64",
                                               value=max_step_num - 2)

    # shape = [batch_size, beam_size, ...]
    initial_inputs, initial_states, initial_finished = decoder.initialize(
        inits, decode_vocab)
    global_inputs, global_states, global_finished = (initial_inputs,
                                                     initial_states,
                                                     initial_finished)
    inputs = initial_inputs
    states = initial_states

    # Save the decoding outputs
    outputs_arr_data = tensor.fill_constant_batch_size_like(
        inputs.input,
        shape=[-1, decoder.beam_size, max_step_num],
        dtype=decoder.output_dtype.predicted_ids,
        value=0)
    outputs_arr_pos = tensor.fill_constant_batch_size_like(
        inputs.input, shape=[-1, decoder.beam_size, 1], dtype='int64', value=0)
    outputs_array = data_structure.ArrayData(
        decoder.merge_batch_beams(outputs_arr_data),
        decoder.merge_batch_beams(outputs_arr_pos))

    sequence_lengths = tensor.cast(tensor.zeros_like(initial_finished),
                                   "int64")

    # Constraint data structures for grammar-guided decoding
    grammar_stack_dat = tensor.fill_constant_batch_size_like(
        inputs.input,
        shape=[-1, decoder.beam_size, max_step_num * STACK_EXPAND_TIMES],
        dtype='int64',
        value=0)
    grammar_stack_pos = tensor.fill_constant_batch_size_like(
        inputs.input, shape=[-1, decoder.beam_size, 1], dtype='int64', value=0)
    grammar_stack = data_structure.StackData(
        decoder.merge_batch_beams(grammar_stack_dat),
        decoder.merge_batch_beams(grammar_stack_pos))

    ############   Loop decoding until all beams reach the finished state   ############
    #   Finished is determined via global_finished/next_finished && max_step_num
    cond = layers.logical_not((layers.reduce_all(initial_finished)))
    while_op = layers.While(cond)
    with while_op.block():
        # step_outputs --> OutputWrapper
        # next_states  --> StateWrapper
        # next_inputs  --> DecoderInputsWrapper
        step_outputs, next_states, next_inputs = decoder.step(
            inputs, states, **kwargs)
        predicted_ids = step_outputs.predicted_ids
        _save_predict_output(outputs_array, predicted_ids,
                             next_states.finished)

        pred_gmr_type = decoder.grammar_type(predicted_ids)
        cond_type_leaf = layers.equal(pred_gmr_type, decoder.GMR_TYPE.LEAF)
        cond_type_midd = layers.equal(pred_gmr_type, decoder.GMR_TYPE.MID)

        _process_type_leaf(cond_type_leaf, decoder, grammar_stack, next_inputs,
                           next_states.finished)
        _process_type_midd(cond_type_midd, decoder, grammar_stack, next_inputs,
                           predicted_ids)

        ##next_sequence_lengths = layers.elementwise_add(sequence_lengths,
        ##                        tensor.cast(layers.logical_not(global_finished), sequence_lengths.dtype))

        _check_finished(decoder, next_inputs, next_states.finished,
                        outputs_array)

        layers.utils.map_structure(tensor.assign, next_inputs, global_inputs)
        layers.utils.map_structure(tensor.assign, next_states, global_states)
        tensor.assign(next_states.finished, global_finished)
        ##tensor.assign(next_sequence_lengths, sequence_lengths)

        # Update the loop condition
        layers.increment(x=step_cnt, value=1.0, in_place=True)
        layers.logical_and(
            layers.logical_not(layers.reduce_all(next_states.finished)),
            layers.less_equal(step_cnt, max_step_num_tensor), cond)

    final_outputs = outputs_array.data
    final_states = global_states

    final_outputs, final_states = decoder.finalize(final_outputs,
                                                   global_states,
                                                   sequence_lengths)

    return final_outputs, final_states
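
The decoding loop above relies on fluid's in-place While pattern: the loop condition is a 1-element bool tensor that each iteration overwrites via an out-parameter (here, `logical_and(..., cond)`). A minimal, self-contained sketch of that pattern (assuming PaddlePaddle 1.x static graph; this counter example is illustrative, not from the original source):

import paddle.fluid as fluid
import paddle.fluid.layers as layers

limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
step = layers.fill_constant(shape=[1], dtype='int64', value=0)
cond = layers.less_than(x=step, y=limit)  # initial loop condition
while_op = layers.While(cond=cond)
with while_op.block():
    layers.increment(x=step, value=1.0, in_place=True)
    # Recompute the condition in place, as decode_with_grammar does
    # with layers.logical_and(..., cond) above.
    layers.less_than(x=step, y=limit, cond=cond)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
print(exe.run(fetch_list=[step])[0])  # [5]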
Example #4
def grammar_output(inputs,
                   actions,
                   gmr_mask,
                   last_col2tbl_mask,
                   decode_vocab,
                   grammar,
                   name=None,
                   column2table=None):
    """output logits according to grammar

    Args:
        inputs (Variable): shape = [batch_size, max_len, hidden_size]. infer 阶段 max_len 恒为1
        actions (Variable): shape = [batch_size, max_len]. infer 阶段 max_len 恒为1
        gmr_mask (Variable): shape = [batch_size, max_len, grammar_size]. infer 阶段 max_len 恒为1
        last_col2tbl_mask (Variable): shape = [batch_size, max_len, max_table]. 解码过程中,上一个step为column时,其对应的 table mask
        decode_vocab (DecoderDynamicVocab): (table, table_len, column, column_len, value, value_len, column2table_mask).
                                            这里的column2table_mask是跟column一一对应的table mask。
        gramamr (Grammar): NULL
        name (str): Variable 的 name 前缀。用于多次调用时的参数共享。默认为 None,表示参数不会共享。

    Returns: (Variable, Variable)
        output: 词表输出概率
        valid_table_mask: 只在预测阶段有效

    Raises: NULL
    """
    batch_size = layers.shape(inputs)[0]
    max_len = inputs.shape[1]
    vocab_size = grammar.vocab_size

    action_shape = [batch_size, max_len]
    act_apply_rule = tensor.fill_constant(shape=action_shape,
                                          value=grammar.ACTION_APPLY,
                                          dtype='int64')
    act_stop = tensor.fill_constant(shape=action_shape,
                                    value=grammar.ACTION_STOP,
                                    dtype='int64')
    act_select_t = tensor.fill_constant(shape=action_shape,
                                        value=grammar.ACTION_SELECT_T,
                                        dtype='int64')
    act_select_c = tensor.fill_constant(shape=action_shape,
                                        value=grammar.ACTION_SELECT_C,
                                        dtype='int64')
    act_select_v = tensor.fill_constant(shape=action_shape,
                                        value=grammar.ACTION_SELECT_V,
                                        dtype='int64')
    cond_apply_rule = layers.logical_or(layers.equal(actions, act_apply_rule),
                                        layers.equal(actions, act_stop))
    cond_select_t = layers.equal(actions, act_select_t)
    cond_select_c = layers.equal(actions, act_select_c)
    cond_select_v = layers.equal(actions, act_select_v)

    # expand vocab to [-1, max_len, ...]
    if max_len == 1:
        expand_to_seq_len = lambda x: layers.unsqueeze(x, [1])
    else:
        expand_to_seq_len = lambda x: layers.expand(layers.unsqueeze(
            x, [1]), [1, max_len] + [1] * (len(x.shape) - 1))
    table_enc = expand_to_seq_len(decode_vocab.table)
    table_len = expand_to_seq_len(decode_vocab.table_len)
    column_enc = expand_to_seq_len(decode_vocab.column)
    column_len = expand_to_seq_len(decode_vocab.column_len)
    value_enc = expand_to_seq_len(decode_vocab.value)
    value_len = expand_to_seq_len(decode_vocab.value_len)
    column2table_mask = expand_to_seq_len(decode_vocab.column2table_mask)

    # merge batch & seq_len dim
    inputs = nn_utils.merge_first_ndim(inputs, n=2)
    actions = nn_utils.merge_first_ndim(actions, n=2)
    gmr_mask = nn_utils.merge_first_ndim(gmr_mask, n=2)
    last_col2tbl_mask = nn_utils.merge_first_ndim(last_col2tbl_mask, n=2)
    table_enc = nn_utils.merge_first_ndim(table_enc, n=2)
    table_len = nn_utils.merge_first_ndim(table_len, n=2)
    column_enc = nn_utils.merge_first_ndim(column_enc, n=2)
    column_len = nn_utils.merge_first_ndim(column_len, n=2)
    value_enc = nn_utils.merge_first_ndim(value_enc, n=2)
    value_len = nn_utils.merge_first_ndim(value_len, n=2)
    column2table_mask = nn_utils.merge_first_ndim(column2table_mask, n=2)
    cond_apply_rule = nn_utils.merge_first_ndim(cond_apply_rule, n=2)
    cond_select_t = nn_utils.merge_first_ndim(cond_select_t, n=2)
    cond_select_c = nn_utils.merge_first_ndim(cond_select_c, n=2)
    cond_select_v = nn_utils.merge_first_ndim(cond_select_v, n=2)

    t_ptr_net = models.PointerNetwork(score_type="affine",
                                      name='gmr_output_t_ptr')
    c_ptr_net = models.PointerNetwork(score_type="affine",
                                      name='gmr_output_c_ptr')
    v_ptr_net = models.PointerNetwork(score_type="affine",
                                      name='gmr_output_v_ptr')

    ## Core processing logic ##
    apply_rule_output = _apply_rule(cond_apply_rule,
                                    inputs,
                                    gmr_mask,
                                    grammar,
                                    name=name)
    select_t_output = \
            _select_table(cond_select_t, inputs, table_enc, table_len, last_col2tbl_mask, t_ptr_net, grammar)
    select_c_output, valid_table_mask = \
            _select_column(cond_select_c, inputs, column_enc, column_len, c_ptr_net, grammar, column2table_mask)
    select_v_output = _select_value(cond_select_v, inputs, value_enc,
                                    value_len, v_ptr_net, grammar)

    output = fluider.elementwise_add(apply_rule_output,
                                     select_t_output,
                                     select_c_output,
                                     select_v_output,
                                     axis=0)
    output = layers.reshape(output, shape=[batch_size, max_len, vocab_size])
    return output, valid_table_mask
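
grammar_output appears to combine its branches by mask-and-add: each _apply_rule/_select_* branch receives its per-action condition tensor and contributes logits only where that condition holds, so the final elementwise_add routes every row to exactly one branch. A toy numpy sketch of that dispatch idea (hypothetical shapes and names, not the Paddle API):

import numpy as np

batch, vocab = 4, 6
actions = np.array([0, 1, 0, 2])  # toy action id per row
branch_logits = {a: np.random.rand(batch, vocab) for a in range(3)}

# Each branch keeps its logits only where its condition holds;
# summing the masked branches yields the final per-row logits.
output = sum((actions == a)[:, None] * logits
             for a, logits in branch_logits.items())
assert output.shape == (batch, vocab)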