Example #1
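These snippets target the PaddlePaddle 1.x fluid API and share the same preamble. A hedged sketch of the assumed imports follows; fluider, nn_utils, and data_structure are project-internal helper modules, named here exactly as the snippets reference them:

import paddle.fluid.layers as layers
import paddle.fluid.layers.tensor as tensor
# project-internal helpers (assumed local modules, not part of PaddlePaddle):
# import fluider, nn_utils, data_structure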
def _process_type_midd(condition, decoder, grammar_stack, next_inputs,
                       predicted_ids):
    """Process when output type is MID

    Args:
        condition (TYPE): NULL
        decoder (TYPE): NULL
        grammar_stack (TYPE): NULL
        next_inputs (TYPE): NULL
        predicted_ids (TYPE): NULL

    Returns: TODO

    Raises: NULL
    """
    # keep predicted ids only for rows where condition is set; other rows
    # are zeroed out
    midd_pred_ids = fluider.elementwise_mul(predicted_ids,
                                            condition,
                                            axis=0,
                                            force=True)
    ## get grammar desc
    # The grammar-rule sequence that the decoded result (a grammar id)
    # expands to, e.g. if the decoded result is SingleSQL, the corresponding
    # sequence is: Select Filter
    # shape = [batch_size, grammar.max_desc_len]
    gmr_desc = decoder.grammar_desc(midd_pred_ids)
    # Length of the rule sequence, e.g. SingleSQL --> Select Filter has length 2
    # shape = [batch_size, 1]
    gmr_desc_lens = decoder.grammar_desc_lens(midd_pred_ids)
    # shape = [batch_size, 1]
    gmr_desc_pos = tensor.zeros_like(gmr_desc_lens)

    ## generate the next action and grammar mask from the first token in desc
    next_output = nn_utils.batch_gather(gmr_desc, gmr_desc_pos)
    next_actions = decoder.grammar_action(next_output)
    next_gmr_mask = decoder.grammar_mask(next_output)

    ## push the remaining grammar tokens onto the stack
    gmr_stack_tmp, gmr_stack_pos_tmp = _push_to_stack(gmr_desc, gmr_desc_pos,
                                                      gmr_desc_lens,
                                                      grammar_stack)

    ## save the results, but only for rows where condition is True
    new_gmr_stack, new_gmr_stack_pos, new_actions, new_gmr_mask = nn_utils.ifelse(
        condition,
        [gmr_stack_tmp, gmr_stack_pos_tmp, next_actions, next_gmr_mask], [
            grammar_stack.data, grammar_stack.pos, next_inputs.action,
            next_inputs.gmr_mask
        ])
    layers.utils.map_structure(
        layers.assign,
        [new_gmr_stack, new_gmr_stack_pos, new_actions, new_gmr_mask], [
            grammar_stack.data, grammar_stack.pos, next_inputs.action,
            next_inputs.gmr_mask
        ])
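The conditional write-back pattern above (nn_utils.ifelse followed by map_structure over layers.assign) recurs in every snippet below. nn_utils.ifelse is a project helper; a minimal NumPy sketch of the row-wise select it is assumed to perform:

import numpy as np

def ifelse_sketch(condition, true_vals, false_vals):
    """Hypothetical stand-in for nn_utils.ifelse: per batch row, take the
    'true' tensor's row where condition is set, else the 'false' row."""
    out = []
    for t, f in zip(true_vals, false_vals):
        cond = condition.astype(bool).reshape((-1,) + (1,) * (t.ndim - 1))
        out.append(np.where(cond, t, f))
    return out

# Rows 0 and 2 are type MID, so only they receive the updated state.
cond = np.array([1, 0, 1])
new_state = [np.full((3, 2), 7)]
old_state = [np.zeros((3, 2), dtype=np.int64)]
print(ifelse_sketch(cond, new_state, old_state)[0])
# [[7 7]
#  [0 0]
#  [7 7]]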
Example #2
def _process_type_leaf(condition, decoder, grammar_stack, next_inputs,
                       finished):
    """Process when output type is LEAF

    Args:
        condition (TYPE): NULL
        decoder (TYPE): NULL
        grammar_stack (StackData): (gmr_stack_data, gmr_stack_pos)
        next_inputs (DecoderInputsWrapper): (input_var, action, grammar_mask)
        finished (TYPE): NULL

    Returns: None

    Raises: NULL
    """
    ## pop stack
    next_output, valid_pos, gmr_stack_tmp = data_structure.Stack.pop(
        grammar_stack, mask=True, in_place=False)
    valid_pos = fluider.squeeze(valid_pos, [1])

    ## update next action and grammar mask
    next_actions = layers.elementwise_mul(decoder.grammar_action(next_output),
                                          layers.cast(
                                              valid_pos,
                                              dtype=next_inputs.action.dtype),
                                          axis=0)
    next_gmr_mask = layers.elementwise_mul(
        decoder.grammar_mask(next_output),
        layers.cast(valid_pos, dtype=next_inputs.gmr_mask.dtype),
        axis=0)

    ## save the results, but only for rows where condition is True
    new_gmr_stack_data, new_gmr_stack_pos, new_actions, new_gmr_mask = nn_utils.ifelse(
        condition,
        [gmr_stack_tmp.data, gmr_stack_tmp.pos, next_actions, next_gmr_mask], [
            grammar_stack.data, grammar_stack.pos, next_inputs.action,
            next_inputs.gmr_mask
        ])

    layers.utils.map_structure(
        layers.assign,
        [new_gmr_stack_data, new_gmr_stack_pos, new_actions, new_gmr_mask], [
            grammar_stack.data, grammar_stack.pos, next_inputs.action,
            next_inputs.gmr_mask
        ])
    ## a LEAF instance whose stack was already empty (valid_pos is False)
    ## has nothing left to expand, so mark it as finished
    layers.logical_or(finished,
                      layers.logical_and(condition,
                                         layers.logical_not(valid_pos)),
                      out=finished)
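data_structure.Stack.pop with mask=True is a project helper; assuming it returns the popped element, a per-instance validity flag, and the updated stack, a plain-Python sketch of its behavior and of the finish rule at the end of Example #2:

def pop_sketch(stack_data, stack_pos):
    """Hypothetical stand-in for data_structure.Stack.pop(..., mask=True):
    pop the top element per instance; empty stacks yield a dummy value
    and valid=False instead of underflowing."""
    tops, valid, new_pos = [], [], []
    for row, pos in zip(stack_data, stack_pos):
        if pos > 0:
            tops.append(row[pos - 1])
            valid.append(True)
            new_pos.append(pos - 1)
        else:
            tops.append(0)
            valid.append(False)
            new_pos.append(0)
    return tops, valid, new_pos

# A LEAF instance with an empty stack (valid=False) has nothing left to
# expand, hence finished |= condition AND NOT valid in Example #2.
print(pop_sketch([[3, 5], [7, 9]], [2, 0]))
# ([5, 0], [True, False], [1, 0])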
Example #3
def _save_predict_output(outputs_array, predicted_ids, finished):
    """save predicted_ids to outputs_array, while finished is false.

    Args:
        outputs_array (TYPE): NULL
        predicted_ids (TYPE): NULL
        finished (TYPE): NULL

    Returns: TODO

    Raises: NULL
    """
    out_arr_tmp = data_structure.Array.push(outputs_array,
                                            predicted_ids,
                                            in_place=False)
    new_arr_data, new_arr_pos = nn_utils.ifelse(
        finished, [outputs_array.data, outputs_array.pos],
        [out_arr_tmp.data, out_arr_tmp.pos])
    layers.utils.map_structure(layers.assign, [new_arr_data, new_arr_pos],
                               [outputs_array.data, outputs_array.pos])
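data_structure.Array.push is assumed to append one element per instance and advance the write position; Example #3 then commits that push only for rows whose finished flag is still false, using the same row-wise select sketched after Example #1.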
Example #4
def _push_to_stack(gmr_desc, gmr_pos, gmr_lens, gmr_stack_info):
    """push grammar id in gmr_desc from gmr_pos to gmr_lens to
    gmr_stack. and update step_gmr_pos

    Args:
        gmr_desc (TYPE): NULL
        gmr_pos (TYPE): NULL
        gmr_lens (TYPE): NULL
        gmr_stack_info (tuple): [in/out] (gmr_stack, gmr_stack_pos)

    Returns: tuple (gmr_stack, gmr_stack_pos)

    Raises: NULL
    """
    gmr_stack, gmr_stack_pos = gmr_stack_info
    # start from the last valid position, gmr_lens - 1 (or 0 for empty sequences)
    mv_step = layers.cast(layers.greater_than(gmr_lens,
                                              layers.zeros_like(gmr_lens)),
                          dtype=gmr_lens.dtype)
    gmr_mv_pos = layers.elementwise_sub(gmr_lens, mv_step)

    cond = layers.reduce_any(layers.greater_than(gmr_mv_pos, gmr_pos))
    while_op = layers.While(cond)
    with while_op.block():
        gmr_ids = nn_utils.batch_gather(gmr_desc, gmr_mv_pos)
        gmr_stack_tmp, gmr_stack_pos_tmp = data_structure.Stack.push(
            gmr_stack_info, gmr_ids, in_place=False)

        mv_cond = layers.greater_than(gmr_mv_pos, gmr_pos)
        gmr_mv_pos_tmp = fluider.elementwise_sub(gmr_mv_pos,
                                                 mv_cond,
                                                 force=True)
        new_gmr_stack, new_gmr_stack_pos = nn_utils.ifelse(
            mv_cond, [gmr_stack_tmp, gmr_stack_pos_tmp],
            [gmr_stack, gmr_stack_pos])
        layers.utils.map_structure(layers.assign,
                                   [new_gmr_stack, new_gmr_stack_pos],
                                   [gmr_stack, gmr_stack_pos])
        layers.assign(gmr_mv_pos_tmp, gmr_mv_pos)
        layers.assign(
            layers.reduce_any(layers.greater_than(gmr_mv_pos, gmr_pos)), cond)
    return gmr_stack, gmr_stack_pos
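The While loop above walks gmr_mv_pos from gmr_lens - 1 down to gmr_pos + 1, so the remaining desc tokens are pushed tail-first and the earliest one ends up on top of the stack. A plain-Python sketch of that traversal for a single instance (project helpers replaced with list operations):

def push_remaining_sketch(desc, pos, length, stack):
    """Single-instance analogue of _push_to_stack: push
    desc[pos + 1 .. length - 1] from the tail backwards."""
    mv_pos = length - 1 if length > 0 else 0
    while mv_pos > pos:
        stack.append(desc[mv_pos])
        mv_pos -= 1
    return stack

# SingleSQL --> Select Filter: desc[0] (Select) was already consumed as the
# next output in Example #1, so only Filter is pushed for later expansion.
print(push_remaining_sketch(["Select", "Filter"], 0, 2, []))
# ['Filter']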
Example #5
def _check_finished(decoder, next_inputs, finished, outputs_array):
    """check finished instance by next_inputs.action, and
    update finished tag and write END to outputs

    Args:
        decoder (TYPE): NULL
        next_inputs (TYPE): NULL
        finished (TYPE): NULL
        outputs_array (TYPE): NULL

    Returns: TODO

    Raises: NULL
    """
    act_stop = tensor.fill_constant_batch_size_like(
        next_inputs.action,
        shape=next_inputs.action.shape,
        value=decoder._grammar.ACTION_STOP,
        dtype='int64')
    # an instance is newly finished if its action is STOP and it was not
    # already marked finished, so END is written exactly once
    new_finished = layers.logical_and(
        layers.equal(next_inputs.action, act_stop),
        layers.logical_not(finished))

    end_token_id = tensor.fill_constant_batch_size_like(
        outputs_array.data,
        shape=[-1],
        value=decoder._grammar.END,
        dtype=outputs_array.data.dtype)
    out_data_tmp, out_pos_tmp = data_structure.Array.push(outputs_array,
                                                          end_token_id,
                                                          in_place=False)
    new_data, new_pos = nn_utils.ifelse(
        new_finished, [out_data_tmp, out_pos_tmp],
        [outputs_array.data, outputs_array.pos])

    layers.assign(new_data, outputs_array.data)
    layers.assign(new_pos, outputs_array.pos)
    layers.logical_or(finished, new_finished, out=finished)
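A single-instance sketch of the finish check above; ACTION_STOP and END are assumed grammar constants, with placeholder values chosen here for illustration only:

ACTION_STOP = -1   # stand-in for decoder._grammar.ACTION_STOP
END = 0            # stand-in for decoder._grammar.END

def check_finished_sketch(action, finished, outputs):
    """Single-instance analogue of _check_finished: an instance finishes
    the first time its action equals ACTION_STOP, and END is appended to
    its outputs exactly once."""
    newly_finished = action == ACTION_STOP and not finished
    if newly_finished:
        outputs.append(END)
    return finished or newly_finished

outputs = [4, 7]
print(check_finished_sketch(ACTION_STOP, False, outputs), outputs)
# True [4, 7, 0]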