Example #1
0
def fluid_sequence_index(input, index):
    """Pick one timestep out of every sequence in the batch.

    Args:
        input: 1-level LoDTensor to index into.
        index: int64 tensor of shape (batch_size, 1); the per-sequence
            offset of the element to extract.

    Returns:
        A LoDTensor containing exactly one element per sequence.
    """
    # A per-sequence length of 1 — slice a single element from each sequence.
    length_one = layers.fill_constant_batch_size_like(
        input, shape=[-1, 1], value=1, dtype='int64')
    return layers.sequence_slice(input, offset=index, length=length_one)
Example #2
0
def fluid_sequence_first_step(lodtensor):
    """Take the first timestep of every sequence.

    Unlike a plain pooling op, the result here is returned as a LoD tensor.

    Args:
        lodtensor: 1-level LoDTensor.

    Returns:
        A LoDTensor holding the first element of each sequence.
    """
    # offset 0, length 1 for every sequence => first element only.
    start = layers.fill_constant_batch_size_like(
        lodtensor, shape=[-1, 1], value=0, dtype='int64')
    one = layers.fill_constant_batch_size_like(
        lodtensor, shape=[-1, 1], value=1, dtype='int64')
    return layers.sequence_slice(lodtensor, offset=start, length=one)
Example #3
0
def fluid_sequence_advance(input, OOV):
    """Shift each sequence one step to the right, filling the head with OOV.

    Each sequence's last element is dropped and an OOV element is prepended,
    so the LoD is unchanged. Example (with OOV == 0):
        input.data  = [1,2,3, 4,5],  input.lod  = [[0, 3, 5]]
        output.data = [0,1,2, 0,4],  output.lod = [[0, 3, 5]]

    Args:
        input: 1-level LoDTensor.
        OOV: scalar fill value for the vacated first position.

    Returns:
        A LoDTensor with the same LoD as `input`.
    """
    seq_len = fluid_sequence_get_seq_len(input)
    start = layers.fill_constant_batch_size_like(
        seq_len, shape=[-1, 1], value=0, dtype='int64')
    one = layers.fill_constant_batch_size_like(
        seq_len, shape=[-1, 1], value=1, dtype='int64')
    # Build one OOV element per sequence by zeroing a sliced element and
    # adding OOV; no gradient should flow through the filler.
    oov = layers.sequence_slice(input, start, one) * 0 + OOV
    oov.stop_gradient = True
    # Prepend the OOV element, then re-slice at the original lengths so the
    # last element of every sequence falls off the end.
    padded = layers.sequence_concat([oov, input])
    return layers.sequence_slice(padded, start, seq_len)
Example #4
0
def fluid_sequence_delay(input, OOV):
    """Shift each sequence one step to the left, filling the tail with OOV.

    Each sequence's first element is dropped and an OOV element is appended,
    so the LoD is unchanged (the mirror image of `fluid_sequence_advance`).

    Args:
        input: 1-level LoDTensor.
        OOV: scalar fill value for the vacated last position.

    Returns:
        A LoDTensor with the same LoD as `input`.
    """
    seq_len = fluid_sequence_get_seq_len(input)
    start = layers.fill_constant_batch_size_like(seq_len,
                                                 shape=[-1, 1],
                                                 value=0,
                                                 dtype='int64')
    one = layers.fill_constant_batch_size_like(seq_len,
                                               shape=[-1, 1],
                                               value=1,
                                               dtype='int64')
    # One OOV element per sequence, derived from the first element; keep it
    # out of the gradient graph.
    oov = layers.sequence_slice(input, start, one) * 0 + OOV
    oov.stop_gradient = True
    # Append the OOV element, then slice from offset 1 at the original
    # lengths: drops the head, keeps the appended filler at the tail.
    padded = layers.sequence_concat([input, oov])
    return layers.sequence_slice(padded, one, seq_len)
Example #5
0
 def test_sequence_slice(self):
     """Check that a sequence_slice op can be built inside a fresh program."""
     prog = Program()
     with program_guard(prog):
         import numpy as np
         seq_input = layers.data(
             name='x', shape=[10, 5], dtype='float32', lod_level=1)
         off = layers.assign(input=np.array([[0, 1]]).astype('int32'))
         lens = layers.assign(input=np.array([[2, 1]]).astype('int32'))
         sliced = layers.sequence_slice(
             input=seq_input, offset=off, length=lens)
         self.assertIsNotNone(sliced)
     print(str(prog))
Example #6
0
def fluid_sequence_delay2(input, seq_len, OOV):
    """Shift each sequence one step left, with a caller-supplied seq_len.

    Variant of `fluid_sequence_delay` that avoids recomputing the sequence
    lengths: the first element of every sequence is dropped and one OOV
    element is appended at the tail.

    Args:
        input: 1-level LoDTensor.
        seq_len: per-sequence lengths, shape (batch_size, 1) — presumably a
            LoDTensor with one entry per sequence; TODO confirm against callers.
        OOV: scalar fill value for the vacated last position.

    Returns:
        A LoDTensor with the same LoD structure as `input`.
    """
    # One OOV element per sequence, cast to input's dtype; no gradient.
    oov = layers.cast(seq_len * 0 + OOV, input.dtype)
    oov.stop_gradient = True
    padded = layers.sequence_concat([input, oov])
    # Slice from offset 1 at the original lengths: drop each head element,
    # keep the appended OOV at each tail.
    one = layers.fill_constant_batch_size_like(seq_len,
                                               shape=[-1, 1],
                                               value=1,
                                               dtype='int64')
    return layers.sequence_slice(padded, one, layers.cast(seq_len, 'int64'))
Example #7
0
File: layers.py  Project: Yelrose/PGL
def topk_pool(gw, score, graph_id, ratio):
    """Implementation of topk pooling, where k means pooling ratio.
    
    Args:
        gw: Graph wrapper object.

        score: The attention score of all nodes, which is used to select 
               important nodes.

        graph_id: The graphs that the nodes belong to.

        ratio: The pooling ratio of nodes we want to select.

    Return: 
        perm: The index of nodes we choose.

        ratio_length: The selected node numbers of each graph.
    """

    graph_lod = gw.graph_lod
    graph_nodes = gw.num_nodes
    num_graph = gw.num_graph

    # Count nodes per graph: sum-pool a vector of ones under the graph LoD,
    # then take the largest count to size the dense padding below.
    num_nodes = L.ones(shape=[graph_nodes], dtype="float32")
    num_nodes = L.lod_reset(num_nodes, graph_lod)
    num_nodes_per_graph = L.sequence_pool(num_nodes, pool_type='sum')
    max_num_nodes = L.reduce_max(num_nodes_per_graph, dim=0)
    max_num_nodes = L.cast(max_num_nodes, dtype="int32")

    # Map every node to a slot in a dense [num_graph, max_num_nodes] layout:
    # (node id - its graph's start offset) + graph row offset.
    index = L.arange(0, gw.num_nodes, dtype="int64")
    offset = L.gather(graph_lod, graph_id, overwrite=False)
    index = (index - offset) + (graph_id * max_num_nodes)
    index.stop_gradient = True

    # padding: unfilled slots get a very negative score so real nodes always
    # sort ahead of them.
    dense_score = L.fill_constant(shape=[num_graph * max_num_nodes],
                                  dtype="float32",
                                  value=-999999)
    index = L.reshape(index, shape=[-1])
    dense_score = L.scatter(dense_score, index, updates=score)
    num_graph = L.cast(num_graph, dtype="int32")
    dense_score = L.reshape(dense_score, shape=[num_graph, max_num_nodes])

    # record the sorted index: per-graph descending order by score.
    _, sort_index = L.argsort(dense_score, axis=-1, descending=True)

    # recover the index range: argsort indices are row-local
    # (0..max_num_nodes-1); add each graph's node offset (graph_lod starts,
    # last entry dropped) to turn them back into node indices.
    graph_lod = graph_lod[:-1]
    graph_lod = L.reshape(graph_lod, shape=[-1, 1])
    graph_lod = L.cast(graph_lod, dtype="int64")
    sort_index = L.elementwise_add(sort_index, graph_lod, axis=-1)
    sort_index = L.reshape(sort_index, shape=[-1, 1])

    # use sequence_slice to choose selected node index: give the flattened
    # sort_index a LoD of max_num_nodes per graph, then keep the first
    # ceil(num_nodes * ratio) entries of each graph's sorted list.
    pad_lod = L.arange(0, (num_graph + 1) * max_num_nodes,
                       step=max_num_nodes,
                       dtype="int32")
    sort_index = L.lod_reset(sort_index, pad_lod)
    ratio_length = L.ceil(num_nodes_per_graph * ratio)
    ratio_length = L.cast(ratio_length, dtype="int64")
    ratio_length = L.reshape(ratio_length, shape=[-1, 1])
    offset = L.zeros(shape=[num_graph, 1], dtype="int64")
    choose_index = L.sequence_slice(input=sort_index,
                                    offset=offset,
                                    length=ratio_length)

    perm = L.reshape(choose_index, shape=[-1])
    return perm, ratio_length