Example #1
 def test_backward(self):
     switch_main_program(Program())
     loss = self.build_network(False, print_phase='backward')
     exe = paddle.static.Executor(self.place)
     outs = exe.run(feed={'x': self.x_tensor},
                    fetch_list=[loss],
                    return_numpy=False)
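Tests like this one share a pattern: switch to a fresh Program so that every op built afterwards is recorded in it, then run that program with an Executor. A minimal self-contained sketch of the pattern (assuming Paddle 2.x with static mode enabled; the variable names are illustrative, not from the source):

import numpy as np
import paddle
from paddle.fluid.framework import Program, switch_main_program

paddle.enable_static()
switch_main_program(Program())  # ops created below land in this fresh program
x = paddle.static.data(name='x', shape=[2, 10], dtype='float32')
loss = paddle.mean(x)
exe = paddle.static.Executor(paddle.CPUPlace())
out, = exe.run(feed={'x': np.ones((2, 10), 'float32')},
               fetch_list=[loss])  # runs the current default main program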
Example #2
def bilstm_net(program, input_feature, hid_dim=128, hid_dim2=96):
    switch_main_program(program)

    # project to 4 * hid_dim: dynamic_lstm packs its four gate
    # projections (input, forget, output, cell) into one input tensor
    fc0 = fluid.layers.fc(input=input_feature, size=hid_dim * 4)
    rfc0 = fluid.layers.fc(input=input_feature, size=hid_dim * 4)

    lstm_h, _ = fluid.layers.dynamic_lstm(input=fc0,
                                          size=hid_dim * 4,
                                          is_reverse=False)
    rlstm_h, _ = fluid.layers.dynamic_lstm(input=rfc0,
                                           size=hid_dim * 4,
                                           is_reverse=True)

    # extract the last step of each direction
    lstm_last = fluid.layers.sequence_last_step(input=lstm_h)
    rlstm_last = fluid.layers.sequence_last_step(input=rlstm_h)

    lstm_last_tanh = fluid.layers.tanh(lstm_last)
    rlstm_last_tanh = fluid.layers.tanh(rlstm_last)

    # concat the tanh-activated last steps of both directions
    lstm_concat = fluid.layers.concat(
        input=[lstm_last_tanh, rlstm_last_tanh], axis=1)
    # fully connected layer
    fc = fluid.layers.fc(input=lstm_concat, size=hid_dim2, act='tanh')

    return fc
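A hedged sketch of how a net like this might be wired to token input (the names words, vocab_size and the embedding width are assumptions, not from the source):

vocab_size = 10000  # assumed vocabulary size
words = fluid.layers.data(name='words', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.embedding(input=words, size=[vocab_size, 128])
program = fluid.default_main_program()
sent_feat = bilstm_net(program, emb)  # [batch, hid_dim2] sentence feature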
Example #3
 def test_no_summarize(self):
     switch_main_program(Program())
     printed = self.build_network(True, summarize=-1, print_phase='forward')
     exe = paddle.static.Executor(self.place)
     outs = exe.run(feed={'x': self.x_tensor},
                    fetch_list=[printed],
                    return_numpy=False)
Example #4
 def test_forward(self):
     switch_main_program(Program())
     printed = self.build_network(True, print_phase='forward')
     exe = Executor(self.place)
     outs = exe.run(feed={'x': self.x_tensor},
                    fetch_list=[printed],
                    return_numpy=False)
Example #5
 def test_backward(self):
     switch_main_program(Program())
     loss = self.build_network(False, print_phase='backward')
     exe = Executor(self.place)
     outs = exe.run(feed={'x': self.x_tensor},
                    fetch_list=[loss],
                    return_numpy=False)
Example #6
def bow_net(program, input_feature, hid_dim=128, hid_dim2=96):
    switch_main_program(program)

    bow = fluid.layers.sequence_pool(input=input_feature, pool_type='sum')
    bow_tanh = fluid.layers.tanh(bow)
    fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
    fc = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")

    return fc
Example #7
def gru_net(program, input_feature, hid_dim=128, hid_dim2=96):
    switch_main_program(program)

    fc0 = fluid.layers.fc(input=input_feature, size=hid_dim * 3)
    gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False)
    gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max')
    gru_max_tanh = fluid.layers.tanh(gru_max)
    fc = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh')

    return fc
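The 3x width here (and the 4x width in the LSTM nets) is not arbitrary: dynamic_gru expects its input to hold the update, reset and candidate projections packed together (3 * hidden), while dynamic_lstm expects all four gate projections packed together and also takes its size argument as 4 * hidden. A side-by-side sketch (identifiers are illustrative):

feat = fluid.layers.data(name='feat', shape=[64], dtype='float32', lod_level=1)
gru_in = fluid.layers.fc(input=feat, size=128 * 3)    # 3 packed projections
gru_h = fluid.layers.dynamic_gru(input=gru_in, size=128)
lstm_in = fluid.layers.fc(input=feat, size=128 * 4)   # 4 packed projections
lstm_h, _ = fluid.layers.dynamic_lstm(input=lstm_in, size=128 * 4)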
Example #8
 def setUp(self):
     self._delta = 0.005
     self._max_sequence_len = 3
     self._program = Program()
     switch_main_program(self._program)
     self.output_dim = 10
     self.place = core.CPUPlace()
     self.prepare_x_tensor()
     self.prepare_static_input_tensor()
     self.exe = fluid.Executor(self.place)
Example #9
def cnn_net(program, input_feature, win_size=3, hid_dim=128, hid_dim2=96):
    switch_main_program(program)

    conv_3 = fluid.nets.sequence_conv_pool(input=input_feature,
                                           num_filters=hid_dim,
                                           filter_size=win_size,
                                           act="relu",
                                           pool_type="max")
    fc = fluid.layers.fc(input=conv_3, size=hid_dim2)

    return fc
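fluid.nets.sequence_conv_pool is a convenience wrapper; the composite above is roughly equivalent to the following two-step sketch (not the exact fluid.nets implementation):

conv = fluid.layers.sequence_conv(input=input_feature,
                                  num_filters=hid_dim,
                                  filter_size=win_size,
                                  act="relu")
pooled = fluid.layers.sequence_pool(input=conv, pool_type="max")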
Example #10
def lstm_net(program, input_feature, hid_dim=128, hid_dim2=96):
    switch_main_program(program)

    fc0 = fluid.layers.fc(input=input_feature, size=hid_dim * 4)
    lstm_h, c = fluid.layers.dynamic_lstm(
        input=fc0, size=hid_dim * 4, is_reverse=False)
    lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
    lstm_max_tanh = fluid.layers.tanh(lstm_max)
    fc = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')

    return fc
Example #11
 def setUp(self):
     self.main_program = Program()
     switch_main_program(self.main_program)
     x = layers.data('x', shape=[100], dtype='float32')
     x.stop_gradient = False
     rank_table_tensor = layers.data(
         'rank_table_tensor', shape=[1], dtype='float32', lod_level=1)
     table = layers.lod_rank_table(x=rank_table_tensor)
     i = layers.zeros(dtype='int64', shape=[1])
     self.mem1 = layers.shrink_memory(x=x, i=i, table=table)
     i = layers.increment(x=i)
     i.stop_gradient = True
     self.mem2 = layers.shrink_memory(x=self.mem1, i=i, table=table)
     i = layers.increment(x=i)
     i.stop_gradient = True
     self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
     mem3_mean = layers.mean(self.mem3)
     append_backward(loss=mem3_mean)
     self.x_grad = self.main_program.global_block().var('x@GRAD')
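With append_backward in place, the gradient variable can be fetched like any other output. A hedged sketch of the follow-up step (the feed tensors are assumed to be prepared LoD tensors, as in the other examples here):

exe = fluid.Executor(core.CPUPlace())
outs = exe.run(self.main_program,
               feed={'x': x_tensor,
                     'rank_table_tensor': rank_table_tensor},
               fetch_list=[self.x_grad],
               return_numpy=False)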
Example #12
    module = hub.Module(name="elmo")
    inputs, outputs, program = module.context(trainable=True)

    # Step2: download the dataset and use LACClassifyReader to read it
    dataset = hub.dataset.ChnSentiCorp()

    reader = hub.reader.LACClassifyReader(dataset=dataset,
                                          vocab_path=module.get_vocab_path())
    word_dict_len = len(reader.vocab)

    word_ids = inputs["word_ids"]
    elmo_embedding = outputs["elmo_embed"]

    # Step3: switch program and build network
    # Choose the net which you would like: bow, cnn, gru, bilstm, lstm
    switch_main_program(program)

    # Embedding layer
    word_embed_dims = 128
    word_embedding = fluid.layers.embedding(
        input=word_ids,
        size=[word_dict_len, word_embed_dims],
        param_attr=fluid.ParamAttr(learning_rate=30,
                                   initializer=fluid.initializer.Uniform(
                                       low=-0.1, high=0.1)))

    # Add elmo embedding
    input_feature = fluid.layers.concat(input=[elmo_embedding, word_embedding],
                                        axis=1)

    # Choose the net which you would like: bow, cnn, gru, bilstm, lstm
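The snippet is cut off at this point; with the nets defined in the earlier examples, the typical next step would be along these lines (the choice of bow_net is illustrative):

    fc = bow_net(program, input_feature)  # or cnn_net / gru_net / bilstm_net / lstm_net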