def __init__(self, worker_id):
        self.worker_id = worker_id
        self.scope = fluid.core.Scope()
        self.program = fluid.Program()
        self.startup = fluid.Program()
        with fluid.program_guard(self.program, self.startup):
            img, label, self.loss = mnist_network()

        self.place = fluid.CPUPlace()
        self.executor = fluid.Executor(self.place)
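        # Shuffle the MNIST training reader with an 8192-sample buffer and
        # group it into mini-batches of BATCH_SIZE.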
        self.reader_creator = paddle.batch(paddle.reader.shuffle(
            paddle.dataset.mnist.train(), buf_size=8192),
                                           batch_size=BATCH_SIZE)

        self.reader = self.reader_creator()
        self.feeder = fluid.DataFeeder(feed_list=[img, label],
                                       place=self.place)
Example #2
    def test_train_dyn_rnn(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(name='word',
                                         shape=[1],
                                         dtype='int64',
                                         lod_level=1)
            sent_emb = fluid.layers.embedding(input=sentence,
                                              size=[len(self.word_dict), 32],
                                              dtype='float32')

            rnn = fluid.layers.DynamicRNN()

            with rnn.block():
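                # Read one timestep of the embedded sentence, mix it with a
                # 100-wide memory through an FC layer, and emit the updated
                # memory as this step's output.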
                in_ = rnn.step_input(sent_emb)
                mem = rnn.memory(shape=[100], dtype='float32')
                out_ = fluid.layers.fc(input=[in_, mem], size=100, act='tanh')
                rnn.update_memory(mem, out_)
                rnn.output(out_)

            last = fluid.layers.sequence_pool(input=rnn(), pool_type='last')
            logits = fluid.layers.fc(input=last, size=1, act=None)
            label = fluid.layers.data(name='label', shape=[1], dtype='float32')
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits,
                                                                  label=label)
            loss = fluid.layers.mean(x=loss)
            adam = fluid.optimizer.Adam(1e-3)
            adam.minimize(loss=loss)

        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        exe.run(startup_program)
        feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu)
        data = next(self.train_data())
        loss_0 = exe.run(main_program,
                         feed=feeder.feed(data),
                         fetch_list=[loss])[0]
        for _ in xrange(100):
            val = exe.run(main_program,
                          feed=feeder.feed(data),
                          fetch_list=[loss])[0]
        # the loss should have decreased after 100 mini-batches
        self.assertLess(val[0], loss_0[0])
Example #3
def main():
    BATCH_SIZE = 100
    PASS_NUM = 5

    word_dict = paddle.dataset.imdb.word_dict()
    print("loaded word dict successfully")
    dict_dim = len(word_dict)
    class_dim = 2

    data = fluid.layers.data(name="words",
                             shape=[1],
                             dtype="int64",
                             lod_level=1)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
    cost, accuracy, acc_out = stacked_lstm_net(data,
                                               label,
                                               input_dim=dict_dim,
                                               class_dim=class_dim)

    train_data = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.imdb.train(word_dict), buf_size=1000),
                              batch_size=BATCH_SIZE)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    feeder = fluid.DataFeeder(feed_list=[data, label], place=place)

    exe.run(fluid.default_startup_program())

    for pass_id in xrange(PASS_NUM):
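        # Reset the accuracy evaluator at the start of each pass; training
        # stops once a batch reaches cost < 1.0 and accuracy > 0.8.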
        accuracy.reset(exe)
        for data in train_data():
            cost_val, acc_val = exe.run(fluid.default_main_program(),
                                        feed=feeder.feed(data),
                                        fetch_list=[cost, acc_out])
            pass_acc = accuracy.eval(exe)
            print("cost=" + str(cost_val) + " acc=" + str(acc_val) +
                  " pass_acc=" + str(pass_acc))
            if cost_val < 1.0 and acc_val > 0.8:
                exit(0)
    exit(1)
Example #4
test_accuracy = fluid.evaluator.Accuracy(input=predict_word,
                                         label=next_word,
                                         main_program=inference_program)
test_target = [avg_cost] + test_accuracy.metrics + test_accuracy.states
inference_program = fluid.io.get_inference_program(
    test_target, main_program=inference_program)
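# Batch the imikolov n-gram readers for training and evaluation.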
train_reader = paddle.batch(paddle.dataset.imikolov.train(word_dict, N),
                            BATCH_SIZE)
test_reader = paddle.batch(paddle.dataset.imikolov.test(word_dict, N),
                           BATCH_SIZE)

place = fluid.GPUPlace(0)
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(
    feed_list=[first_word, second_word, third_word, forth_word, next_word],
    place=place)
exe.run(fluid.default_startup_program())

for pass_id in range(PASS_NUM):
    batch_id = 0
    accuracy.reset(exe)
    print("begin")
    print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    time_begin = datetime.datetime.now()
    for data in train_reader():
        if batch_id % 100 == 0 and batch_id != 0:
            loss, acc = exe.run(fluid.default_main_program(),
                                feed=feeder.feed(data),
                                fetch_list=[avg_cost] + accuracy.metrics)
            pass_acc = accuracy.eval(exe)
Example #5
def main(window_size, dict_size=10000, emb_size=32, num_neg_samples=10,
         batch_size=512, with_parallel_do=False, sparse_update=True):
    assert window_size % 2 == 1
    words = []
    for i in xrange(window_size):
        words.append(fluid.layers.data(name='word_{0}'.format(i), shape=[1],
                                       dtype='int64'))

    dict_size = min(MAX_DICT_SIZE, dict_size)
    label_word = int(window_size / 2) + 1

    def networks(word_list):
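        # Embed every context word (all window positions except `label_word`),
        # concatenate the embeddings, and score them against the label word
        # with noise-contrastive estimation (NCE).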
        embs = []
        for i in xrange(window_size):
            if i == label_word:
                continue

            emb = fluid.layers.embedding(
                input=word_list[i],
                size=[dict_size, emb_size],
                param_attr='emb.w',
                is_sparse=sparse_update)

            embs.append(emb)

        embs = fluid.layers.concat(input=embs, axis=1)
        loss = fluid.layers.nce(input=embs,
                                label=word_list[label_word],
                                num_total_classes=dict_size,
                                param_attr='nce.w',
                                bias_attr='nce.b',
                                num_neg_samples=num_neg_samples)
        avg_loss = fluid.layers.mean(x=loss)
        avg_loss /= num_neg_samples + 1
        return avg_loss

    if with_parallel_do:
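        # Replicate the network on every available place with ParallelDo and
        # average the per-place losses.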
        for_loop = fluid.layers.ParallelDo(fluid.layers.get_places())
        with for_loop.do():
            word_list = []
            for w in words:
                word_list.append(for_loop.read_input(w))
            for_loop.write_output(networks(word_list))
        avg_loss = fluid.layers.mean(x=for_loop())
    else:
        avg_loss = networks(words)

    adagrad = fluid.optimizer.Adagrad(learning_rate=1e-3)
    adagrad.minimize(loss=avg_loss)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    feeder = fluid.DataFeeder(feed_list=words, place=place)

    exe.run(fluid.default_startup_program())
    reader = paddle.batch(
        paddle.reader.buffered(
            reader_creator(window_size=window_size,
                           word_limit=dict_size - 1,
                           path="./preprocessed"), 4000), batch_size)

    for pass_id in xrange(100):
        fluid.io.save_params(exe, dirname='model_{0}'.format(pass_id))
        for batch_id, data in enumerate(reader()):
            avg_loss_np = exe.run(feed=feeder.feed(data), fetch_list=[avg_loss])
            print("Pass ID {0}, Batch ID {1}, Loss {2}".format(
                pass_id, batch_id, avg_loss_np[0]))
Example #6
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)

accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

BATCH_SIZE = 128
PASS_NUM = 20

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.cifar.train10(), buf_size=50000),
    batch_size=BATCH_SIZE)
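# The 50000-sample shuffle buffer above covers the full CIFAR-10 training set.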

place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
exe.run(fluid.default_startup_program())

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)
    pass_time = time.time()
    iterator = 0
    for data in train_reader():
        loss, acc = exe.run(fluid.default_main_program(),
                            feed=feeder.feed(data),
                            fetch_list=[avg_cost] + accuracy.metrics)
        pass_acc = accuracy.eval(exe)
        if iterator % 100 == 0:
            print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
                pass_acc))
        # This model is slow, so if it can train two mini-batches, we consider it to be working properly.
Example #7
def test_converter():
    img = fluid.layers.data(name='image', shape=[1, 28, 28])
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    feeder = fluid.DataFeeder([img, label], fluid.CPUPlace())
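    # Feed two hand-built samples: each is a flattened 1x28x28 image (784
    # values) plus its integer label.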
    result = feeder.feed([[[0] * 784, [9]], [[1] * 784, [1]]])
    print(result)
Example #8
def main(args):
    """ main
    """
    model_path = args.pretrained_model

    paddle.init(use_gpu=args.with_gpu)

    # 1. define the network topology
    input_size = cfg.INPUT_SIZE
    output_size = cfg.INPUT_SIZE / cfg.STRIDE

    image = fluid.layers.data(name='image',
                              shape=[3, input_size, input_size],
                              dtype='float32')
    vecmap = fluid.layers.data(name='vecmap',
                               shape=[cfg.VEC_NUM, output_size, output_size],
                               dtype='float32')
    heatmap = fluid.layers.data(
        name='heatmap',
        shape=[cfg.HEATMAP_NUM, output_size, output_size],
        dtype='float32')
    vecmask = fluid.layers.data(name='vecmask',
                                shape=[cfg.VEC_NUM, output_size, output_size],
                                dtype='float32')
    heatmask = fluid.layers.data(
        name='heatmask',
        shape=[cfg.HEATMAP_NUM, output_size, output_size],
        dtype='float32')

    net = MyNet({'data': image})

    vec1 = net.layers['conv5_5_CPM_L1']
    heatmap1 = net.layers['conv5_5_CPM_L2']
    vec2 = net.layers['Mconv7_stage2_L1']
    heatmap2 = net.layers['Mconv7_stage2_L2']
    vec3 = net.layers['Mconv7_stage3_L1']
    heatmap3 = net.layers['Mconv7_stage3_L2']
    vec4 = net.layers['Mconv7_stage4_L1']
    heatmap4 = net.layers['Mconv7_stage4_L2']
    vec5 = net.layers['Mconv7_stage5_L1']
    heatmap5 = net.layers['Mconv7_stage5_L2']
    vec6 = net.layers['Mconv7_stage6_L1']
    heatmap6 = net.layers['Mconv7_stage6_L2']
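    # Each of the six stages has a vector-field branch (*_L1) and a heatmap
    # branch (*_L2); every branch below gets a masked loss against its label.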

    loss1_1 = get_mask_loss(vec1, vecmap, vecmask)
    loss1_2 = get_mask_loss(heatmap1, heatmap, heatmask)
    loss2_1 = get_mask_loss(vec2, vecmap, vecmask)
    loss2_2 = get_mask_loss(heatmap2, heatmap, heatmask)
    loss3_1 = get_mask_loss(vec3, vecmap, vecmask)
    loss3_2 = get_mask_loss(heatmap3, heatmap, heatmask)
    loss4_1 = get_mask_loss(vec4, vecmap, vecmask)
    loss4_2 = get_mask_loss(heatmap4, heatmap, heatmask)
    loss5_1 = get_mask_loss(vec5, vecmap, vecmask)
    loss5_2 = get_mask_loss(heatmap5, heatmap, heatmask)
    loss6_1 = get_mask_loss(vec6, vecmap, vecmask)
    loss6_2 = get_mask_loss(heatmap6, heatmap, heatmask)

    loss1 = loss1_1 + loss2_1 + loss3_1 + loss4_1 + loss5_1 + loss6_1
    cost1 = fluid.layers.mean(x=loss1)

    loss2 = loss1_2 + loss2_2 + loss3_2 + loss4_2 + loss5_2 + loss6_2
    cost2 = fluid.layers.mean(x=loss2)

    avg_cost = cost1 + cost2
    #avg_cost = fluid.layers.mean(x=cost)

    model_save_dir = '../models/checkpoints'

    global_step = layers.create_global_var(shape=[1],
                                           value=0.0,
                                           dtype='float32',
                                           persistable=True,
                                           force_cpu=True)
    lr_rate = lr_decay.piecewise_decay(global_step,
                                       values=cfg.LEARNING_RATE_SECTION,
                                       boundaries=cfg.BATCH_SECTION)

    # set learning_rate batch_size  num_passes  model_save_dir

    optimizer = fluid.optimizer.Momentum(
        learning_rate=cfg.LEARNING_RATE,
        global_step=global_step,
        momentum=cfg.MOMENTUM,
        regularization=fluid.regularizer.L2Decay(cfg.WEIGHT_DECAY))

    opts = optimizer.minimize(avg_cost)

    inference_program = fluid.default_main_program().clone()
    with fluid.program_guard(inference_program):
        test_target = [avg_cost]
        inference_program = fluid.io.get_inference_program(test_target)

    place = fluid.CUDAPlace(3) if args.with_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    train_reader = paddle.batch(CocoFolder.CocoFolder(
        cfg.TRAIN_DATA_PATH, [
            cfg.TRAIN_IMAGELIST_FILE, cfg.TRAIN_MASKLIST_FILE,
            cfg.TRAIN_KPTJSON_FILE
        ], cfg.STRIDE,
        Mytransforms.Compose([
            Mytransforms.RandomResized(),
            Mytransforms.RandomRotate(cfg.RANDOM_ROTATE_ANGLE),
            Mytransforms.RandomCrop(cfg.INPUT_SIZE),
            Mytransforms.RandomHorizontalFlip(),
        ])).reader,
                                batch_size=cfg.BATCH_SIZE)

    feeder = fluid.DataFeeder(
        place=place, feed_list=[image, vecmap, heatmap, vecmask, heatmask])

    if not model_path:
        pass
    elif model_path.find('.npy') > 0:
        net.load(data_path=model_path, exe=exe, place=place)
    else:
        net.load(data_path=model_path, exe=exe)

    for pass_id in range(cfg.NUM_PASSES):
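        # Every 3000 batches, and again after each pass, save an inference
        # model that maps 'image' to the final-stage outputs.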
        for batch_id, data in enumerate(train_reader()):
            loss, step_v, lr_rate_v = exe.run(fluid.default_main_program(),
                                              feed=feeder.feed(data),
                                              fetch_list=[avg_cost] +
                                              [global_step] + [lr_rate])
            print("Pass {0}, batch {1}, loss {2}, step {3}, lr{4}".format(
                pass_id, batch_id, loss[0], step_v[0], lr_rate_v[0]))
            if batch_id % 3000 == 0:
                model_path = os.path.join(model_save_dir,
                                          'batch' + str(batch_id))
                print('save models to %s' % model_path)
                fluid.io.save_inference_model(model_path, ['image'],
                                              [vec6, heatmap6], exe)
        '''
        test loss needed
        for data in test_reader():
            loss = exe.run(inference_program,
                                feed=feeder.feed(data),
                                fetch_list=[avg_cost])
        '''

        print("End pass {0}".format(pass_id))

        if pass_id % 1 == 0:
            model_path = os.path.join(model_save_dir, 'pass' + str(pass_id))
            print('save models to %s' % model_path)
            fluid.io.save_inference_model(model_path, ['image'],
                                          [vec6, heatmap6], exe)
Example #9
def main():
    # define network topology
    word = fluid.layers.data(name='word_data',
                             shape=[1],
                             dtype='int64',
                             lod_level=1)
    predicate = fluid.layers.data(name='verb_data',
                                  shape=[1],
                                  dtype='int64',
                                  lod_level=1)
    ctx_n2 = fluid.layers.data(name='ctx_n2_data',
                               shape=[1],
                               dtype='int64',
                               lod_level=1)
    ctx_n1 = fluid.layers.data(name='ctx_n1_data',
                               shape=[1],
                               dtype='int64',
                               lod_level=1)
    ctx_0 = fluid.layers.data(name='ctx_0_data',
                              shape=[1],
                              dtype='int64',
                              lod_level=1)
    ctx_p1 = fluid.layers.data(name='ctx_p1_data',
                               shape=[1],
                               dtype='int64',
                               lod_level=1)
    ctx_p2 = fluid.layers.data(name='ctx_p2_data',
                               shape=[1],
                               dtype='int64',
                               lod_level=1)
    mark = fluid.layers.data(name='mark_data',
                             shape=[1],
                             dtype='int64',
                             lod_level=1)
    feature_out = db_lstm(**locals())
    target = fluid.layers.data(name='target',
                               shape=[1],
                               dtype='int64',
                               lod_level=1)
    crf_cost = fluid.layers.linear_chain_crf(input=feature_out,
                                             label=target,
                                             param_attr=fluid.ParamAttr(
                                                 name='crfw',
                                                 learning_rate=mix_hidden_lr))
    avg_cost = fluid.layers.mean(x=crf_cost)

    # TODO(qiao)
    # check other optimizers and check why out will be NAN
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001)
    sgd_optimizer.minimize(avg_cost)

    # TODO(qiao)
    # add dependency track and move this config before optimizer
    crf_decode = fluid.layers.crf_decoding(
        input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))

    precision, recall, f1_score = fluid.layers.chunk_eval(
        input=crf_decode,
        label=target,
        chunk_scheme="IOB",
        num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))

    train_data = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.conll05.test(), buf_size=8192),
                              batch_size=BATCH_SIZE)
    place = fluid.CPUPlace()
    feeder = fluid.DataFeeder(feed_list=[
        word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target
    ],
                              place=place)
    exe = fluid.Executor(place)

    exe.run(fluid.default_startup_program())

    embedding_param = fluid.g_scope.find_var(embedding_name).get_tensor()
    embedding_param.set(
        load_parameter(conll05.get_embedding(), word_dict_len, word_dim),
        place)

    batch_id = 0
    for pass_id in xrange(PASS_NUM):
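        # Metrics are printed every 10 batches, but the unconditional exit(0)
        # below ends the run after the first batch (CI shortcut).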
        for data in train_data():
            outs = exe.run(fluid.default_main_program(),
                           feed=feeder.feed(data),
                           fetch_list=[avg_cost, precision, recall, f1_score])
            avg_cost_val = np.array(outs[0])
            precision_val = np.array(outs[1])
            recall_val = np.array(outs[2])
            f1_score_val = np.array(outs[3])

            if batch_id % 10 == 0:
                print("avg_cost=" + str(avg_cost_val))
                print("precision_val=" + str(precision_val))
                print("recall_val:" + str(recall_val))
                print("f1_score_val:" + str(f1_score_val))

            # exit early for CI
            exit(0)

            batch_id = batch_id + 1
Example #10
    def test_plain_while_op(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()

        with fluid.program_guard(main_program, startup_program):
            sentence = fluid.layers.data(name='word',
                                         shape=[1],
                                         dtype='int64',
                                         lod_level=1)
            sent_emb = fluid.layers.embedding(input=sentence,
                                              size=[len(self.word_dict), 32],
                                              dtype='float32')

            label = fluid.layers.data(name='label', shape=[1], dtype='float32')

            rank_table = fluid.layers.lod_rank_table(x=sent_emb)
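            # The rank table orders sequences by length so the While loop
            # below can shrink the batch as shorter sequences finish.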

            sent_emb_array = fluid.layers.lod_tensor_to_array(x=sent_emb,
                                                              table=rank_table)

            seq_len = fluid.layers.max_sequence_len(rank_table=rank_table)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
            i.stop_gradient = False

            boot_mem = fluid.layers.fill_constant_batch_size_like(
                input=fluid.layers.array_read(array=sent_emb_array, i=i),
                value=0,
                shape=[-1, 100],
                dtype='float32')
            boot_mem.stop_gradient = False

            mem_array = fluid.layers.array_write(x=boot_mem, i=i)

            cond = fluid.layers.less_than(x=i, y=seq_len)
            cond.stop_gradient = False
            while_op = fluid.layers.While(cond=cond)
            out = fluid.layers.create_array(dtype='float32')

            with while_op.block():
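                # One timestep per iteration: read the memory and the current
                # input, shrink the memory to the sequences still alive, apply
                # an FC step, store the hidden state, then advance i and
                # refresh the loop condition.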
                mem = fluid.layers.array_read(array=mem_array, i=i)
                ipt = fluid.layers.array_read(array=sent_emb_array, i=i)

                mem = fluid.layers.shrink_memory(x=mem, i=i, table=rank_table)

                hidden = fluid.layers.fc(input=[mem, ipt],
                                         size=100,
                                         act='tanh')

                fluid.layers.array_write(x=hidden, i=i, array=out)
                fluid.layers.increment(x=i, in_place=True)
                fluid.layers.array_write(x=hidden, i=i, array=mem_array)
                fluid.layers.less_than(x=i, y=seq_len, cond=cond)

            all_timesteps = fluid.layers.array_to_lod_tensor(x=out,
                                                             table=rank_table)
            last = fluid.layers.sequence_pool(input=all_timesteps,
                                              pool_type='last')
            logits = fluid.layers.fc(input=last, size=1, act=None)
            loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits,
                                                                  label=label)
            loss = fluid.layers.mean(x=loss)
            sgd = fluid.optimizer.SGD(1e-4)
            sgd.minimize(loss=loss)
        cpu = fluid.CPUPlace()
        exe = fluid.Executor(cpu)
        exe.run(startup_program)
        feeder = fluid.DataFeeder(feed_list=[sentence, label], place=cpu)

        data = next(self.train_data())
        val = exe.run(main_program, feed=feeder.feed(data),
                      fetch_list=[loss])[0]
        self.assertEqual((1, ), val.shape)
        print(val)
        self.assertFalse(numpy.isnan(val))
Example #11
y = fluid.layers.data(name='y', shape=[1], dtype='float32')

cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(x=cost)

sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
sgd_optimizer.minimize(avg_cost)

BATCH_SIZE = 20

train_reader = paddle.batch(paddle.reader.shuffle(
    paddle.dataset.uci_housing.train(), buf_size=500),
                            batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)

exe.run(fluid.default_startup_program())

PASS_NUM = 100
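# Each pass first saves and reloads all persistable variables, then trains
# until one batch's average loss falls below 10.0.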
for pass_id in range(PASS_NUM):
    fluid.io.save_persistables(exe, "./fit_a_line.model/")
    fluid.io.load_persistables(exe, "./fit_a_line.model/")
    for data in train_reader():
        avg_loss_value, = exe.run(fluid.default_main_program(),
                                  feed=feeder.feed(data),
                                  fetch_list=[avg_cost])

        if avg_loss_value[0] < 10.0:
            exit(0)  # if the average cost drops below 10.0, we consider the run successful
Example #12
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.01)
optimizer.minimize(avg_cost)

accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

BATCH_SIZE = 50
PASS_NUM = 3
train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.mnist.train(),
                                                  buf_size=500),
                            batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
exe.run(fluid.default_startup_program())

for pass_id in range(PASS_NUM):
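    # Reset the evaluator each pass; training exits once a batch reaches
    # loss < 10.0 with pass accuracy > 0.9.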
    accuracy.reset(exe)
    for data in train_reader():
        loss, acc = exe.run(fluid.default_main_program(),
                            feed=feeder.feed(data),
                            fetch_list=[avg_cost] + accuracy.metrics)
        pass_acc = accuracy.eval(exe)
        print("pass_id=" + str(pass_id) + " acc=" + str(acc) + " pass_acc=" +
              str(pass_acc))
        # print loss, acc
        if loss < 10.0 and pass_acc > 0.9:
            # if the loss is below 10.0 and the pass accuracy exceeds 0.9, we consider the run successful
            exit(0)
Example #13
def main(dict_path):
    word_dict = load_vocab(dict_path)
    word_dict["<unk>"] = len(word_dict)
    dict_dim = len(word_dict)
    print("The dictionary size is: %d" % dict_dim)

    data, label, prediction, avg_cost = conv_net(dict_dim)

    sgd_optimizer = fluid.optimizer.SGD(learning_rate=conf.learning_rate)
    sgd_optimizer.minimize(avg_cost)

    accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)

    inference_program = fluid.default_main_program().clone()
    with fluid.program_guard(inference_program):
        test_target = accuracy.metrics + accuracy.states
        inference_program = fluid.io.get_inference_program(test_target)

    # The training data set.
    train_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=51200),
        batch_size=conf.batch_size)

    # The testing data set.
    test_reader = paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.test(word_dict), buf_size=51200),
        batch_size=conf.batch_size)

    if conf.use_gpu:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()

    exe = fluid.Executor(place)

    feeder = fluid.DataFeeder(feed_list=[data, label], place=place)

    exe.run(fluid.default_startup_program())

    def test(exe):
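        # Reset the accuracy evaluator, run the cloned inference program over
        # the whole test set, and return the accumulated accuracy.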
        accuracy.reset(exe)
        for batch_id, data in enumerate(test_reader()):
            input_seq = to_lodtensor(map(lambda x: x[0], data), place)
            y_data = np.array(map(lambda x: x[1], data)).astype("int64")
            y_data = y_data.reshape([-1, 1])
            acc = exe.run(inference_program,
                          feed={"words": input_seq,
                                "label": y_data})
        test_acc = accuracy.eval(exe)
        return test_acc

    total_time = 0.
    for pass_id in xrange(conf.num_passes):
        accuracy.reset(exe)
        start_time = time.time()
        for batch_id, data in enumerate(train_reader()):
            cost_val, acc_val = exe.run(
                fluid.default_main_program(),
                feed=feeder.feed(data),
                fetch_list=[avg_cost, accuracy.metrics[0]])
            pass_acc = accuracy.eval(exe)
            if batch_id and batch_id % conf.log_period == 0:
                print("Pass id: %d, batch id: %d, cost: %f, pass_acc %f" %
                      (pass_id, batch_id, cost_val, pass_acc))
        end_time = time.time()
        total_time += (end_time - start_time)
        pass_test_acc = test(exe)
        print("Pass id: %d, test_acc: %f" % (pass_id, pass_test_acc))
    print("Total train time: %f" % (total_time))