Code Example #1
def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(infer_func=inference_program,
                                  param_path=params_dirname,
                                  place=place)

    data1 = [[word_dict['among']]]  # 'among'
    data2 = [[word_dict['a']]]  # 'a'
    data3 = [[word_dict['group']]]  # 'group'
    data4 = [[word_dict['of']]]  # 'of'
    lod = [[1]]

    first_word = fluid.create_lod_tensor(data1, lod, place)
    second_word = fluid.create_lod_tensor(data2, lod, place)
    third_word = fluid.create_lod_tensor(data3, lod, place)
    fourth_word = fluid.create_lod_tensor(data4, lod, place)

    result = inferencer.infer(
        {
            'firstw': first_word,
            'secondw': second_word,
            'thirdw': third_word,
            'fourthw': fourth_word
        },
        return_numpy=False)
    print('softmax result=')
    print(numpy.array(result[0]))
    #     print(numpy.array(embedding_second))
    most_possible_word_index = numpy.argmax(result[0])
    #     print(most_possible_word_index)
    print('among a group of :')
    print([
        key for key, value in word_dict.items()
        if value == most_possible_word_index
    ][0])
Code Example #2
def infer(params_dir):
    place = fluid.CUDAPlace(0)
    inferencer = fluid.Inferencer(
        infer_func=inference_network, param_path=params_dir, place=place)
    # Prepare testing data.
    from PIL import Image
    import numpy as np
    import os

    def load_image(file):
        im = Image.open(file)
        im = im.resize((32, 32), Image.ANTIALIAS)
        im = np.array(im).astype(np.float32)
        """transpose [H W C] to [C H W]"""
        im = im.transpose((2, 0, 1)) 
        im = im / 255.0

        # Add one dimension, [N C H W] N=1
        im = np.expand_dims(im, axis=0)
        return im
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    img = load_image(cur_dir + '/dog.png')
    # inference
    results = inferencer.infer({'image': img})
    print(results)
    lab = np.argsort(results)  # probs and lab are the results of one batch data
    print("infer results: ", cifar_classes[lab[0][0][-1]])
Code Example #3
def embedding_infer(use_cuda, inference_program, params_dirname=None):

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(infer_func=inference_program,
                                  param_path=params_dirname,
                                  place=place)

    data1 = [[20]]
    data2 = [[20]]
    data3 = [[20]]
    data4 = [[20]]
    lod = [[1]]
    first_word = fluid.create_lod_tensor(data1, lod, place)
    second_word = fluid.create_lod_tensor(data2, lod, place)
    third_word = fluid.create_lod_tensor(data3, lod, place)
    fourth_word = fluid.create_lod_tensor(data4, lod, place)

    embedding_layer = inferencer.infer(
        {
            'firstw': first_word,
            'secondw': second_word,
            'thirdw': third_word,
            'fourthw': fourth_word
        },
        return_numpy=False)

    print numpy.array(embedding_layer[0])
Code Example #4
File: train.py, Project: Ceekay-Shen/Python
def main():
    train_reader = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.mnist.train(), buf_size=500),
                                batch_size=64)

    test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)

    use_cuda = False
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    trainer = fluid.Trainer(train_func=train_program,
                            place=place,
                            optimizer_func=optimizer_program)

    params_dirname = "recognize_digits_network.inference.model"

    lists = []

    def event_handler(event):
        if isinstance(event, fluid.EndStepEvent):
            if event.step % 100 == 0:
                print "Pass %d, Batch %d, Cost %f" % (event.step, event.epoch,
                                                      event.metrics[0])

        if isinstance(event, fluid.EndEpochEvent):
            avg_cost, acc = trainer.test(reader=test_reader,
                                         feed_order=['img', 'label'])

            print("Test with Epoch %d, avg_cost = %s, acc: %s" %
                  (event.epoch, avg_cost, acc))
            trainer.save_params(params_dirname)
            lists.append((event.epoch, avg_cost, acc))

    trainer.train(num_epochs=5,
                  event_handler=event_handler,
                  reader=train_reader,
                  feed_order=['img', 'label'])

    best = sorted(lists, key=lambda list: float(list[1]))[0]
    print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1])
    print 'The classification accuracy is %.2f%%' % (float(best[2]) * 100)

    def load_image(file):
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32)
        im = im / 255.0 * 2.0 - 1.0
        return im

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    img = load_image(cur_dir + '/image/infer_3.png')
    inferencer = fluid.Inferencer(infer_func=convolutional_neural_network,
                                  param_path=params_dirname,
                                  place=place)

    results = inferencer.infer({'img': img})

    lab = np.argsort(results)

    print "Label of image/infer_3.png is: %d" % lab[0][0][-1]
Code Example #5
def infer(use_cuda, inference_program, save_path):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(infer_func=inference_program,
                                  param_path=save_path,
                                  place=place)

    lod = [0, 1]
    first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
    second_word = create_random_lodtensor(lod,
                                          place,
                                          low=0,
                                          high=dict_size - 1)
    third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1)
    fourth_word = create_random_lodtensor(lod,
                                          place,
                                          low=0,
                                          high=dict_size - 1)

    result = inferencer.infer({
        'firstw': first_word,
        'secondw': second_word,
        'thirdw': third_word,
        'forthw': fourth_word
    })
    print(np.array(result[0]))
Code Example #6
def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    word_dict = paddle.dataset.imdb.word_dict()

    inferencer = fluid.Inferencer(infer_func=partial(inference_program,
                                                     word_dict),
                                  param_path=params_dirname,
                                  place=place)

    # Setup input by creating LoDTensor to represent sequence of words.
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
    # which has only one lod level. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
    # Note that lod info should be a list of lists.
    lod = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    tensor_words = fluid.create_random_int_lodtensor(lod,
                                                     base_shape,
                                                     place,
                                                     low=0,
                                                     high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)
Code Example #7
def infer(use_cuda, save_path):
    params = fluid.Params(save_path)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(inference_network, params, place=place)

    # The input's dimension of conv should be 4-D or 5-D.
    # Use normalized image pixels as input data, which should be in the range
    # [0, 1.0].
    tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
    results = inferencer.infer({'pixel': tensor_img})

    print("infer results: ", results)
Code Example #8
def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(
        infer_func=inference_program, param_path=params_dirname, place=place)

    # The input's dimension of conv should be 4-D or 5-D.
    # Use normalized image pixels as input data, which should be in the range
    # [0, 1.0].
    tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32")
    results = inferencer.infer({'pixel': tensor_img})

    print("infer results: ", results)
Code Example #9
File: predictor.py, Project: wallacemu/SmartCar
    def __init__(self, model_dir=None, img_size=None):
        if model_dir:
            self._model_dir = model_dir

        if img_size:
            self._img_size = img_size
        # C,H,W 
        data_shape = (3, self._img_size[1], self._img_size[0])
        self.inferencer = fluid.Inferencer(
                infer_func=resnet.inference(data_shape, self._label_cnt),
                param_path=self._model_dir,
                place=fluid.CPUPlace())
Code Example #10
def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    inferencer = fluid.Inferencer(
        infer_func=inference_program, param_path=params_dirname, place=place)

    batch_size = 1
    tensor_img = numpy.random.uniform(-1.0, 1.0,
                                      [batch_size, 1, 28, 28]).astype("float32")

    results = inferencer.infer({'img': tensor_img})

    print("infer results: ", results[0])
Code Example #11
def infer(use_cuda, inference_program, params_dirname=None):
    if params_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(infer_func=inference_program,
                                  param_path=params_dirname,
                                  place=place)

    batch_size = 10
    tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")

    results = inferencer.infer({'x': tensor_x})
    print("infer results: ", results[0])
Code Example #12
def adversarial_examples_to_model1(x, y):
    # use CPU
    place = fluid.CPUPlace()
    # use GPU
    # place = fluid.CUDAPlace(0)
    #exe = fluid.Executor(place)

    inferencer = fluid.Inferencer(
        #infer_func=mnist_mlp_model_func,
        infer_func=mnist_cnn_model_func,
        #param_path=model2_path,
        param_path=model1_path,
        place=place)

    sum = 0
    # number of successfully fooled predictions
    success = 0

    for i, data in enumerate(x):

        #print data

        adversarial_example = np.copy(data)
        adversarial_example = np.reshape(adversarial_example, (1, 28, 28))
        adversarial_example = np.expand_dims(adversarial_example, axis=0)

        #print adversarial_example

        #adversarial_example /= 2.
        #adversarial_example += 0.5
        #adversarial_example *= 255.

        #adversarial_example = adversarial_example.astype(np.uint8)

        #print adversarial_example

        result = inferencer.infer({'img': adversarial_example})

        lab = np.argsort(
            result)  # probs and lab are the results of one batch data
        label = lab[0][0][-1]
        #print "Label of image/infer_3.png is: %d" % label

        sum += 1
        if not label == y[i]:
            success += 1
        #print y[i]

    print "sum=%d  success=%d" % (sum, success)
Code Example #13
def infer(use_cuda, inference_program, params_dirname=None):
    """
    infer  use the trained model.
    :param use_cuda:
    :param inference_program:
    :param params_dirname:
    :return:
    """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(infer_func=inference_program,
                                  param_path=params_dirname,
                                  place=place)

    # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
    # is simply an index to look up for the corresponding word vector and hence
    # the shape of word (base_shape) should be [1]. The length-based level of
    # detail (lod) info of each LoDtensor should be [[1]] meaning there is only
    # one lod_level and there is only one sequence of one word on this level.
    # Note that lod info should be a list of lists.

    data1 = [[211]]  # 'among'
    data2 = [[6]]  # 'a'
    data3 = [[96]]  # 'group'
    data4 = [[4]]  # 'of'
    lod = [[1]]

    first_word = fluid.create_lod_tensor(data1, lod, place)
    second_word = fluid.create_lod_tensor(data2, lod, place)
    third_word = fluid.create_lod_tensor(data3, lod, place)
    fourth_word = fluid.create_lod_tensor(data4, lod, place)

    result = inferencer.infer(
        {
            'firstw': first_word,
            'secondw': second_word,
            'thirdw': third_word,
            'fourthw': fourth_word
        },
        return_numpy=False)

    print(numpy.array(result[0]))
    most_possible_word_index = numpy.argmax(result[0])
    print(most_possible_word_index)
    print([
        key for key, value in word_dict.items()
        if value == most_possible_word_index
    ][0])
Code Example #14
def infer(use_cuda, inference_program, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(infer_func=inference_program,
                                  param_path=params_dirname,
                                  place=place)

    # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
    # is simply an index to look up for the corresponding word vector and hence
    # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths,
    # which is length-based level of detail (lod) of each LoDTensor, should be [[1]]
    # meaning there is only one level of detail and there is only one sequence of
    # one word on this level.
    # Note that recursive_sequence_lengths should be a list of lists.
    recursive_seq_lens = [[1]]
    base_shape = [1]
    # The range of random integers is [low, high]
    first_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                   base_shape,
                                                   place,
                                                   low=0,
                                                   high=dict_size - 1)
    second_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                    base_shape,
                                                    place,
                                                    low=0,
                                                    high=dict_size - 1)
    third_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                   base_shape,
                                                   place,
                                                   low=0,
                                                   high=dict_size - 1)
    fourth_word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                                    base_shape,
                                                    place,
                                                    low=0,
                                                    high=dict_size - 1)

    result = inferencer.infer(
        {
            'firstw': first_word,
            'secondw': second_word,
            'thirdw': third_word,
            'forthw': fourth_word
        },
        return_numpy=False)
    print(np.array(result[0]))
Code Example #15
File: predict.py, Project: Neoncy/JIT-WAF
def main():
    inferencer = fluid.Inferencer(infer_func=forward,
                                  param_path=params_dirname,
                                  place=place)
    tensor_x = np.array(load(data)).reshape(n, 9).astype(np.float32)
    results = inferencer.infer({'x': tensor_x})
    # output the results
    lab = np.argsort(results)
    # format the output
    i = 0
    j = 0.0
    for type1, type2 in loading(data):
        # If type2 equals lab[0][i][-1] the prediction is correct; otherwise it is wrong.
        if i < n:
            if (int(type2) == int(lab[0][i][-1])):
                j += 1
            i += 1
    acc = j / n
    print "准确率为:" + str(acc)
Code Example #16
def main():
    inferencer = fluid.Inferencer(infer_func=forward,
                                  param_path=params_dirname,
                                  place=place)
    tensor_x = np.array(load(data)).reshape(n, 9).astype(np.float32)
    results = inferencer.infer({'x': tensor_x})
    # output the results
    lab = np.argsort(results)
    # format the output
    i = 0
    for ip, time in loading(data):
        ip_out = str(ip) + ' ' + str(time) + ' '
        print ip, time,
        if i < n:
            print lab[0][i]
            ip_out += str(lab[0][i][-1])
            i += 1
        with open(data_out, 'a') as t:
            t.write(ip_out + '\n')
Code Example #17
def infer(use_cuda, inference_program, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(inference_program,
                                  param_path=params_dirname,
                                  place=place)

    # Use the first data from paddle.dataset.movielens.test() as input.
    # Use create_lod_tensor(data, recursive_sequence_lengths, place) API
    # to generate LoD Tensor where `data` is a list of sequences of index
    # numbers, `recursive_sequence_lengths` is the length-based level of detail
    # (lod) info associated with `data`.
    # For example, data = [[10, 2, 3], [2, 3]] means that it contains
    # two sequences of indexes, of length 3 and 2, respectively.
    # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
    # level of detail info, indicating that `data` consists of two sequences
    # of length 3 and 2, respectively.
    user_id = fluid.create_lod_tensor([[1]], [[1]], place)
    gender_id = fluid.create_lod_tensor([[1]], [[1]], place)
    age_id = fluid.create_lod_tensor([[0]], [[1]], place)
    job_id = fluid.create_lod_tensor([[10]], [[1]], place)
    movie_id = fluid.create_lod_tensor([[783]], [[1]], place)
    category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place)
    movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]],
                                          [[5]], place)

    results = inferencer.infer(
        {
            'user_id': user_id,
            'gender_id': gender_id,
            'age_id': age_id,
            'job_id': job_id,
            'movie_id': movie_id,
            'category_id': category_id,
            'movie_title': movie_title
        },
        return_numpy=False)

    print("infer results: ", np.array(results[0]))
Code Example #18
def infer(use_cuda, save_path):
    params = fluid.Params(save_path)
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    word_dict = paddle.dataset.imdb.word_dict()
    inferencer = fluid.Inferencer(partial(inference_network, word_dict),
                                  params,
                                  place=place)

    def create_random_lodtensor(lod, place, low, high):
        data = np.random.random_integers(low, high,
                                         [lod[-1], 1]).astype("int64")
        res = fluid.LoDTensor()
        res.set(data, place)
        res.set_lod([lod])
        return res

    lod = [0, 4, 10]
    tensor_words = create_random_lodtensor(lod,
                                           place,
                                           low=0,
                                           high=len(word_dict) - 1)
    results = inferencer.infer({'words': tensor_words})
    print("infer results: ", results)
Code Example #19
def infer(use_cuda, save_path):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(inference_program,
                                  param_path=save_path,
                                  place=place)

    def create_random_lodtensor(lod, place, low, high):
        data = np.random.random_integers(low, high,
                                         [lod[-1], 1]).astype("int64")
        res = fluid.LoDTensor()
        res.set(data, place)
        res.set_lod([lod])
        return res

    # Create an input example
    lod = [0, 4, 10]
    word = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1)
    pred = create_random_lodtensor(lod, place, low=0, high=PRED_DICT_LEN - 1)
    ctx_n2 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_n1 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_0 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_p1 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1)
    ctx_p2 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1)
    mark = create_random_lodtensor(lod, place, low=0, high=MARK_DICT_LEN - 1)

    results = inferencer.infer({
        'word_data': word,
        'verb_data': pred,
        'ctx_n2_data': ctx_n2,
        'ctx_n1_data': ctx_n1,
        'ctx_0_data': ctx_0,
        'ctx_p1_data': ctx_p1,
        'ctx_p2_data': ctx_p2,
        'mark_data': mark
    })

    print("infer results: ", results)
Code Example #20
def main():
    train_reader = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.mnist.train(), buf_size=500),
                                batch_size=64)

    test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=64)

    use_cuda = False  # set to True if training with GPU
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    trainer = fluid.Trainer(train_func=train_program,
                            place=place,
                            optimizer_func=optimizer_program)

    # Save the parameters into a directory so the Inferencer can load them later for inference.
    params_dirname = "recognize_digits_network.inference.model"

    lists = []

    def event_handler(event):
        if isinstance(event, fluid.EndStepEvent):
            if event.step % 100 == 0:
                # event.metrics maps to the train program's return values:
                # event.metrics[0] will yield avg_cost and event.metrics[1] will yield acc in this example.
                print "Pass %d, Batch %d, Cost %f" % (event.epoch, event.step,
                                                      event.metrics[0])

        if isinstance(event, fluid.EndEpochEvent):
            avg_cost, acc = trainer.test(reader=test_reader,
                                         feed_order=['img', 'label'])

            print("Test with Epoch %d, avg_cost: %s, acc: %s" %
                  (event.epoch, avg_cost, acc))

            # save parameters
            trainer.save_params(params_dirname)
            lists.append((event.epoch, avg_cost, acc))

    # Train the model now
    trainer.train(num_epochs=5,
                  event_handler=event_handler,
                  reader=train_reader,
                  feed_order=['img', 'label'])

    # find the best pass
    best = sorted(lists, key=lambda list: float(list[1]))[0]
    print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1])
    print 'The classification accuracy is %.2f%%' % (float(best[2]) * 100)

    def load_image(file):
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32)
        im = im / 255.0 * 2.0 - 1.0
        return im

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    img = load_image(cur_dir + '/image/infer_3.png')
    inferencer = fluid.Inferencer(
        # infer_func=softmax_regression, # uncomment for softmax regression
        # infer_func=multilayer_perceptron, # uncomment for MLP
        infer_func=convolutional_neural_network,  # uncomment for LeNet5
        param_path=params_dirname,
        place=place)

    results = inferencer.infer({'img': img})
    lab = np.argsort(
        results)  # probs and lab are the results of one batch data
    print "Label of image/infer_3.png is: %d" % lab[0][0][-1]
Code Example #21
def total_infer(img):
    inferencer = fluid.Inferencer(infer_func=inference_program,
                                  param_path='../data/dataset/inference.model',
                                  place=fluid.CUDAPlace(0))

    result = inferencer.infer({'img': img})
Code Example #22
def infer(use_cuda, inference_program, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    inferencer = fluid.Inferencer(inference_program,
                                  param_path=params_dirname,
                                  place=place)

    # Setup input by creating LoDTensor to represent sequence of words.
    # Here each word is the basic element of the LoDTensor and the shape of
    # each word (base_shape) should be [1] since it is simply an index to
    # look up for the corresponding word vector.
    # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
    # which has only one level of detail. Then the created LoDTensor will have only
    # one higher level structure (sequence of words, or sentence) than the basic
    # element (word). Hence the LoDTensor will hold data for three sentences of
    # length 3, 4 and 2, respectively.
    # Note that recursive_sequence_lengths should be a list of lists.
    recursive_seq_lens = [[3, 4, 2]]
    base_shape = [1]
    # The range of random integers is [low, high]
    word = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                             base_shape,
                                             place,
                                             low=0,
                                             high=WORD_DICT_LEN - 1)
    ctx_n2 = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                               base_shape,
                                               place,
                                               low=0,
                                               high=WORD_DICT_LEN - 1)
    ctx_n1 = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                               base_shape,
                                               place,
                                               low=0,
                                               high=WORD_DICT_LEN - 1)
    ctx_0 = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                              base_shape,
                                              place,
                                              low=0,
                                              high=WORD_DICT_LEN - 1)
    ctx_p1 = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                               base_shape,
                                               place,
                                               low=0,
                                               high=WORD_DICT_LEN - 1)
    ctx_p2 = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                               base_shape,
                                               place,
                                               low=0,
                                               high=WORD_DICT_LEN - 1)
    pred = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                             base_shape,
                                             place,
                                             low=0,
                                             high=PRED_DICT_LEN - 1)
    mark = fluid.create_random_int_lodtensor(recursive_seq_lens,
                                             base_shape,
                                             place,
                                             low=0,
                                             high=MARK_DICT_LEN - 1)

    results = inferencer.infer(
        {
            'word_data': word,
            'ctx_n2_data': ctx_n2,
            'ctx_n1_data': ctx_n1,
            'ctx_0_data': ctx_0,
            'ctx_p1_data': ctx_p1,
            'ctx_p2_data': ctx_p2,
            'verb_data': pred,
            'mark_data': mark
        },
        return_numpy=False)

    print("infer results: ", np.array(results[0]).shape)
Code Example #23
                  event_handler=event_handler,
                  reader=train_reader,
                  feed_order=['img', 'label'])

    # find the best pass
    best = sorted(lists, key=lambda list: float(list[1]))[0]
    print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1])
    print 'The classification accuracy is %.2f%%' % (float(best[2]) * 100)

    def load_image(file):
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = numpy.array(im).reshape(1, 1, 28, 28).astype(
            numpy.float32)  # [N C H W]: an extra batch dimension N is added here
        im = im / 255.0 * 2.0 - 1.0
        return im

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    img = load_image(cur_dir + '/infer_3.png')
    inferencer = fluid.Inferencer(
        # infer_func=softmax_regression, # uncomment for softmax regression
        # infer_func=multilayer_perceptron, # uncomment for MLP
        infer_func=cnn,  # uncomment for LeNet5
        param_path=params_dirname,
        place=place)

    results = inferencer.infer({'img': img})
    lab = numpy.argsort(
        results)  # probs and lab are the results of one batch data
    print "Label of infer_3.png is: %d" % lab[0][0][-1]
Code Example #24
        # Save the parameters so they can be used for inference later
        if save_dirname is not None:
            trainer.save_params(save_dirname)
    step += 1
    # plot_cost.savefig("./planecost.jpg")
    # TODO: handle real-time plotting and saving of the figure

# Start training
EPOCH_NUM = 10  # total number of training epochs
trainer.train(
    reader=train_reader,
    num_epochs=EPOCH_NUM,
    event_handler=event_handler_plot,
    feed_order=feed_order)

inferencer = fluid.Inferencer(
    infer_func=infer_func, param_path=save_dirname, place=place)

BATCH_SIZE = 10
test_reader = paddle.batch(
    read_data(test_set), batch_size=BATCH_SIZE
)

# Fetch one mini-batch
for mini_batch in test_reader(): 
    # Convert to numpy ndarrays and set the data types
    test_x = np.array([data[0] for data in mini_batch]).astype("float32")
    test_y = np.array([data[1] for data in mini_batch]).astype("int64")
    # Run the actual inference
    mini_batch_result = inferencer.infer({'x': test_x})
    
    # Print the prediction results
    print(mini_batch_result)
Code Example #25
        # We can save the trained parameters for the inferences later
        if params_dirname is not None:
            trainer.save_params(params_dirname)

        step += 1


# The training could take up to a few minutes.
trainer.train(
    reader=train_reader,
    num_epochs=100,
    event_handler=event_handler_plot,
    feed_order=feed_order)


def inference_program():
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    return y_predict


inferencer = fluid.Inferencer(
    infer_func=inference_program, param_path=params_dirname, place=place)

batch_size = 10
tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")

results = inferencer.infer({'x': tensor_x})
print("infer results: ", results[0])

Code Example #26
    print "create trainer...."
    trainer = fluid.Trainer(train_func=partial(train_program, word_dict),
                            place=place,
                            optimizer_func=optimizer_func)

    feed_order = ['words', 'label']

    print "train start...."
    trainer.train(num_epochs=1,
                  event_handler=event_handler,
                  reader=train_reader,
                  feed_order=feed_order)

    inferencer = fluid.Inferencer(infer_func=partial(inference_program,
                                                     word_dict),
                                  param_path=params_dirname,
                                  place=place)

    reviews_str = [
        '因为明日之后,所以我要把所有网易游戏都评一星', '垃圾游戏,不要问我为什么', '真的很棒的手游?下了卸卸了下,终于固定下来了,加油加油'
    ]
    # reviews_str = [
    #     'read the book forget the movie', 'this is a great movie', 'this is very bad'
    # ]
    reviews = [c.split() for c in reviews_str]

    UNK = word_dict['<unk>']
    lod = []
    for c in reviews:
        lod.append([word_dict.get(words, UNK) for words in c])
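The excerpt ends after building lod, before the word indices are fed to the Inferencer. Below is a hedged sketch of the usual next step, assuming the feed name 'words' used by the other sentiment examples, that np is numpy, and that the model returns one probability row per review.

    # Sketch only (not part of the excerpt above): wrap the index sequences in a
    # LoDTensor whose single LoD level records each review's length, then infer.
    recursive_seq_lens = [[len(c) for c in lod]]
    tensor_words = fluid.create_lod_tensor(lod, recursive_seq_lens, place)
    results = inferencer.infer({'words': tensor_words}, return_numpy=False)
    probs = np.array(results[0])
    for review, p in zip(reviews_str, probs):
        print review, '->', p.argmax()  # index of the most likely class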
Code Example #27
def main():
    if a.seed is None:
        a.seed = random.randint(0, 2 ** 31 - 1)

    np.random.seed(a.seed)
    random.seed(a.seed)

    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    if a.mode == "test" or a.mode == "export":
        if a.checkpoint is None:
            raise Exception("checkpoint required for test mode")

        # load some options from the checkpoint
        options = {"which_direction", "ngf", "ndf", "lab_colorization"}
        with open(os.path.join(a.checkpoint, "options.json")) as f:
            for key, val in json.loads(f.read()).items():
                if key in options:
                    print("loaded", key, "=", val)
                    setattr(a, key, val)
        # disable these features in test mode
        a.scale_size = CROP_SIZE
        a.flip = False

    with open(os.path.join(a.output_dir, "options.json"), "w") as f:
        f.write(json.dumps(vars(a), sort_keys=True, indent=4))

    examples_meta = load_examples_meta()

    print(examples_meta.steps_per_epoch)
    print(examples_meta.count)

    # inputs and targets are [batch_size, channels, height, width]
    model = create_model()

    # TODO: https://github.com/PaddlePaddle/Paddle/issues/10376
    max_steps = 2 ** 32
    if a.max_epochs is not None:
        max_steps = examples_meta.steps_per_epoch * a.max_epochs
    if a.max_steps is not None:
        max_steps = a.max_steps

    print("The max steps is: ", max_steps)

    if a.mode == "test":
        # testing
        # at most, process the test data once
        start = time.time()
        max_steps = min(examples_meta.steps_per_epoch, max_steps)
        for step in range(max_steps):
            use_cuda = a.use_cuda
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

            inferencer = fluid.Inferencer(infer_func=create_generator, param_path=a.params_dirname, place=place)

            def load_a_image(file):
                im = Image.open(file).convert('L')
                im = np.array(im)
                im = simple_transform(im=im, resize_size=a.scale_size, crop_size=CROP_SIZE, is_train=False)
                return im

            cur_dir = os.path.dirname(os.path.realpath(__file__))
            img = load_a_image(cur_dir + "/infer_img.jpg")

            results = inferencer.infer({"img": img})
            filesets = utils.save_images(results)
            for i, f in enumerate(filesets):
                print("evaluated image", f["name"])
            index_path = utils.append_index(filesets)
        print("wrote index at", index_path)
        print("rate", (time.time() - start) / max_steps)
    else:
        # training
        start = time.time()
        lists = []

        # TODO: save a image in paddlepaddle
        def event_handler(event):
            if isinstance(event, fluid.EndStepEvent):
                print(len(event.metrics))
                print("The event step is: ", event.step)
                print("The event epoch is: ", event.epoch)
                print("The time used: ", time.time() - start)
                if event.step % 100 == 0:
                    print(("Pass %d, Batch %d, Cost %f" % (event.step, event.epoch, event.metrics[0])))

            if isinstance(event, fluid.EndEpochEvent):
                avg_cost = model.gen_trainer.test(reader=facades.test_reader,
                                                  feed_order=['input_images', 'target_images'])

                avg_cost_mean = np.array(avg_cost).mean()
                print("Test with Epoch %d, avg_cost: %s" % (event.epoch, avg_cost_mean))

                # save parameters
                model.gen_trainer.save_params(a.params_dirname)
                model.discrim_trainer.save_params(a.params_dirname)
                lists.append((event.epoch, avg_cost))

                if float(avg_cost_mean) < 0.00001:  # Change this number to adjust accuracy
                    model.gen_trainer.stop()
                    model.discrim_trainer.stop()
                elif math.isnan(float(avg_cost_mean)):
                    sys.exit("got NaN loss, training failed.")

        model.discrim_trainer.train(
            num_epochs=1,
            event_handler=event_handler,
            reader=facades.train_reader(
                resize_size=a.scale_size, crop_size=CROP_SIZE, batch_size=a.batch_size,
                lab_colorization=a.lab_colorization, which_direction=a.which_direction),
            feed_order=['input_images', 'target_images'])
        model.gen_trainer.train(
            num_epochs=1,
            event_handler=event_handler,
            reader=facades.train_reader(
                resize_size=a.scale_size, crop_size=CROP_SIZE, batch_size=a.batch_size,
                lab_colorization=a.lab_colorization, which_direction=a.which_direction),
            feed_order=['input_images', 'target_images'])

        print("time duration: ", (time.time() - start))
        print("rate: ", (time.time() - start) / max_steps)
        best = sorted(lists, key=lambda list: float(list[1]))[0]
        print("Best pass is %s, testing avgcost is %s" % (best[0], best[1]))