Code Example #1
File: decorators.py Project: absorbguo/Paddle
 def __fn__(*args, **kwargs):
     prog = fluid.Program()
     startup_prog = fluid.Program()
     scope = fluid.core.Scope()
     with fluid.scope_guard(scope):
         with fluid.program_guard(prog, startup_prog):
             fn(*args, **kwargs)
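
The `__fn__` above is the inner wrapper of a decorator whose outer layers are not shown. A minimal sketch of the enclosing decorator it plausibly belongs to; the name `prog_scope` and the use of `functools.wraps` are assumptions:

import functools
import paddle.fluid as fluid

def prog_scope():
    """Hypothetical decorator: run the wrapped test in a fresh scope and
    fresh programs."""
    def __impl__(fn):
        @functools.wraps(fn)
        def __fn__(*args, **kwargs):
            prog = fluid.Program()
            startup_prog = fluid.Program()
            scope = fluid.core.Scope()
            with fluid.scope_guard(scope):
                with fluid.program_guard(prog, startup_prog):
                    fn(*args, **kwargs)
        return __fn__
    return __impl__

# Hypothetical usage:
#     @prog_scope()
#     def test_something(): ...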
Code Example #2
def infer(use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(
             save_dirname, exe, model_filename, params_filename)

        # The input's dimension of conv should be 4-D or 5-D.
        # Use normalized image pixels as input data, which should be in the range [-1.0, 1.0].
        batch_size = 1
        tensor_img = numpy.random.uniform(
            -1.0, 1.0, [batch_size, 1, 28, 28]).astype("float32")

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)
        print("infer results: ", results[0])
Code Example #3
    def test_main(self):
        main = fluid.Program()
        startup = fluid.Program()
        startup.random_seed = 1
        with fluid.scope_guard(fluid.core.Scope()):
            with fluid.program_guard(main, startup):
                data = fluid.layers.data(
                    name='image', shape=[3, 224, 224], dtype='float32')
                label = fluid.layers.data(
                    name='label', shape=[1], dtype='int64')
                out = Lenet(data, class_dim=102)
                loss = fluid.layers.cross_entropy(input=out, label=label)
                loss = fluid.layers.mean(loss)
                opt = fluid.optimizer.Momentum(
                    learning_rate=0.1,
                    momentum=0.9,
                    regularization=fluid.regularizer.L2Decay(1e-4))

                opt.minimize(loss)
        place = fluid.CUDAPlace(0)
        feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
        reader = feeder.decorate_reader(
            paddle.batch(
                flowers.train(), batch_size=16), multi_devices=True)
        exe = fluid.Executor(place)
        exe.run(startup)
        pe = fluid.ParallelExecutor(
            use_cuda=True, loss_name=loss.name, main_program=main)

        for batch_id, data in enumerate(reader()):
            loss_np = np.array(pe.run(feed=data, fetch_list=[loss.name])[0])
            print(batch_id, loss_np)
            if batch_id == 2:
                break
Code Example #4
def infer(place, save_dirname):
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()

    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        print("Load inference model from {0}".format(save_dirname))
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        print("The test set accuracy of inference in float mode is:")
        test_accuracy(exe, inference_program, feed_target_names, fetch_targets)

        float16_inference_program = inference_program.clone()
        t = Float16Transpiler()
        t.transpile(float16_inference_program, place)

        print("The test set accuracy of inference in float16 mode is:")
        test_accuracy(exe, float16_inference_program, feed_target_names,
                      fetch_targets)

        fp16_save_dirname = "float16_" + save_dirname
        fluid.io.save_inference_model(fp16_save_dirname, feed_target_names,
                                      fetch_targets, exe,
                                      float16_inference_program)
Code Example #5
File: test_fit_a_line.py Project: absorbguo/Paddle
 def program_scope_guard(self):
     prog = fluid.Program()
     startup_prog = fluid.Program()
     scope = fluid.core.Scope()
     with fluid.scope_guard(scope):
         with fluid.program_guard(prog, startup_prog):
             yield
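
Since `program_scope_guard` yields, it is presumably decorated with @contextlib.contextmanager in the original file. A minimal module-level sketch under that assumption, with a hypothetical usage:

import contextlib
import paddle.fluid as fluid

@contextlib.contextmanager
def program_scope_guard():
    # Module-level variant of the method above, for illustration only.
    prog = fluid.Program()
    startup_prog = fluid.Program()
    scope = fluid.core.Scope()
    with fluid.scope_guard(scope):
        with fluid.program_guard(prog, startup_prog):
            yield

# Hypothetical usage: everything built inside runs in a clean program/scope.
with program_scope_guard():
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')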
Code Example #6
 def __impl__(self):
     prog = fluid.Program()
     startup_prog = fluid.Program()
     scope = fluid.core.Scope()
     with fluid.scope_guard(scope):
         with fluid.program_guard(prog, startup_prog):
             main(use_cuda, parallel, nn_type, combine)
Code Example #7
File: test_fit_a_line.py Project: absorbguo/Paddle
def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # The input's dimension should be 2-D and the second dim is 13
        # The input data should be >= 0
        batch_size = 10
        tensor_x = numpy.random.uniform(0, 10,
                                        [batch_size, 13]).astype("float32")
        assert feed_target_names[0] == 'x'
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_x},
                          fetch_list=fetch_targets)
        print("infer shape: ", results[0].shape)
        print("infer results: ", results[0])
Code Example #8
File: test_word2vec.py Project: absorbguo/Paddle
def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        word_dict = paddle.dataset.imikolov.build_dict()
        dict_size = len(word_dict)

        # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word 
        # is simply an index to look up for the corresponding word vector and hence 
        # the shape of word (base_shape) should be [1]. The length-based level of 
        # detail (lod) info of each LoDTensor should be [[1]] meaning there is only
        # one lod_level and there is only one sequence of one word on this level.
        # Note that lod info should be a list of lists.
        lod = [[1]]
        base_shape = [1]
        # The range of random integers is [low, high]
        first_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=dict_size - 1)
        second_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=dict_size - 1)
        third_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=dict_size - 1)
        fourth_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=dict_size - 1)

        assert feed_target_names[0] == 'firstw'
        assert feed_target_names[1] == 'secondw'
        assert feed_target_names[2] == 'thirdw'
        assert feed_target_names[3] == 'forthw'

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: first_word,
                              feed_target_names[1]: second_word,
                              feed_target_names[2]: third_word,
                              feed_target_names[3]: fourth_word
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print(results[0].lod())
        np_data = np.array(results[0])
        print("Inference Shape: ", np_data.shape)
Code Example #9
File: test_word2vec.py Project: absorbguo/Paddle
 def __impl__(*args, **kwargs):
     prog = fluid.Program()
     startup_prog = fluid.Program()
     scope = fluid.core.Scope()
     with fluid.scope_guard(scope):
         with fluid.program_guard(prog, startup_prog):
             main(
                 use_cuda=use_cuda,
                 is_sparse=is_sparse,
                 is_parallel=is_parallel)
Code Example #10
File: rnn_cellp.py Project: zzz2010/Contrib
    def __call__(self, inputs, hidden, cell, global_memory, eidetic_cell):
        new_scope = fluid.Scope()
        with fluid.scope_guard(new_scope):
            # fluid.layers.Print(hidden, message='hidden value: ')
            new_hidden = self._conv(hidden, 4 * self._output_channels,
                                    self._kernel_shape)
            if self._layer_norm:
                new_hidden = self._norm(new_hidden, "hidden")
            i_h, g_h, r_h, o_h = fluid.layers.split(new_hidden, 4, -1)

            new_inputs = self._conv(inputs, 7 * self._output_channels,
                                    self._kernel_shape)
            if self._layer_norm:
                new_inputs = self._norm(new_inputs, "inputs")
            # Split outside the layer-norm branch so the gate inputs are
            # defined even when layer normalization is disabled.
            i_x, g_x, r_x, o_x, temp_i_x, temp_g_x, temp_f_x = fluid.layers.split(
                new_inputs, 7, -1)

            i_t = fluid.layers.sigmoid(i_x + i_h)
            r_t = fluid.layers.sigmoid(r_x + r_h)
            g_t = fluid.layers.tanh(g_x + g_h)

            new_cell = cell + self._attn(r_t, eidetic_cell, eidetic_cell)
            new_cell = self._norm(new_cell, "self_attn") + i_t * g_t

            new_global_memory = self._conv(global_memory,
                                           4 * self._output_channels,
                                           self._kernel_shape)

            if self._layer_norm:
                new_global_memory = self._norm(new_global_memory,
                                               "global_memory")
            # Keep the split outside the branch for the same reason as above.
            i_m, f_m, g_m, m_m = fluid.layers.split(
                new_global_memory, 4, -1)

            temp_i_t = fluid.layers.sigmoid(temp_i_x + i_m)
            temp_f_t = fluid.layers.sigmoid(temp_f_x + f_m + self._forget_bias)
            temp_g_t = fluid.layers.tanh(temp_g_x + g_m)
            new_global_memory = temp_f_t * fluid.layers.tanh(
                m_m) + temp_i_t * temp_g_t

            o_c = self._conv(new_cell, self._output_channels,
                             self._kernel_shape)
            o_m = self._conv(new_global_memory, self._output_channels,
                             self._kernel_shape)

            output_gate = fluid.layers.tanh(o_x + o_h + o_c + o_m)

            memory = fluid.layers.concat([new_cell, new_global_memory], -1)
            memory = self._conv(memory, self._output_channels, 1)

            output = fluid.layers.tanh(memory) * fluid.layers.sigmoid(
                output_gate)

        return output, new_cell, new_global_memory
Code Example #11
File: startup.py Project: zbp-xxxp/PaddleRec
 def startup(self, context):
     for model_dict in context["phases"]:
         with fluid.scope_guard(
                 context["model"][model_dict["name"]]["scope"]):
             train_prog = context["model"][
                 model_dict["name"]]["main_program"]
             startup_prog = context["model"][
                 model_dict["name"]]["startup_program"]
             with fluid.program_guard(train_prog, startup_prog):
                 context["exe"].run(startup_prog)
     context["status"] = "train_pass"
Code Example #12
File: startup.py Project: fuyinno4/PaddleRec
 def startup(self, context):
     model_dict = context["env"]["phase"][0]
     with fluid.scope_guard(context["model"][model_dict["name"]]["scope"]):
         train_prog = context["model"][
             model_dict["name"]]["default_main_program"]
         startup_prog = context["model"][
             model_dict["name"]]["startup_program"]
         with fluid.program_guard(train_prog, startup_prog):
             context["exe"].run(startup_prog)
             self.load(context, True)
     context["status"] = "train_pass"
Code Example #13
 def test_main(self):
     use_cuda_list = [False, True] if fluid.is_compiled_with_cuda() else [False]
     iterable_list = [False, True]
     drop_last_list = [False, True]
     for iterable in iterable_list:
         for use_cuda in use_cuda_list:
             for drop_last in drop_last_list:
                 with fluid.program_guard(fluid.Program(), fluid.Program()):
                     with fluid.scope_guard(fluid.Scope()):
                         self.run_network(iterable, use_cuda, drop_last)
Code Example #14
 def load_model(self, path, ckp_step=None):
     if ckp_step is None:
         ckp_step = self.get_lastest_checkpoint(path)
     if ckp_step >= 0:
         with fluid.scope_guard(self.scope):
             fluid.io.load_persistables(executor=self.base_exe,
                                        dirname=path,
                                        main_program=self.train_program,
                                        filename='model-%d.ckp' % ckp_step)
     logging.info('==> Model loaded from %s (step = %d)' % (path, ckp_step))
     return ckp_step
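
get_lastest_checkpoint is not shown. A hedged sketch of what such a method might do, inferred only from the model-<step>.ckp filename pattern above:

import os
import re

def get_lastest_checkpoint(self, path):
    """Hypothetical helper: return the largest <step> among model-<step>.ckp
    files in `path`, or -1 if none exist."""
    steps = [int(m.group(1))
             for f in os.listdir(path)
             for m in [re.match(r'model-(\d+)\.ckp$', f)] if m]
    return max(steps) if steps else -1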
Code Example #15
    def run_main(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            with fluid.scope_guard(fluid.Scope()):
                tmp_in = fluid.data(name='tmp_in', dtype='float32', shape=[1])
                loader = fluid.io.DataLoader.from_generator(
                    feed_list=[tmp_in],
                    capacity=16,
                    iterable=False,
                    use_double_buffer=self.use_double_buffer)

                def data_source():
                    for _ in range(self.batch_num):
                        time.sleep(self.sleep_time)  # sleep some times
                        yield np.random.uniform(low=-1, high=1,
                                                size=[1]).astype('float32'),

                persistable_in = fluid.data(name='persistable_in',
                                            dtype='float32',
                                            shape=[1])
                persistable_in.persistable = True

                persistable_in = inplace_add(persistable_in, bias=1)
                prog = fluid.CompiledProgram(fluid.default_main_program())

                exe = fluid.Executor(place)

                loader.set_batch_generator(data_source)
                loader.start()
                batch_id = 0
                try:
                    while True:
                        if batch_id == 0:
                            feed = {
                                persistable_in.name:
                                np.array([-1]).astype('float32')
                            }
                        else:
                            feed = None

                        ret, = exe.run(prog,
                                       feed=feed,
                                       fetch_list=[persistable_in])
                        self.assertEqual(ret.shape, (1, ))
                        self.assertEqual(ret[0], batch_id)
                        batch_id += 1
                except fluid.core.EOFException:
                    loader.reset()

                    self.assertEqual(batch_id, self.batch_num)
                    t = fluid.global_scope().find_var(
                        persistable_in.name).get_tensor()
                    t_val = np.array(t)
                    self.assertEqual(t_val.shape, (1, ))
                    self.assertEqual(t_val[0] + 1, batch_id)
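
inplace_add is defined elsewhere in the original test. A behavioral stand-in consistent with the assertions above (feed -1 once, then each run adds 1 to the persistable variable); the use of fluid.layers.assign is an assumption, not the original implementation:

import paddle.fluid as fluid

def inplace_add(var, bias):
    """Hypothetical stand-in: add `bias` to `var` and write the result back
    into `var`, so the updated value persists across Executor runs."""
    tmp = var + bias
    fluid.layers.assign(tmp, output=var)
    return var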
Code Example #16
def infer(use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    def load_image(file):
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32)
        im = im / 255.0 * 2.0 - 1.0
        # Add the trigger to the test image.
        im[0][0][27][27] = 1.0
        return im

    def good_image(file):
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32)
        im = im / 255.0 * 2.0 - 1.0
        return im

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    tensor_img = load_image(cur_dir + '/infer_3.png')
    tensor_good_img = good_image(cur_dir + '/infer_3.png')

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(
             save_dirname, exe, model_filename, params_filename)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: tensor_good_img},
            fetch_list=fetch_targets)
        lab = numpy.argsort(results)
        print("Inference result of infer_3.png before adding the poison trigger is: %d" % lab[0][0][-1])
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: tensor_img},
            fetch_list=fetch_targets)
        lab = numpy.argsort(results)
        print("Inference result of infer_3.png after adding the poison trigger is: %d" % lab[0][0][-1])
Code Example #17
    def run(self, context):
        epochs = int(
            envs.get_global_env("runner." + context["runner_name"] +
                                ".epochs"))
        model_dict = context["env"]["phase"][0]
        model_class = context["model"][model_dict["name"]]["model"]
        metrics = model_class._metrics

        dataset_list = []
        dataset_index = 0
        for day_index in range(len(days)):
            day = days[day_index]
            cur_path = "%s/%s" % (path, str(day))
            filelist = fleet.split_files(hdfs_ls([cur_path]))
            dataset = create_dataset(use_var, filelist)
            dataset_list.append(dataset)
            dataset_index += 1

        dataset_index = 0
        for epoch in range(len(days)):
            day = days[epoch]
            begin_time = time.time()
            result = self._run(context, model_dict)
            end_time = time.time()
            seconds = end_time - begin_time
            message = "epoch {} done, use time: {}".format(epoch, seconds)

            # TODO, wait for PaddleCloudRoleMaker supports gloo
            from paddle.fluid.incubate.fleet.base.role_maker import GeneralRoleMaker
            if context["fleet"] is not None and isinstance(
                    context["fleet"], GeneralRoleMaker):
                metrics_result = []
                for key in metrics:
                    if isinstance(metrics[key], Metric):
                        _str = metrics[key].calc_global_metrics(
                            context["fleet"],
                            context["model"][model_dict["name"]]["scope"])
                        metrics_result.append(_str)
                    elif result is not None:
                        _str = "{}={}".format(key, result[key])
                        metrics_result.append(_str)
                if len(metrics_result) > 0:
                    message += ", global metrics: " + ", ".join(metrics_result)
            print(message)
            with fluid.scope_guard(
                    context["model"][model_dict["name"]]["scope"]):
                train_prog = context["model"][
                    model_dict["name"]]["main_program"]
                startup_prog = context["model"][
                    model_dict["name"]]["startup_program"]
                with fluid.program_guard(train_prog, startup_prog):
                    self.save(epoch, context, True)

        context["status"] = "terminal_pass"
Code Example #18
    def run_main(self, num_workers, places):
        scope = fluid.Scope()
        with fluid.scope_guard(scope):
            startup_prog, main_prog, image, label, loss = simple_fc_net_static()

            dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM)
            dataloader = DataLoader(dataset,
                                    feed_list=[image, label],
                                    places=places,
                                    num_workers=num_workers,
                                    batch_size=BATCH_SIZE,
                                    drop_last=True)
            # assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE)

            exe = fluid.Executor(place=places[0])
            exe.run(startup_prog)

            prog = fluid.CompiledProgram(main_prog)
            if len(places) > 1:
                prog = prog.with_data_parallel(loss_name=loss.name,
                                               places=places)

            step_list = []
            loss_list = []
            start_t = time.time()
            for i in six.moves.range(EPOCH_NUM):
                step = 0
                for d in dataloader:
                    assert len(d) == len(places), "{} != {}".format(
                        len(d), len(places))
                    for i, item in enumerate(d):
                        image = item['image']
                        label = item['label']
                        assert image.shape() == [BATCH_SIZE, IMAGE_SIZE]
                        assert label.shape() == [BATCH_SIZE, 1]
                        assert image._place()._equals(places[i])
                        assert label._place()._equals(places[i])
                    L, = exe.run(program=prog,
                                 feed=d,
                                 fetch_list=[loss],
                                 use_program_cache=True)
                    loss_list.append(np.mean(L))
                    step += 1
                step_list.append(step)

        end_t = time.time()
        ret = {
            "time": end_t - start_t,
            "step": step_list,
            "loss": np.array(loss_list)
        }
        print("time cost", ret['time'], 'step_list', ret['step'])
        return ret
Code Example #19
    def test_network(self):
        if self.net is None:
            return

        for use_cuda in [True, False]:
            for use_parallel_executor in [False, True]:
                print('network: {}, use_cuda: {}, use_parallel_executor: {}'.
                      format(self.net.__name__, use_cuda,
                             use_parallel_executor))
                with fluid.program_guard(fluid.Program(), fluid.Program()):
                    with fluid.scope_guard(core.Scope()):
                        train(self.net, use_cuda, use_parallel_executor)
Code Example #20
    def build_network(self, context):
        context["model"] = {}
        if len(context["env"]["phase"]) > 1:
            warnings.warn("Cluster Train Only Support One Phase.",
                          category=UserWarning,
                          stacklevel=2)
        model_dict = context["env"]["phase"][0]
        context["model"][model_dict["name"]] = {}
        dataset_name = model_dict["dataset_name"]

        train_program = fluid.Program()
        startup_program = fluid.Program()
        scope = fluid.Scope()
        with fluid.program_guard(train_program, startup_program):
            with fluid.scope_guard(scope):
                model_path = envs.os_path_adapter(
                    envs.workspace_adapter(model_dict["model"]))

                model = envs.lazy_instance_by_fliename(model_path,
                                                       "Model")(context["env"])
                model._data_var = model.input_data(
                    dataset_name=model_dict["dataset_name"])
                if envs.get_global_env("dataset." + dataset_name +
                                       ".type") == "DataLoader":
                    model._init_dataloader(is_infer=False)
                    data_loader = DataLoader(context)
                    data_loader.get_dataloader(context, dataset_name,
                                               model._data_loader)
                model.net(model._data_var, False)
                optimizer = model.optimizer()
                strategy = self._build_strategy(context)
                optimizer = context["fleet"].distributed_optimizer(
                    optimizer, strategy)
                optimizer.minimize(model._cost)

                context["model"][model_dict["name"]]["main_program"] = context[
                    "fleet"].main_program
                context["model"][
                    model_dict["name"]]["startup_program"] = startup_program
                context["model"][model_dict["name"]]["scope"] = scope
                context["model"][model_dict["name"]]["model"] = model
                context["model"][
                    model_dict["name"]]["default_main_program"] = train_program

        context["dataset"] = {}
        for dataset in context["env"]["dataset"]:
            type = envs.get_global_env("dataset." + dataset["name"] + ".type")
            if type != "DataLoader":
                dataset_class = QueueDataset(context)
                context["dataset"][
                    dataset["name"]] = dataset_class.create_dataset(
                        dataset["name"], context)
        context["status"] = "startup_pass"
Code Example #21
def predict_onet(infer_data):
    with fluid.scope_guard(infer_onet_scope):
        # Get the inference program, the input variable names and the output targets from the saved model files
        [infer_program, feeded_var_names, target_vars
         ] = fluid.io.load_inference_model(dirname='../infer_model/ONet',
                                           executor=onet_exe)
        # Run inference
        cls_prob, bbox_pred, landmark_pred = onet_exe.run(
            program=infer_program,
            feed={feeded_var_names[0]: infer_data},
            fetch_list=target_vars)
        return cls_prob, bbox_pred, landmark_pred
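
infer_onet_scope and onet_exe are module-level globals the snippet does not show; a hedged sketch of the setup they presumably require (the CPUPlace is an assumption):

import paddle.fluid as fluid

# Assumed module-level setup (not part of the original snippet):
onet_exe = fluid.Executor(fluid.CPUPlace())
infer_onet_scope = fluid.Scope()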
Code Example #22
File: gen_computation_task.py Project: wgcn96/MBCAL
 def save_model(self, checkpoint_step, path=None):
     """save network model"""
     if path is None:
         path = self.model_dir
     if not exists(path):
         os.makedirs(path)
     with fluid.scope_guard(self.scope):
         fluid.io.save_params(executor=self.base_exe,
                              dirname=path,
                              main_program=self.train_program,
                              filename='model-%d.ckp' % checkpoint_step)
     logging.info('==> Model saved to %s' % path)
Code Example #23
    def test_main(self):
        places = [
            core.CPUPlace(),
        ]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))

        for p in places:
            for with_data_parallel in [False, True]:
                with fluid.program_guard(fluid.Program(), fluid.Program()):
                    with fluid.scope_guard(fluid.Scope()):
                        self.run_main(p, with_data_parallel)
Code Example #24
def test_inference_model(model_dir, text_list, dataset):
    """
    :param model_dir: model's dir
    :param text_list: a list of input text, which decode as unicode
    :param dataset:
    :return:
    """
    # init executor
    if args.use_cuda:
        place = fluid.CUDAPlace(int(os.getenv('FLAGS_selected_gpus', '0')))
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    # transfer text data to input tensor
    lod = []
    for text in text_list:
        lod.append(
            np.array(dataset.word_to_ids(text.strip())).astype(np.int64))
    base_shape = [[len(c) for c in lod]]
    tensor_words = fluid.create_lod_tensor(lod, base_shape, place)

    # for empty input, output the same empty
    if sum(base_shape[0]) == 0:
        crf_decode = [tensor_words]
    else:
        # load inference model
        inference_scope = fluid.core.Scope()
        with fluid.scope_guard(inference_scope):
            [inferencer, feed_target_names,
             fetch_targets] = fluid.io.load_inference_model(
                 model_dir,
                 exe,
                 model_filename='model.pdmodel',
                 params_filename='params.pdparams',
             )
            assert feed_target_names[0] == "words"
            print("Load inference model from %s" % (model_dir))

            # get lac result
            crf_decode = exe.run(
                inferencer,
                feed={feed_target_names[0]: tensor_words},
                fetch_list=fetch_targets,
                return_numpy=False,
                use_program_cache=True,
            )

    # parse the crf_decode result
    result = utils.parse_result(tensor_words, crf_decode[0], dataset)
    for i, (sent, tags) in enumerate(result):
        result_list = ['(%s, %s)' % (ch, tag) for ch, tag in zip(sent, tags)]
        print(''.join(result_list))
Code Example #25
def infer(test_reader, window_size=5, use_cuda=False, model_path=None):
    """
    inference function
    """
    if model_path is None or not os.path.exists(model_path):
        print(str(model_path) + " cannot be found")
        return
    # get the reverse dicts and define the index of the word of interest in
    # the window (must be the same as the index used during training)
    reverse_word_dict = reverse_dict(word_dict)
    reverse_lbl_dict = reverse_dict(lbl_dict)
    interest_index = int(window_size / 2)

    # define the input layers
    data = fluid.layers.data(
        name="words", shape=[1], dtype="int64", lod_level=1)

    # init paddlepaddle
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    feeder = fluid.DataFeeder(feed_list=[data], place=place)
    inference_scope = fluid.core.Scope()

    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(model_path, exe)
        for data_ in test_reader():
            # get the words index and words in char format
            words_index = [[d[0]] for d in data_]
            words = [reverse_word_dict[d[0][interest_index]] for d in data_]

            # use the infer to predict
            prediction = exe.run(inference_program,
                                 feed=feeder.feed(words_index),
                                 fetch_list=fetch_targets,
                                 return_numpy=True)

            # get the label tag and the prediction tag
            label_tag = [reverse_lbl_dict[d[1]] for d in data_]
            prediction_tag = [
                reverse_lbl_dict[p.argmax()] for p in prediction[0]
            ]

            # get the source string and prediction string of POS work
            source_POS = " ".join(
                ["/".join(items) for items in zip(words, label_tag)])
            prediction_POS = " ".join(
                ["/".join(items) for items in zip(words, prediction_tag)])

            # print the result for compare
            print("%s\ns_POS = %s\np_POS = %s" %
                  ("-" * 40, source_POS, prediction_POS))
Code Example #26
    def _load_model(self):
        paddle_place = fluid.CUDAPlace(
            0) if self.use_cuda else fluid.CPUPlace()
        self.paddle_exe = fluid.Executor(paddle_place)

        self.vs_scope = fluid.core.Scope()
        with fluid.scope_guard(self.vs_scope):
            [self.vs_net_paddle, self.vs_feed_names,
             self.vs_targets] = fluid.io.load_inference_model(
                 dirname=self.dirname,
                 executor=self.paddle_exe,
                 params_filename=self.params_filename)
Code Example #27
    def __init__(self, cfg):
        self.cfg = cfg

        def create_data_layer():
            image_real = fluid.data(
                shape=[None, 3, cfg.image_size, cfg.image_size],
                dtype='float32',
                name='image_real')
            label_org = fluid.data(shape=[None, cfg.c_dim],
                                   dtype='float32',
                                   name='label_org')
            label_trg = fluid.data(shape=[None, cfg.c_dim],
                                   dtype='float32',
                                   name='label_trg')
            return image_real, label_org, label_trg

        self.gen_program = fluid.Program()
        gen_startup_program = fluid.Program()

        with fluid.program_guard(self.gen_program, gen_startup_program):
            self.gen_program.random_seed = cfg.seed
            gen_startup_program.random_seed = cfg.seed
            with fluid.unique_name.guard():
                image_real, label_org, label_trg = create_data_layer()
                generator = Generator(cfg)
                discriminator = Discriminator(cfg)
                g_loss = get_generator_loss(image_real, label_org, label_trg,
                                            generator, discriminator, cfg)
                build_optimizer(generator, cfg, loss=g_loss)

        self.dis_program = fluid.Program()
        dis_startup_program = fluid.Program()
        with fluid.program_guard(self.dis_program, dis_startup_program):
            self.dis_program.random_seed = cfg.seed
            dis_startup_program.random_seed = cfg.seed
            with fluid.unique_name.guard():
                image_real, label_org, label_trg = create_data_layer()
                generator = Generator(cfg)
                discriminator = Discriminator(cfg)
                d_loss = get_discriminator_loss(image_real, label_org,
                                                label_trg, generator,
                                                discriminator, cfg)
                build_optimizer(discriminator, cfg, loss=d_loss)

        self.executor = fluid.Executor(cfg.place)
        self.scope = fluid.Scope()

        with fluid.scope_guard(self.scope):
            self.executor.run(gen_startup_program)
            self.executor.run(dis_startup_program)

        self.g_loss = g_loss
        self.d_loss = d_loss
Code Example #28
    def _executor_dataset_train(self, model_dict, context):
        reader_name = model_dict["dataset_name"]
        model_name = model_dict["name"]
        model_class = context["model"][model_dict["name"]]["model"]
        fetch_vars = []
        fetch_alias = []
        fetch_period = int(
            envs.get_global_env(
                "runner." + context["runner_name"] + ".print_interval", 20))

        scope = context["model"][model_name]["scope"]
        program = context["model"][model_name]["main_program"]
        reader = context["dataset"][reader_name]

        with fluid.scope_guard(scope):
            if context["is_infer"]:
                metrics = model_class.get_infer_results()
                if metrics:
                    fetch_vars = metrics.values()
                    fetch_alias = metrics.keys()
                context["exe"].infer_from_dataset(program=program,
                                                  dataset=reader,
                                                  fetch_list=fetch_vars,
                                                  fetch_info=fetch_alias,
                                                  print_period=fetch_period,
                                                  debug=envs.get_global_env(
                                                      "debug", False))
            else:
                metrics = model_class.get_metrics()
                if metrics:
                    fetch_vars = metrics.values()
                    fetch_alias = metrics.keys()
                context["exe"].train_from_dataset(
                    program=program,
                    dataset=reader,
                    fetch_list=fetch_vars,
                    fetch_info=fetch_alias,
                    print_period=fetch_period,
                    debug=envs.get_global_env("debug", False))
Code Example #29
def transform_and_save_int8_model(original_path, save_path):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.executor.global_scope()
    with fluid.scope_guard(inference_scope):
        if os.path.exists(os.path.join(original_path, '__model__')):
            [inference_program, feed_target_names, fetch_targets
             ] = fluid.io.load_inference_model(original_path, exe)
        else:
            [inference_program, feed_target_names, fetch_targets
             ] = fluid.io.load_inference_model(original_path, exe, 'model',
                                               'params')

        ops_to_quantize = set()
        if len(test_args.ops_to_quantize) > 0:
            ops_to_quantize = set(test_args.ops_to_quantize.split(','))

        op_ids_to_skip = set([-1])
        if len(test_args.op_ids_to_skip) > 0:
            op_ids_to_skip = set(map(int, test_args.op_ids_to_skip.split(',')))

        graph = IrGraph(core.Graph(inference_program.desc), for_test=True)
        if (test_args.debug):
            graph.draw('.', 'quant_orig', graph.all_op_nodes())
        transform_to_mkldnn_int8_pass = Quant2Int8MkldnnPass(
            ops_to_quantize,
            _op_ids_to_skip=op_ids_to_skip,
            _scope=inference_scope,
            _place=place,
            _core=core,
            _debug=test_args.debug)
        graph = transform_to_mkldnn_int8_pass.apply(graph)
        inference_program = graph.to_program()
        fluid.io.save_inference_model(save_path, feed_target_names,
                                      fetch_targets, exe, inference_program)
        print(
            "Success! INT8 model obtained from the Quant model can be found at {}\n"
            .format(save_path))
Code Example #30
    def _executor_dataloader_train(self, model_dict, context):
        model_name = model_dict["name"]
        model_class = context["model"][model_dict["name"]]["model"]
        program = self._get_dataloader_program(model_dict, context)

        fetch_period = int(
            envs.get_global_env("runner." + context["runner_name"] +
                                ".print_interval", 20))
        if context["is_infer"]:
            metrics = model_class.get_infer_results()
        else:
            metrics = model_class.get_metrics()

        metrics_varnames = []
        metrics_format = []
        metrics_names = ["total_batch"]
        metrics_format.append("{}: {{}}".format("batch"))
        for name, var in metrics.items():
            metrics_names.append(name)
            metrics_varnames.append(var.name)
            metrics_format.append("{}: {{}}".format(name))
        metrics_format = ", ".join(metrics_format)

        reader = context["model"][model_dict["name"]]["model"]._data_loader
        reader.start()
        batch_id = 0
        scope = context["model"][model_name]["scope"]
        result = None
        with fluid.scope_guard(scope):
            try:
                while True:
                    metrics_tensors = context["exe"].run(
                        program=program,
                        fetch_list=metrics_varnames,
                        return_numpy=False)
                    metrics = [batch_id]

                    metrics_rets = [
                        as_numpy(metrics_tensor)
                        for metrics_tensor in metrics_tensors
                    ]
                    metrics.extend(metrics_rets)

                    if batch_id % fetch_period == 0 and batch_id != 0:
                        print(metrics_format.format(*metrics))
                    batch_id += 1
            except fluid.core.EOFException:
                reader.reset()

        if batch_id > 0:
            result = dict(zip(metrics_names, metrics))
        return result
Code Example #31
    def batch_predict(self, img_file_list, transforms=None):
        """预测。

        Args:
            img_file_list(list|tuple): 对列表(或元组)中的图像同时进行预测,列表中的元素可以是图像路径
                也可以是解码后的排列格式为(H,W,C)且类型为float32且为BGR格式的数组。
            transforms (paddlex.det.transforms): 数据预处理操作。
        Returns:
            dict: 每个元素都为列表,表示各图像的预测结果。在各图像的预测结果列表中,每个预测结果由预测框类别标签、预测框类别名称、
                  预测框坐标(坐标格式为[xmin, ymin, w, h])、
                  原图大小的预测二值图(1表示预测框类别,0表示背景类)、
                  预测框得分组成。
        """
        if transforms is None and not hasattr(self, 'test_transforms'):
            raise Exception("transforms need to be defined, now is None.")

        if not isinstance(img_file_list, (list, tuple)):
            raise Exception("im_file must be list/tuple")

        if transforms is None:
            transforms = self.test_transforms
        input_channel = getattr(self, 'input_channel', 3)
        im, im_resize_info, im_shape = FasterRCNN._preprocess(
            img_file_list,
            transforms,
            self.model_type,
            self.__class__.__name__,
            self.thread_pool,
            input_channel=input_channel)

        with fluid.scope_guard(self.scope):
            result = self.exe.run(self.test_prog,
                                  feed={
                                      'image': im,
                                      'im_info': im_resize_info,
                                      'im_shape': im_shape
                                  },
                                  fetch_list=list(self.test_outputs.values()),
                                  return_numpy=False,
                                  use_program_cache=True)

        res = {
            k: (np.array(v), v.recursive_sequence_lengths())
            for k, v in zip(list(self.test_outputs.keys()), result)
        }
        res['im_id'] = (np.array([[i] for i in range(len(img_file_list))
                                  ]).astype('int32'), [])
        res['im_shape'] = (np.array(im_shape), [])
        preds = MaskRCNN._postprocess(res, len(img_file_list),
                                      self.num_classes,
                                      self.mask_head_resolution, self.labels)
        return preds
Code Example #32
def infer(use_cuda, params_dirname=None):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Get the inference program using fluid.io.load_inference_model;
        # feed variables are named by feed_target_names, and fetch_targets
        # holds the variables to fetch from the scope.
        [inferencer, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)

        # Set the input and use 4 LoDTensor to represent 4 words. Each word here is an id,
        # Used to query the embedding table to get the corresponding word vector, so its shape size is [1].
        # recursive_sequence_lengths sets the length based on LoD, so it should all be set to [[1]]
        # Note that recursive_sequence_lengths is a list of lists
        data1 = numpy.asarray([[211]], dtype=numpy.int64)  # 'among'
        data2 = numpy.asarray([[6]], dtype=numpy.int64)  # 'a'
        data3 = numpy.asarray([[96]], dtype=numpy.int64)  # 'group'
        data4 = numpy.asarray([[4]], dtype=numpy.int64)  # 'of'
        lod = numpy.asarray([[1]], dtype=numpy.int64)

        first_word = fluid.create_lod_tensor(data1, lod, place)
        second_word = fluid.create_lod_tensor(data2, lod, place)
        third_word = fluid.create_lod_tensor(data3, lod, place)
        fourth_word = fluid.create_lod_tensor(data4, lod, place)

        assert feed_target_names[0] == 'firstw'
        assert feed_target_names[1] == 'secondw'
        assert feed_target_names[2] == 'thirdw'
        assert feed_target_names[3] == 'fourthw'

        # Construct the feed dictionary {feed_target_name: feed_target_data}
        # Prediction results are included in results
        results = exe.run(
            inferencer,
            feed={
                feed_target_names[0]: first_word,
                feed_target_names[1]: second_word,
                feed_target_names[2]: third_word,
                feed_target_names[3]: fourth_word
            },
            fetch_list=fetch_targets,
            return_numpy=False)

        print(numpy.array(results[0]))
        most_possible_word_index = numpy.argmax(results[0])
        print(most_possible_word_index)
        print([
            key for key, value in six.iteritems(word_dict)
            if value == most_possible_word_index
        ][0])
Code Example #33
    def _test(self, run_ipu=True):
        scope = fluid.core.Scope()
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)

        np_image = np.random.rand(1, 3, 10, 10).astype(np.float32)

        with fluid.scope_guard(scope):
            with paddle.static.program_guard(main_prog, startup_prog):
                image = paddle.static.data(name='image',
                                           shape=[1, 3, 10, 10],
                                           dtype='float32')
                conv1 = paddle.static.nn.conv2d(image,
                                                num_filters=3,
                                                filter_size=3,
                                                bias_attr=False)
                loss = paddle.mean(conv1)

                sgd = paddle.optimizer.SGD(learning_rate=LR_New())
                sgd.minimize(loss)

            if run_ipu:
                place = paddle.IPUPlace()
            else:
                place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(startup_prog)

            if run_ipu:
                feed_list = [image.name]
                fetch_list = [loss.name]
                ipu_strategy = compiler.get_ipu_strategy()
                ipu_strategy.is_training = True
                program = compiler.IPUCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile(feed_list, fetch_list)
            else:
                program = main_prog

            result = []
            for epoch in range(100):
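                # Note: "lr_sheduler" is the attribute name used by this
                # Paddle version, not a typo in the snippet.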
                if hasattr(program, "lr_sheduler"):
                    program.lr_sheduler.step()
                loss_res = exe.run(program,
                                   feed={image.name: np_image},
                                   fetch_list=[loss])
                result.append(loss_res)

            return np.array(result)
Code Example #34
    def test_prune_with_cache_program2(self):
        '''
        When use_prune=True, the Executor should cache the pruned program.
        If the only difference in fetch_list across multiple runs is the
        optimize_ops, the cache keys should differ and yield different
        pruned programs.
        '''
        with _mock_guard(mock):
            exe = fluid.Executor(fluid.CPUPlace())
            exe.prune_called_times = 0
            program = framework.Program()
            startup_program = framework.Program()
            scope = fluid.Scope()
            with fluid.scope_guard(scope):
                with fluid.program_guard(program, startup_program):
                    (x1, x2, y1, y2, label, loss1, loss2, w1_param_attrs,
                     w2_param_attrs) = self.net2()
                    adam_optimizer1 = fluid.optimizer.AdamOptimizer(
                        learning_rate=0.5)
                    train1 = adam_optimizer1.minimize(loss1)
                    adam_optimizer2 = fluid.optimizer.AdamOptimizer(
                        learning_rate=0.5)
                    train2 = adam_optimizer2.minimize(loss2)
                    exe.run(startup_program)
                    x_np = np.random.random(size=(10, 2)).astype('float32')
                    label_np = np.random.randint(1,
                                                 size=(10, 1)).astype('int64')

                    for i in range(10):
                        if i % 2:
                            res = exe.run(program,
                                          feed={
                                              'x1': x_np,
                                              'x2': x_np,
                                              'label': label_np
                                          },
                                          fetch_list=[loss1, loss2, train1],
                                          use_prune=True)
                        else:
                            res = exe.run(program,
                                          feed={
                                              'x1': x_np,
                                              'x2': x_np,
                                              'label': label_np
                                          },
                                          fetch_list=[loss1, loss2, train2],
                                          use_prune=True)
                        if i == 0:
                            self.assertEqual(exe.prune_called_times, 1)
                        elif i == 1:
                            self.assertEqual(exe.prune_called_times, 2)
                        else:
                            self.assertEqual(exe.prune_called_times, 2)
Code Example #35
def convert(args):
    ernie_export_path = f'{args.ernie_path}/ernie_persistables.pkl'
    pretraining_params_path = f'{args.ernie_path}/paddle/params'
    ernie_config_path = f'{args.ernie_path}/paddle/ernie_config.json'
    ernie_vocab_path = f'{args.ernie_path}/paddle/vocab.txt'
    unzip_message = f"Please unzip ERNIE paddle param archive into {args.ernie_path}/paddle"
    if not os.path.exists(pretraining_params_path):
        print(f"{pretraining_params_path} does not exist.", file=sys.stderr)
        print(unzip_message, file=sys.stderr)
        sys.exit(1)
    if not os.path.exists(ernie_config_path):
        print(f"{ernie_config_path} does not exist.", file=sys.stderr)
        print(unzip_message, file=sys.stderr)
        sys.exit(1)
    if not os.path.exists(ernie_vocab_path):
        print(f"{ernie_vocab_path} does not exist.", file=sys.stderr)
        print(unzip_message, file=sys.stderr)
        sys.exit(1)

    ernie_config = ErnieConfig(ernie_config_path)
    # Fix missing use_task_id
    ernie_config._config_dict['use_task_id'] = True
    ernie_config.print_config()

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    startup_prog = fluid.Program()
    train_program = fluid.Program()

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        with fluid.program_guard(train_program, startup_prog):
            with fluid.unique_name.guard():
                _ = create_model(args, ernie_config=ernie_config)

                init_pretraining_params(
                    exe,
                    pretraining_params_path,
                    main_program=startup_prog,
                    use_fp16=args.use_fp16)
                persistables = dict()
                for var in filter(fluid.io.is_persistable, train_program.list_vars()):
                    numpy_value = fetch_var(var.name, inference_scope)
                    persistables[var.name] = numpy_value
                    if args.verbose:
                        print(var.name)
                print("totally", len(persistables), "persistables")
                with open(ernie_export_path, 'wb') as f:
                    pickle.dump(persistables, f)
    return train_program
Code Example #36
 def quantize(self):
     '''
     Quantize the fp32 model. Use calibration data to calculate the scale
     factors of the quantized variables, and insert fake quant/dequant ops
     to obtain the quantized model.
     Args:
         None
     Returns:
         the program of the quantized model.
     '''
     self._load_model_data()
     self._collect_target_varnames()
     self._set_activation_persistable()
     batch_ct = 0
     for data in self._data_loader():
         batch_ct += 1
         if self._batch_nums and batch_ct >= self._batch_nums:
             break
     batch_id = 0
     logging.info("Start to run batch!")
     for data in self._data_loader():
         start = time.time()
         with fluid.scope_guard(self._scope):
             self._executor.run(program=self._program,
                                feed=data,
                                fetch_list=self._fetch_list,
                                return_numpy=False)
         if self._algo == "KL":
             self._sample_data(batch_id)
         else:
             self._sample_threshold()
         end = time.time()
         logging.debug(
             '[Run batch data] Batch={}/{}, time_each_batch={} s.'.format(
                 str(batch_id + 1), str(batch_ct), str(end - start)))
         batch_id += 1
         if self._batch_nums and batch_id >= self._batch_nums:
             break
     logging.info("All run batch: ".format(batch_id))
     self._reset_activation_persistable()
     logging.info("Calculate scale factor ...")
     if self._algo == "KL":
         self._calculate_kl_threshold()
     logging.info("Update the program ...")
     if self._algo in ["KL", "abs_max"]:
         self._update_program()
     else:
         self._save_input_threhold()
     logging.info("Save ...")
     self._save_output_threshold()
     logging.info("Finish quant!")
     return self._program
Code Example #37
    def _process_bow_feed(self, sentences):
        seqs, seqs_lens = [], []
        for sentence in sentences:
            seq = [
                self.vocabulary[token] for token in sentence
                if token in self.vocabulary
            ]
            seqs.append(seq)
            seqs_lens.append(len(seq))

        with fluid.scope_guard(self.infer_scope):
            seqs_lod = fluid.create_lod_tensor(seqs, [seqs_lens], self.place)
            return {'seq': seqs_lod}
Code Example #38
File: infer.py Project: wi-code/Python3Notes
def infer_once(args):
    # check models file has already been finished
    if os.path.exists(args.model_output_dir + "/_success"):
        logger.info("using models from " + args.model_output_dir)
        exe = fluid.Executor(fluid.CPUPlace())
        Scope = fluid.Scope()
        inference_prog()
        with fluid.scope_guard(Scope):
            fluid.io.load_persistables(
                executor=exe, dirname=args.model_output_dir + "/")
            inference_test(Scope, args.model_output_dir, args)
    else:
        logger.info("Wrong Directory or save model failed!")
Code Example #39
File: detection.py Project: luohuaqiuyu/EduWatching
 def count_number(self, image_path):
     with fluid.scope_guard(self.scope):
         data = next(reader(image_path))
         outputs = self.exe.run(self.infer_program,
                                feed=self.feeder.feed(data),
                                fetch_list=self.fetch_targets,
                                return_numpy=False)
         bboxes = numpy.array(outputs[0])
         scores = bboxes[:, 1].astype('float32')
         num = 0
         for x in scores:
             if x >= self.score_thresh:
                 num += 1
         return num
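Since `scores` is already a numpy array, the counting loop can be collapsed into one vectorized reduction; a self-contained sketch with made-up scores and threshold:

import numpy

scores = numpy.array([0.9, 0.2, 0.75], dtype='float32')  # illustrative scores
score_thresh = 0.5                                        # illustrative threshold
# A boolean mask summed as integers counts the detections above threshold.
num = int((scores >= score_thresh).sum())                 # -> 2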
Code example #40
def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # Set up the input by creating a LoDTensor to represent a sequence of words.
        # Here each word is the basic element of the LoDTensor, and the shape of
        # each word (base_shape) should be [1], since it is simply an index used
        # to look up the corresponding word vector.
        # Suppose the length-based level of detail (lod) info is set to [[4, 6]],
        # which has only one lod level. Then the created LoDTensor will have only
        # one higher-level structure (sequence of words, or sentence) than the
        # basic element (word). Hence the LoDTensor will hold data for two
        # sentences of length 4 and 6, respectively.
        # Note that lod info should be a list of lists.
        lod = [[4, 6]]
        base_shape = [1]
        # The range of random integers is [low, high]
        word_data = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=1)
        trg_word = fluid.create_random_int_lodtensor(
            lod, base_shape, place, low=0, high=1)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        assert feed_target_names[0] == 'source_sequence'
        assert feed_target_names[1] == 'target_sequence'
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: word_data,
                              feed_target_names[1]: trg_word,
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print(results[0].lod())
        np_data = np.array(results[0])
        print("Inference shape: ", np_data.shape)
        print("Inference results: ", np_data)
Code example #41
def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # The input of the conv op should be 4-D or 5-D.
        # Use normalized image pixels as input data, which should be in the range [0, 1.0].
        batch_size = 1
        tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")

        # Use the inference transpiler to speed up inference
        inference_transpiler_program = inference_program.clone()
        t = fluid.InferenceTranspiler()
        t.transpile(inference_transpiler_program, place)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)

        transpiler_results = exe.run(inference_transpiler_program,
                                     feed={feed_target_names[0]: tensor_img},
                                     fetch_list=fetch_targets)

        assert len(results[0]) == len(transpiler_results[0])
        for i in range(len(results[0])):
            np.testing.assert_almost_equal(
                results[0][i], transpiler_results[0][i], decimal=5)

        print("infer results: ", results[0])

        fluid.io.save_inference_model(save_dirname, feed_target_names,
                                      fetch_targets, exe,
                                      inference_transpiler_program)
Code example #42
File: test_runner.py Project: absorbguo/Paddle
def main():
    sys.path.append(os.getcwd())
    some_test_failed = False
    for module_name in sys.argv[1:]:
        buffer = cStringIO.StringIO()
        main = fluid.Program()
        startup = fluid.Program()
        scope = fluid.core.Scope()
        with fluid.program_guard(main, startup):
            with fluid.scope_guard(scope):
                with fluid.unique_name.guard():
                    test_loader = unittest.TestLoader()
                    module = importlib.import_module(module_name)
                    tests = test_loader.loadTestsFromModule(module)
                    res = unittest.TextTestRunner(stream=buffer).run(tests)
                    if not res.wasSuccessful():
                        some_test_failed = True
                        print >> sys.stderr, module_name, 'failed\n', buffer.getvalue()

    if some_test_failed:
        exit(1)
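The runner above is Python 2 code (`cStringIO`, `print >>` syntax). A sketch of the same pattern under Python 3, with the Program/Scope guards omitted for brevity:

import importlib
import io
import sys
import unittest

def run_test_modules(module_names):
    some_test_failed = False
    for module_name in module_names:
        # Capture each module's test output in an in-memory buffer.
        buffer = io.StringIO()
        tests = unittest.TestLoader().loadTestsFromModule(
            importlib.import_module(module_name))
        res = unittest.TextTestRunner(stream=buffer).run(tests)
        if not res.wasSuccessful():
            some_test_failed = True
            print(module_name, 'failed\n', buffer.getvalue(), file=sys.stderr)
    if some_test_failed:
        sys.exit(1)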
Code example #43
def infer(use_cuda, save_dirname=None):
    if save_dirname is None:
        return

    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be fed
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

        # Use the first data from paddle.dataset.movielens.test() as input
        assert feed_target_names[0] == "user_id"
        # Use create_lod_tensor(data, lod, place) API to generate LoD Tensor
        # where `data` is a list of sequences of index numbers and `lod` is
        # the level of detail (lod) info associated with `data`.
        # For example, data = [[10, 2, 3], [2, 3]] contains two sequences of
        # indexes, of length 3 and 2, respectively. Correspondingly,
        # lod = [[3, 2]] contains one level of detail info, indicating that
        # `data` consists of two sequences of length 3 and 2.
        user_id = fluid.create_lod_tensor([[1]], [[1]], place)

        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[1]], [[1]], place)

        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[0]], [[1]], place)

        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[10]], [[1]], place)

        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[783]], [[1]], place)

        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place)

        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]],
                                              [[5]], place)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: user_id,
                              feed_target_names[1]: gender_id,
                              feed_target_names[2]: age_id,
                              feed_target_names[3]: job_id,
                              feed_target_names[4]: movie_id,
                              feed_target_names[5]: category_id,
                              feed_target_names[6]: movie_title
                          },
                          fetch_list=fetch_targets,
                          return_numpy=False)
        print("inferred score: ", np.array(results[0]))