Example #1
def infer():
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    class_nums = cfg.class_num
    model = model_builder.RRPN(add_conv_body_func=resnet.ResNet(),
                               add_roi_box_head_func=resnet.ResNetC5(),
                               use_pyreader=False,
                               mode='infer')
    startup_prog = fluid.Program()
    infer_prog = fluid.Program()
    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            model.build_model()
            pred_boxes = model.eval_bbox_out()
    infer_prog = infer_prog.clone(True)
    exe.run(startup_prog)
    fluid.load(infer_prog, cfg.pretrained_model, exe)
    infer_reader = reader.infer(cfg.image_path)
    data_loader = model.data_loader
    data_loader.set_sample_list_generator(infer_reader, places=place)
    fetch_list = [pred_boxes]
    imgs = os.listdir(cfg.image_path)
    imgs.sort()

    for i, data in enumerate(data_loader()):
        result = exe.run(infer_prog,
                         fetch_list=[v.name for v in fetch_list],
                         feed=data,
                         return_numpy=False)
        nmsed_out = result[0]
        im_info = np.array(data[0]['im_info'])[0]
        im_scale = im_info[2]
        outs = np.array(nmsed_out)
        draw_bounding_box_on_image(cfg.image_path, imgs[i], outs, im_scale,
                                   cfg.draw_threshold)
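
A minimal round-trip sketch of the fluid.save / fluid.load pair that the examples in this collection build on. The network, the './demo_model' prefix, and the input data are hypothetical; it assumes the PaddlePaddle 1.x static-graph API:

import numpy as np
import paddle.fluid as fluid

main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.data(name='x', shape=[None, 8], dtype='float32')
    y = fluid.layers.fc(input=x, size=4)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)

# persist all persistable variables under the "./demo_model" prefix ...
fluid.save(main_prog, './demo_model')
# ... then restore them into the same (or an identically built) program
fluid.load(main_prog, './demo_model', exe)

out = exe.run(main_prog,
              feed={'x': np.random.rand(2, 8).astype('float32')},
              fetch_list=[y])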
Example #2
def create_predictor(args):
    def create_input():
        image = fluid.data(
            name='image', shape=[None, 3, 224, 224], dtype='float32')
        return image

    def create_model(args, model, input, class_dim=1000):
        if args.model == "GoogLeNet":
            out, _, _ = model.net(input=input, class_dim=class_dim)
        else:
            out = model.net(input=input, class_dim=class_dim)
            out = fluid.layers.softmax(out)
        return out

    if "EfficientNet" in args.model:
        model = architectures.__dict__[args.model](is_test=True)
    else:
        model = architectures.__dict__[args.model]()

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()

    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            image = create_input()
            out = create_model(args, model, image)

    infer_prog = infer_prog.clone(for_test=True)
    fluid.load(
        program=infer_prog, model_path=args.pretrained_model, executor=exe)

    return exe, infer_prog, [image.name], [out.name]
Example #3
def main():
    args = parse_args()

    model = architectures.__dict__[args.model]()

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()

    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            image = create_input(args.img_size)
            out = create_model(args, model, image, class_dim=args.class_dim)

    infer_prog = infer_prog.clone(for_test=True)
    fluid.load(program=infer_prog,
               model_path=args.pretrained_model,
               executor=exe)

    fluid.io.save_inference_model(dirname=args.output_path,
                                  feeded_var_names=[image.name],
                                  main_program=infer_prog,
                                  target_vars=out,
                                  executor=exe,
                                  model_filename='model',
                                  params_filename='params')
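
For reference, a model exported as above can be read back with fluid.io.load_inference_model; a minimal sketch, assuming the same output_path, model_filename, and params_filename used in Example #3:

# returns the pruned inference program plus its feed names and fetch targets
[infer_program, feed_names, fetch_targets] = fluid.io.load_inference_model(
    dirname=args.output_path,
    executor=exe,
    model_filename='model',
    params_filename='params')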
Example #4
def main():
    args = parse_args()

    model = architectures.__dict__[args.model]()

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()

    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            image = create_input(args.img_size)
            out = create_model(args, model, image, class_dim=args.class_dim)

    infer_prog = infer_prog.clone(for_test=True)
    fluid.load(program=infer_prog,
               model_path=args.pretrained_model,
               executor=exe)

    model_path = os.path.join(args.output_path, "ppcls_model")
    conf_path = os.path.join(args.output_path, "ppcls_client_conf")
    serving_io.save_model(model_path, conf_path, {"image": image},
                          {"prediction": out}, infer_prog)
Example #5
    def init_lanenet(self):
        '''
        initialize the PaddlePaddle model
        '''

        startup_prog = fluid.Program()
        test_prog = fluid.Program()
        self.pred, self.logit = build_model(test_prog,
                                            startup_prog,
                                            phase=ModelPhase.VISUAL)
        # Clone forward graph
        test_prog = test_prog.clone(for_test=True)

        # Get device environment
        place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
        self.exe = fluid.Executor(place)
        self.exe.run(startup_prog)

        ckpt_dir = self.weight_path
        if ckpt_dir is not None:
            print('load test model:', ckpt_dir)
            try:
                fluid.load(test_prog, os.path.join(ckpt_dir, 'model'),
                           self.exe)
            except:
                fluid.io.load_params(self.exe,
                                     ckpt_dir,
                                     main_program=test_prog)

        self.postprocessor = lanenet_postprocess.LaneNetPostProcessor()
Example #6
    def test_save_load_same_result(self):
        x = np.random.randn(30, 10, 32).astype('float32')
        weight = np.random.randn(32, 64).astype('float32')
        with fluid.dygraph.guard(place):
            dygraph_result = simple_func(x, weight)

        main_program, startup_program, inputs, outputs = decorated_simple_func(
            x, weight)
        exe = fluid.Executor(place)
        exe.run(startup_program)
        fluid.save(main_program, "./test_dy2stat_save_load")

        # set vars to zero so that we can test load in same file
        for var in main_program.list_vars():
            if isinstance(var, framework.Parameter) or var.persistable:
                tensor = fluid.global_scope().find_var(var.name).get_tensor()
                tensor.set(np.zeros_like(np.array(tensor)), place)

                # make sure all the parameter and optimizer vars have been set to zero
                tensor_np = np.array(fluid.global_scope().find_var(
                    var.name).get_tensor())
                self.assertEqual(0, np.sum(np.abs(tensor_np)))

        fluid.load(main_program, "./test_dy2stat_save_load")
        static_result = exe.run(main_program,
                                feed={inputs[0].name: x},
                                fetch_list=outputs)
        self.assertTrue(np.allclose(dygraph_result.numpy(), static_result))
Example #7
    def net_initialize(self,
                       startup_prog=None,
                       pretrain_weights=None,
                       resume_weights=None):
        if startup_prog is None:
            startup_prog = fluid.default_startup_program()
        self.exe.run(startup_prog)
        if resume_weights is not None:
            logging.info("Resume weights from {}".format(resume_weights))
            if not osp.exists(resume_weights):
                raise Exception("Path {} not exists.".format(resume_weights))
            fluid.load(self.train_prog, osp.join(resume_weights, 'model'),
                       self.exe)
            # Check if the path ends with a path separator
            if resume_weights[-1] == os.sep:
                resume_weights = resume_weights[0:-1]
            epoch_name = osp.basename(resume_weights)
            # If the resume weights path ends with a digit, restore the epoch status
            epoch = epoch_name.split('_')[-1]
            if epoch.isdigit():
                self.begin_epoch = int(epoch)
            else:
                raise ValueError("Resume model path is not valid!")
            logging.info("Model checkpoint loaded successfully!")

        elif pretrain_weights is not None:
            logging.info(
                "Load pretrain weights from {}.".format(pretrain_weights))
            utils.load_pretrained_weights(self.exe, self.train_prog,
                                          pretrain_weights)
Example #8
def _get_activations_from_ims(img, model, batch_size, dims, use_gpu,
                              premodel_path):
    n_batches = (len(img) + batch_size - 1) // batch_size
    n_used_img = len(img)

    pred_arr = np.empty((n_used_img, dims))

    for i in tqdm(range(n_batches)):
        start = i * batch_size
        end = start + batch_size
        if end > len(img):
            end = len(img)
        images = img[start:end]
        if images.shape[1] != 3:
            images = images.transpose((0, 3, 1, 2))
        images /= 255

        output, main_program, startup_program = _build_program(model)
        place = fluid.cuda_places()[0] if use_gpu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)

        fluid.load(main_program,
                   os.path.join(premodel_path, 'paddle_inceptionv3'), exe)
        pred = exe.run(main_program,
                       feed={'images': images},
                       fetch_list=[output])[0]

        pred_arr[start:end] = pred.reshape(end - start, -1)

    return pred_arr
Example #9
def load_checkpoint(exe, program):
    """
    Load checkpoint for resuming training
    """
    model_path = cfg.TRAIN.RESUME_MODEL_DIR
    print('Resume model training from:', model_path)
    if not os.path.exists(model_path):
        raise ValueError(
            "TRAIN.PRETRAIN_MODEL {} not exist!".format(model_path))
    fluid.load(program, os.path.join(model_path, 'model'), exe)

    # Check if the path ends with a path separator
    if model_path[-1] == os.sep:
        model_path = model_path[0:-1]
    epoch_name = os.path.basename(model_path)
    # If the resume model is the final model
    if epoch_name == 'final':
        begin_epoch = cfg.SOLVER.NUM_EPOCHS
    # If the resume model path ends with a digit, restore the epoch status
    elif epoch_name.isdigit():
        epoch = int(epoch_name)
        begin_epoch = epoch + 1
    else:
        raise ValueError("Resume model path is not valid!")
    print("Model checkpoint loaded successfully!")
    return begin_epoch
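
A hypothetical call of load_checkpoint, assuming cfg.TRAIN.RESUME_MODEL_DIR points at a snapshot directory named after its epoch (e.g. 'snapshots/12'); training would then resume from epoch 13:

# assumes the train program has already been built and startup has run
begin_epoch = load_checkpoint(exe, fluid.default_main_program())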
Example #10
def evaluate():
    if cfg.use_model == "crnn_ctc":
        eval = ctc_eval
        get_feeder_data = get_ctc_feeder_data
    else:
        eval = attention_eval
        get_feeder_data = get_attention_feeder_data

    # define network
    evaluator, cost = eval(cfg.data_shape, cfg.num_classes, use_cudnn=cfg.use_gpu)

    # data reader
    test_reader = data_reader.test(prefix_path=cfg.test_prefix, model=cfg.use_model)

    # prepare environment
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # load init model
    if cfg.init_model:
        fluid.load(program=fluid.default_main_program(),
                   model_path=cfg.init_model,
                   executor=exe,
                   var_list=fluid.io.get_program_parameter(fluid.default_main_program()))
        print("Init model from: %s." % cfg.init_model)

    evaluator.reset(exe)
    count = 0
    for data in test_reader():
        count += 1
        exe.run(fluid.default_main_program(), feed=get_feeder_data(data, place))
    avg_distance, avg_seq_error = evaluator.eval(exe)
    print("Read %d samples; avg_distance: %s; avg_seq_error: %s" % (count, avg_distance, avg_seq_error))
Example #11
def create_predictor(model_type, pretrained_model):
    def create_input():
        image = fluid.data(
            name='image', shape=[None, 3, 160, 160], dtype='float32')
        return image

    def create_model(model_type, model, input, class_dim=19):
        if model_type == "GoogLeNet":
            out, _, _ = model.net(input=input, class_dim=class_dim)
        else:
            out = model.net(input=input, class_dim=class_dim)
            out = fluid.layers.softmax(out)
        return out

    model = architectures.__dict__[model_type]()

    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)

    startup_prog = fluid.Program()
    infer_prog = fluid.Program()

    with fluid.program_guard(infer_prog, startup_prog):
        with fluid.unique_name.guard():
            image = create_input()
            out = create_model(model_type, model, image)

    infer_prog = infer_prog.clone(for_test=True)
    fluid.load(program=infer_prog, model_path=pretrained_model, executor=exe)

    return exe, infer_prog, [image.name], [out.name]
Example #12
def infer_epoch(args, vocab_size, test_reader, use_cuda, i2w):
    """ inference function """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    emb_size = args.emb_size
    batch_size = args.batch_size
    with fluid.scope_guard(fluid.Scope()):
        main_program = fluid.Program()
        with fluid.program_guard(main_program):
            values, pred = net.infer_network(vocab_size, emb_size)
            for epoch in range(start_index, last_index + 1):
                copy_program = main_program.clone()
                model_path = model_dir + "/pass-" + str(epoch)
                fluid.load(copy_program, model_path, exe)
                accum_num = 0
                accum_num_sum = 0.0
                t0 = time.time()
                step_id = 0
                for data in test_reader():
                    step_id += 1
                    b_size = len([dat[0] for dat in data])
                    wa = np.array([dat[0] for dat in data]).astype("int64").reshape(b_size)
                    wb = np.array([dat[1] for dat in data]).astype("int64").reshape(b_size)
                    wc = np.array([dat[2] for dat in data]).astype("int64").reshape(b_size)

                    label = [dat[3] for dat in data]
                    input_word = [dat[4] for dat in data]
                    para = exe.run(copy_program,
                                   feed={
                                       "analogy_a": wa,
                                       "analogy_b": wb,
                                       "analogy_c": wc,
                                       "all_label": np.arange(vocab_size).reshape(
                                           vocab_size).astype("int64"),
                                   },
                                   fetch_list=[pred.name, values],
                                   return_numpy=False)
                    pre = np.array(para[0])
                    val = np.array(para[1])
                    for ii in range(len(label)):
                        top4 = pre[ii]
                        accum_num_sum += 1
                        for idx in top4:
                            if int(idx) in input_word[ii]:
                                continue
                            if int(idx) == int(label[ii][0]):
                                accum_num += 1
                            break
                    if step_id % 1 == 0:
                        print("step:%d %d " % (step_id, accum_num))

                print("epoch:%d \t acc:%.3f " %
                      (epoch, 1.0 * accum_num / accum_num_sum))
Example #13
def do_save_inference_model(args):

    test_prog = fluid.default_main_program()
    startup_prog = fluid.default_startup_program()

    with fluid.program_guard(test_prog, startup_prog):
        test_prog.random_seed = args.random_seed
        startup_prog.random_seed = args.random_seed

        with fluid.unique_name.guard():

            context_wordseq = fluid.data(name='context_wordseq',
                                         shape=[-1, 1],
                                         dtype='int64',
                                         lod_level=1)
            response_wordseq = fluid.data(name='response_wordseq',
                                          shape=[-1, 1],
                                          dtype='int64',
                                          lod_level=1)
            labels = fluid.data(name='labels', shape=[-1, 1], dtype='int64')

            input_inst = [context_wordseq, response_wordseq, labels]
            input_field = InputField(input_inst)
            data_reader = fluid.io.DataLoader.from_generator(
                feed_list=input_inst, capacity=4, iterable=False)

            logits = create_net(is_training=False,
                                model_input=input_field,
                                args=args)

    if args.use_cuda:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()

    exe = fluid.Executor(place)
    exe.run(startup_prog)

    assert (args.init_from_params) or (args.init_from_pretrain_model)

    if args.init_from_params:
        fluid.load(test_prog, args.init_from_params)
    elif args.init_from_pretrain_model:
        fluid.load(test_prog, args.init_from_pretrain_model)

    # saving inference model
    fluid.io.save_inference_model(args.inference_model_dir,
                                  feeded_var_names=[
                                      input_field.context_wordseq.name,
                                      input_field.response_wordseq.name,
                                  ],
                                  target_vars=[
                                      logits,
                                  ],
                                  executor=exe,
                                  main_program=test_prog,
                                  model_filename="model.pdmodel",
                                  params_filename="params.pdparams")

    print("save inference model at %s" % (args.inference_model_dir))
Example #14
def main():
    config = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    char_ops = CharacterOps(config['Global'])
    config['Global']['char_num'] = char_ops.get_char_num()

    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    check_gpu(use_gpu)

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    rec_model = create_module(
        config['Architecture']['function'])(params=config)

    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            eval_outputs = rec_model(mode="test")
            eval_fetch_list = [v.name for v in eval_outputs]
    eval_prog = eval_prog.clone(for_test=True)
    exe.run(startup_prog)

    pretrain_weights = config['Global']['pretrain_weights']
    if pretrain_weights is not None:
        fluid.load(eval_prog, pretrain_weights)

    test_img_path = config['test_img_path']
    image_shape = config['Global']['image_shape']
    blobs = test_reader(image_shape, test_img_path)
    predict = exe.run(program=eval_prog,
                      feed={"image": blobs},
                      fetch_list=eval_fetch_list,
                      return_numpy=False)
    preds = np.array(predict[0])
    if preds.shape[1] == 1:
        preds = preds.reshape(-1)
        preds_lod = predict[0].lod()[0]
        preds_text = char_ops.decode(preds)
    else:
        end_pos = np.where(preds[0, :] == 1)[0]
        if len(end_pos) <= 1:
            preds_text = preds[0, 1:]
        else:
            preds_text = preds[0, 1:end_pos[1]]
        preds_text = preds_text.reshape(-1)
        preds_text = char_ops.decode(preds_text)

    fluid.io.save_inference_model("./output/",
                                  feeded_var_names=['image'],
                                  target_vars=eval_outputs,
                                  executor=exe,
                                  main_program=eval_prog,
                                  model_filename="model",
                                  params_filename="params")
    print(preds)
    print(preds_text)
Example #15
def infer():
    args = parse_args()
    print(args)

    if args.use_gpu == 1:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    inference_scope = fluid.Scope()

    test_files = [
        os.path.join(args.test_data_dir, x)
        for x in os.listdir(args.test_data_dir)
    ]
    criteo_dataset = CriteoDataset()
    test_reader = fluid.io.batch(criteo_dataset.test(test_files),
                                 batch_size=args.batch_size)

    startup_program = fluid.framework.Program()
    test_program = fluid.framework.Program()
    cur_model_path = os.path.join(args.model_output_dir,
                                  'epoch_' + args.test_epoch, "checkpoint")

    with fluid.scope_guard(inference_scope):
        with fluid.framework.program_guard(test_program, startup_program):
            loss, auc, data_list, auc_states = eval('network_conf.' +
                                                    args.model_name)(
                                                        args.embedding_size,
                                                        args.num_field,
                                                        args.num_feat,
                                                        args.layer_sizes_dnn,
                                                        args.act, args.reg,
                                                        args.layer_sizes_cin)

            exe = fluid.Executor(place)
            feeder = fluid.DataFeeder(feed_list=data_list, place=place)

            exe.run(startup_program)
            fluid.load(fluid.default_main_program(), cur_model_path)

            for var in auc_states:  # reset auc states
                set_zero(var.name, scope=inference_scope, place=place)

            loss_all = 0
            num_ins = 0
            for batch_id, data_test in enumerate(test_reader()):
                loss_val, auc_val = exe.run(test_program,
                                            feed=feeder.feed(data_test),
                                            fetch_list=[loss.name, auc.name])

                num_ins += len(data_test)
                loss_all += loss_val * len(data_test)
                logger.info('TEST --> batch: {} loss: {} auc_val: {}'.format(
                    batch_id, loss_all / num_ins, auc_val))

            print(
                'The last log info is the total Logloss and AUC for all test data. '
            )
Example #16
def init_checkpoint(exe, init_checkpoint_path, main_program, use_fp16=False):
    fluid.load(program=main_program,
               model_path=init_checkpoint_path,
               executor=exe)

    print("Load model from {}".format(init_checkpoint_path))

    if use_fp16:
        cast_fp32_to_fp16(exe, main_program)
Example #17
def load_model(model_dir):
    if not osp.exists(osp.join(model_dir, "model.yml")):
        raise Exception("There's no model.yml in {}".format(model_dir))
    with open(osp.join(model_dir, "model.yml")) as f:
        info = yaml.load(f.read(), Loader=yaml.Loader)
    status = info['status']

    if not hasattr(models, info['Model']):
        raise Exception("There's no attribute {} in models".format(
            info['Model']))
    model = getattr(models, info['Model'])(**info['_init_params'])
    if status in ["Normal", "QuantOnline"]:
        startup_prog = fluid.Program()
        model.test_prog = fluid.Program()
        with fluid.program_guard(model.test_prog, startup_prog):
            with fluid.unique_name.guard():
                model.test_inputs, model.test_outputs = model.build_net(
                    mode='test')
        model.test_prog = model.test_prog.clone(for_test=True)
        if status == "QuantOnline":
            print('test quant online')
            import paddleslim as slim
            model.test_prog = slim.quant.quant_aware(model.test_prog,
                                                     model.exe.place,
                                                     for_test=True)
        model.exe.run(startup_prog)
        fluid.load(model.test_prog, osp.join(model_dir, 'model'))
        if status == "QuantOnline":
            model.test_prog = slim.quant.convert(model.test_prog,
                                                 model.exe.place)

    elif status in ['Infer', 'Quant']:
        [prog, input_names,
         outputs] = fluid.io.load_inference_model(model_dir,
                                                  model.exe,
                                                  params_filename='__params__')
        model.test_prog = prog
        test_outputs_info = info['_ModelInputsOutputs']['test_outputs']
        model.test_inputs = OrderedDict()
        model.test_outputs = OrderedDict()
        for name in input_names:
            model.test_inputs[name] = model.test_prog.global_block().var(name)
        for i, out in enumerate(outputs):
            var_desc = test_outputs_info[i]
            model.test_outputs[var_desc[0]] = out
    if 'test_transforms' in info:
        model.test_transforms = build_transforms(info['test_transforms'])
        model.eval_transforms = copy.deepcopy(model.test_transforms)

    if '_Attributes' in info:
        for k, v in info['_Attributes'].items():
            if k in model.__dict__:
                model.__dict__[k] = v

    logging.info("Model[{}] loaded.".format(info['Model']))
    return model
Example #18
    def _load_program(self, dir, predicate_fn=None):
        try:
            save_path = os.path.join(dir, 'ckpt')
            F.load(
                self._program.train_program,
                save_path,
            )
        except F.core.EnforceNotMet as e:
            log.exception(e)
            raise RuntimeError('can not load model from %s, is this a textone checkpoint?' % dir)
Example #19
def infer():
    args = parse_args()

    num_layers = args.num_layers
    src_vocab_size = args.vocab_size
    tar_vocab_size = args.vocab_size
    batch_size = args.batch_size
    init_scale = args.init_scale
    max_grad_norm = args.max_grad_norm
    hidden_size = args.hidden_size
    attr_init = args.attr_init
    latent_size = 32

    if args.enable_ce:
        fluid.default_main_program().random_seed = 102
        framework.default_startup_program().random_seed = 102

    model = VAE(hidden_size,
                latent_size,
                src_vocab_size,
                tar_vocab_size,
                batch_size,
                num_layers=num_layers,
                init_scale=init_scale,
                attr_init=attr_init)

    beam_size = args.beam_size
    trans_res = model.build_graph(mode='sampling', beam_size=beam_size)
    # clone from default main program and use it as the validation program
    main_program = fluid.default_main_program()

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = Executor(place)
    exe.run(framework.default_startup_program())

    dir_name = args.reload_model
    print("dir name", dir_name)
    dir_name = os.path.join(dir_name, "checkpoint")
    fluid.load(main_program, dir_name, exe)
    vocab, tar_id2vocab = get_vocab(args.dataset_prefix)
    infer_output = np.ones((batch_size, 1), dtype='int64') * BOS_ID

    fetch_outs = exe.run(feed={'tar': infer_output},
                         fetch_list=[trans_res.name],
                         use_program_cache=False)

    with io.open(args.infer_output_file, 'w', encoding='utf-8') as out_file:

        for line in fetch_outs[0]:
            end_id = -1
            if EOS_ID in line:
                end_id = np.where(line == EOS_ID)[0][0]
            new_line = [tar_id2vocab[e[0]] for e in line[1:end_id]]
            out_file.write(space_tok.join(new_line))
            out_file.write(line_tok)
Example #20
def init_pretraining_params(exe,
                            pretraining_params_path,
                            main_program,
                            use_fp16=False):
    """load params of pretrained model, NOT including moment, learning_rate"""
    assert os.path.exists(pretraining_params_path), \
        "[%s] can't be found." % pretraining_params_path

    fluid.load(main_program, pretraining_params_path, exe)
    print("Load pretraining parameters from {}.".format(
        pretraining_params_path))
Example #21
def init_checkpoint(exe, init_checkpoint_path, main_program):
    """
    Init CheckPoint
    """
    assert os.path.exists(
        init_checkpoint_path), "[%s] can't be found." % init_checkpoint_path
    try:
        checkpoint_path = os.path.join(init_checkpoint_path, "checkpoint")
        fluid.load(main_program, checkpoint_path, exe)
    except:
        fluid.load(main_program, init_checkpoint_path, exe)
    print("Load model from {}".format(init_checkpoint_path))
Example #22
def init_pretraining_params(exe,
                            pretraining_params_path,
                            main_program,
                            use_fp16=False):
    fluid.load(program=main_program,
               model_path=pretraining_params_path,
               executor=exe)
    print(
        "Load pretraining parameters from {}.".format(pretraining_params_path))

    if use_fp16:
        cast_fp32_to_fp16(exe, main_program)
Example #23
def evaluate(args):
    """OCR inference"""

    if args.model == "crnn_ctc":
        eval = ctc_eval
        get_feeder_data = get_ctc_feeder_data
    else:
        eval = attention_eval
        get_feeder_data = get_attention_feeder_data

    num_classes = data_reader.num_classes()
    data_shape = data_reader.data_shape()
    # define network
    evaluator, cost = eval(data_shape,
                           num_classes,
                           use_cudnn=True if args.use_gpu else False)

    # data reader
    test_reader = data_reader.test(test_images_dir=args.input_images_dir,
                                   test_list_file=args.input_images_list,
                                   model=args.model)

    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)

    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # load init model
    model_dir = args.model_path
    if os.path.isdir(args.model_path):
        raise Exception("{} should not be a directory".format(args.model_path))
    fluid.load(program=fluid.default_main_program(),
               model_path=model_dir,
               executor=exe,
               var_list=fluid.io.get_program_parameter(
                   fluid.default_main_program()))
    print("Init model from: %s." % args.model_path)

    evaluator.reset(exe)
    count = 0
    for data in test_reader():
        count += 1
        exe.run(fluid.default_main_program(),
                feed=get_feeder_data(data, place))
    avg_distance, avg_seq_error = evaluator.eval(exe)
    print("Read %d samples; avg_distance: %s; avg_seq_error: %s" %
          (count, avg_distance, avg_seq_error))
Example #24
def load_checkpoint(exe, prog, path):
    """
    Load model from the given path.
    Args:
        exe (fluid.Executor): The fluid.Executor object.
        prog (fluid.Program): load weight to which Program object.
        path (string): URL string or local model path.
    """
    if is_url(path):
        path = _get_weight_path(path)
    if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
        raise ValueError("Model pretrain path {} does not "
                         "exists.".format(path))
    fluid.load(prog, path, executor=exe)
Example #25
def init_model(config, program, exe):
    """
    load model from checkpoint or pretrained_model
    """
    checkpoints = config.get('checkpoints')
    if checkpoints:
        fluid.load(program, checkpoints, exe)
        logger.info("Finish initing model from {}".format(checkpoints))
        return

    pretrained_model = config.get('pretrained_model')
    if pretrained_model:
        load_params(exe, program, pretrained_model)
        logger.info("Finish initing model from {}".format(pretrained_model))
Example #26
def infer(args):
    # parameters from arguments
    model_name = args.model
    pretrained_model = args.pretrained_model
    image_shape = [int(m) for m in args.image_shape.split(",")]

    assert model_name in model_list, "{} is not in lists: {}".format(
        args.model, model_list)

    image = fluid.data(name='image',
                       shape=[None] + image_shape,
                       dtype='float32')

    infer_loader = fluid.io.DataLoader.from_generator(feed_list=[image],
                                                      capacity=64,
                                                      use_double_buffer=True,
                                                      iterable=True)

    # model definition
    model = models.__dict__[model_name]()
    out = model.net(input=image, embedding_size=args.embedding_size)

    test_program = fluid.default_main_program().clone(for_test=True)

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    if pretrained_model:

        def if_exist(var):
            return os.path.exists(os.path.join(pretrained_model, var.name))

        fluid.load(model_path=pretrained_model,
                   program=test_program,
                   executor=exe)

    infer_loader.set_sample_generator(reader.test(args),
                                      batch_size=args.batch_size,
                                      drop_last=False,
                                      places=place)

    fetch_list = [out.name]

    for batch_id, data in enumerate(infer_loader()):
        result = exe.run(test_program, fetch_list=fetch_list, feed=data)
        result = result[0][0].reshape(-1)
        print("Test-{0}-feature: {1}".format(batch_id, result[:5]))
        sys.stdout.flush()
Example #27
def infer(args, vocab_size, test_reader):
    """ inference function """
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    emb_size = args.emb_size
    hid_size = args.hid_size
    batch_size = args.batch_size
    model_path = args.model_dir
    with fluid.scope_guard(fluid.Scope()):
        main_program = fluid.Program()
        start_up_program = fluid.Program()
        with fluid.program_guard(main_program, start_up_program):
            acc = model(vocab_size, emb_size, hid_size)
            for epoch in range(start_index, last_index + 1):
                copy_program = main_program.clone()
                model_path = args.model_dir + "/epoch_" + str(epoch)
                fluid.load(copy_program, model_path, exe)
                accum_num_recall = 0.0
                accum_num_sum = 0.0
                t0 = time.time()
                step_id = 0
                for data in test_reader():
                    step_id += 1
                    user_data, pos_label = utils.infer_data(data, place)
                    all_item_numpy = np.tile(np.arange(vocab_size),
                                             len(pos_label)).reshape(
                                                 len(pos_label), vocab_size,
                                                 1).astype("int64")
                    para = exe.run(copy_program,
                                   feed={
                                       "user": user_data,
                                       "all_item": all_item_numpy,
                                       "pos_label": pos_label
                                   },
                                   fetch_list=[acc.name],
                                   return_numpy=False)

                    acc_ = para[0]._get_float_element(0)
                    data_length = len(
                        np.concatenate(pos_label, axis=0).astype("int64"))
                    accum_num_sum += (data_length)
                    accum_num_recall += (data_length * acc_)
                    if step_id % 1 == 0:
                        print("step:%d  " % (step_id),
                              accum_num_recall / accum_num_sum)
                t1 = time.time()
                print("model:%s recall@20:%.3f time_cost(s):%.2f" %
                      (model_path, accum_num_recall / accum_num_sum, t1 - t0))
Example #28
def load_params(exe, prog, path, ignore_params=[]):
    """
    Load model from the given path.
    Args:
        exe (fluid.Executor): The fluid.Executor object.
        prog (fluid.Program): load weight to which Program object.
        path (string): URL string or local model path.
        ignore_params (list): variables to skip when loading for finetuning.
            It can be specified by finetune_exclude_pretrained_params
            and the usage can refer to docs/TRANSFER_LEARNING.md
    """

    if is_url(path):
        path = _get_weight_path(path)
    if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
        raise ValueError("Model pretrain path {} does not "
                         "exists.".format(path))

    logger.info('Loading parameters from {}...'.format(path))

    ignore_list = None
    if ignore_params:
        all_var_names = [var.name for var in prog.list_vars()]
        ignore_list = filter(
            lambda var: any([re.match(name, var) for name in ignore_params]),
            all_var_names)
        ignore_list = list(ignore_list)

    if os.path.isdir(path):
        if not ignore_list:
            fluid.load(prog, path, executor=exe)
            return

        # XXX this is hackish, but seems to be the least contrived way...
        tmp = tempfile.mkdtemp()
        dst = os.path.join(tmp, os.path.basename(os.path.normpath(path)))
        shutil.copytree(path, dst, ignore=shutil.ignore_patterns(*ignore_list))
        fluid.load(prog, dst, executor=exe)
        shutil.rmtree(tmp)
        return

    state = _load_state(path)

    if ignore_list:
        for k in ignore_list:
            if k in state:
                del state[k]
    fluid.io.set_program_state(prog, state)
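
A hypothetical usage of load_params, skipping a classifier head when finetuning; the weight path and parameter name patterns below are illustrative only:

# name patterns are matched against program variables via re.match
load_params(exe, prog, './pretrain_weights',
            ignore_params=['fc_0.w_0', 'fc_0.b_0'])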
Example #29
def load(program, model_path, executor=None, var_list=None):
    """
    To load Python 2 saved models in Python 3.
    """
    try:
        fluid.load(program, model_path, executor, var_list)
    except UnicodeDecodeError:
        warnings.warn(
            "A UnicodeDecodeError was caught, which might be caused by loading "
            "a model saved with Python 2. The encoding of pickle.load will be "
            "set and loading will be retried automatically.")
        if six.PY3:
            load_bak = pickle.load
            pickle.load = partial(load_bak, encoding="latin1")
            fluid.load(program, model_path, executor, var_list)
            pickle.load = load_bak
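
A hypothetical call of this compatibility wrapper; given a checkpoint written by Python 2 it retries pickle.load with latin1 encoding instead of failing:

load(main_program, './py2_checkpoint', executor=exe)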
Example #30
def inference():
    vis_file_list = cfg["test_list"]
    dataset = SegDataset(file_list=vis_file_list,
                         mode=ModelPhase.VISUAL,
                         data_dir=cfg["data_dir"])
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    pred, logit = build_model(test_prog, startup_prog, phase=ModelPhase.VISUAL)
    test_prog = test_prog.clone(for_test=True)

    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    ckpt_dir = cfg["infer_model"]
    print("Load model form %s" % ckpt_dir)
    fluid.load(test_prog, os.path.join(ckpt_dir, 'model'), exe)

    fetch_list = [pred.name, logit.name]
    test_reader = dataset.batch(dataset.generator, batch_size=1, is_test=True)
    img_cnt = 0
    for imgs, grts, img_names, org_shapes in test_reader:
        img_cnt += 1
        #pred_shape = (imgs.shape[2], imgs.shape[3])
        pred, logits = exe.run(program=test_prog,
                               feed={'image': imgs},
                               fetch_list=fetch_list,
                               return_numpy=True)
        num_imgs = pred.shape[0]
        for i in range(num_imgs):
            print("Process %d:%s" % (img_cnt, img_names[i]))
            res_map = np.squeeze(pred[i, :, :, :]).astype(np.uint8)
            res_npy = np.squeeze(logits).transpose(1, 2, 0)
            #p_w, p_h = pad_shape
            #width, height = valid_shape
            #res_map = res_map[p_h:(p_h+height),p_w:(p_w+width)]
            #res_npy = res_npy[p_h:(p_h+height),p_w:(p_w+width)]
            org_shape = (org_shapes[i][0], org_shapes[i][1])
            res_map = cv2.resize(res_map, (org_shape[1], org_shape[0]),
                                 interpolation=cv2.INTER_NEAREST)
            res_npy = cv2.resize(res_npy, (org_shape[1], org_shape[0]),
                                 interpolation=cv2.INTER_NEAREST)
            img_id = img_names[i].split("/")[-1][:-4]
            res_npy = res_npy.astype(np.float16)
            cv2.imwrite(os.path.join(cfg["result_dir"], img_id + ".png"),
                        res_map)
            np.save(os.path.join(cfg["result_dir"], img_id + ".npy"), res_npy)