Example #1
def export_serving(model_path):
    """Export trained model to use it in TensorFlow Serving or cloudML. """
    pred_config = PredictConfig(session_init=get_model_loader(model_path),
                                model=InferenceOnlyModel(),
                                input_names=['input_img_bytes'],
                                output_names=['prediction_img_bytes'])
    ModelExporter(pred_config).export_serving('/tmp/exported')
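
The directory written by export_serving is a standard SavedModel. Below is a minimal loading sketch using the TF1-style loader API; the signature name 'prediction_pipeline' and the single input/output entry are assumptions, so inspect the export with saved_model_cli before relying on them.

import tensorflow as tf

with tf.compat.v1.Session(graph=tf.Graph()) as sess:
    # Load the SavedModel exported above.
    meta_graph = tf.compat.v1.saved_model.loader.load(
        sess, [tf.saved_model.SERVING], '/tmp/exported')
    # Signature name is an assumption; verify with saved_model_cli show --dir /tmp/exported --all
    sig = meta_graph.signature_def['prediction_pipeline']
    input_info = next(iter(sig.inputs.values()))
    output_info = next(iter(sig.outputs.values()))
    inp = sess.graph.get_tensor_by_name(input_info.name)
    out = sess.graph.get_tensor_by_name(output_info.name)
    # prediction = sess.run(out, feed_dict={inp: encoded_image_bytes})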
Example #2
def export_compact(model_path):
    """Export trained model to use it as a frozen and pruned inference graph in
       mobile applications. """
    pred_config = PredictConfig(session_init=get_model_loader(model_path),
                                model=Model(),
                                input_names=['input_img'],
                                output_names=['prediction_img'])
    ModelExporter(pred_config).export_compact('/tmp/compact_graph.pb')
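
A minimal sketch of running the frozen graph exported above (TF1-style API). The tensor names follow the input_names/output_names of the PredictConfig; the dummy input shape is an assumption and must match the model's expected input.

import numpy as np
import tensorflow as tf

# Read the frozen, pruned graph produced by export_compact.
with tf.io.gfile.GFile('/tmp/compact_graph.pb', 'rb') as f:
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.compat.v1.import_graph_def(graph_def, name='')
    with tf.compat.v1.Session(graph=graph) as sess:
        # Tensor names are the exported input/output names plus ':0'.
        dummy = np.zeros((1, 256, 256, 3), np.float32)  # assumed input shape
        prediction = sess.run('prediction_img:0',
                              feed_dict={'input_img:0': dummy})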
Example #3
def export_compact(model_path):
    """Export trained model to use it as a frozen and pruned inference graph in
       mobile applications. """
    pred_config = PredictConfig(
        session_init=SmartInit(model_path),
        model=Model(),
        input_names=['input', 'mask', 'z2'],
        output_names=['gen/genRGB/add'])
    ModelExporter(pred_config).export_compact(os.path.join(os.path.dirname(model_path), 'frozen_model.pb'))
Example #4
def export(args):
    model = AttentionOCR()
    predcfg = PredictConfig(
        model=model,
        session_init=SmartInit(args.checkpoint_path),
        input_names=model.get_inferene_tensor_names()[0],
        output_names=model.get_inferene_tensor_names()[1])

    ModelExporter(predcfg).export_compact(args.pb_path, optimize=False)
Example #5
def export(params, model_path, export_type, output_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    model_params = params['model']
    model = BaseVQVAE.from_params(model_params)

    pred_config = PredictConfig(
        session_init=get_model_loader(model_path),
        model=model,
        input_names=['input'],
        output_names=['x_recon', 'embeddings', 'latent_zq'])
    if export_type == 'compact':
        checkpoint_name = os.path.split(model_path)[1]
        ModelExporter(pred_config).export_compact(
            os.path.join(output_dir, 'model.pb'))
    else:
        ModelExporter(pred_config).export_serving(
            os.path.join(output_dir, 'exported'))
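
A hypothetical invocation of the export helper above; the YAML config path, checkpoint path, and output directory are assumptions, not part of the original project.

import yaml

# Load hyper-parameters from a YAML file that contains a 'model' section.
with open('configs/vqvae.yaml') as f:
    params = yaml.safe_load(f)

# Export a frozen graph; pass 'serving' instead of 'compact' for a SavedModel.
export(params, 'train_log/vqvae/model-100000', 'compact', 'export/')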
Example #6
    def run(self):
        exporter = ModelExporter(self.gen_pred_config())
        rm_n_mkdir(self.model_export_dir)
        exporter.export_compact(
            filename="{}/compact.pb".format(self.model_export_dir))
        exporter.export_serving(
            os.path.join(self.model_export_dir, "serving"),
            signature_name="serving_default",
        )
        print(f"Saved model to {self.model_export_dir}.")
Example #7
    def run(self, save_only): 
        if self.inf_auto_find_chkpt:
            dir_names = [name for name in os.listdir(self.save_dir)
                         if os.path.isdir(os.path.join(self.save_dir, name))]
            self.inf_model_path = os.path.join(
                self.save_dir, str(max(int(name) for name in dir_names)))
            print(f"Inference model path: <{self.inf_model_path}>")
            print('-----Auto-Selecting Checkpoint Based On "%s" Through "%s" Comparison' %
                  (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.inf_model_path, self.inf_auto_metric, self.inf_auto_comparator)
            print('Selecting: %s' % model_path)
            print('Having Following Statistics:')
            for key, value in stat.items():
                print('\t%s: %s' % (key, value))
        else:
            model_path = self.inf_model_path

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model        = model_constructor(),
            session_init = get_model_loader(model_path),
            input_names  = self.eval_inf_input_tensor_names,
            output_names = self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)
        
        if save_only:
            exporter = ModelExporter(pred_config)
            rm_n_mkdir(self.model_export_dir)
            print ('{}/compact.pb'.format(self.model_export_dir))
            exporter.export_compact(filename='{}/compact.pb'.format(self.model_export_dir))
            exporter.export_serving(os.path.join(self.model_export_dir, 'serving'), signature_name='serving_default')
            return

        for num, data_dir in enumerate(self.inf_data_list):
            save_dir = os.path.join(self.inf_output_dir, str(num))

            file_list = glob.glob(os.path.join(data_dir, '*{}'.format(self.inf_imgs_ext)))
            file_list.sort() # ensure same order

            rm_n_mkdir(save_dir)
            for filename in file_list:
                filename = os.path.basename(filename)
                basename = filename.split('.')[0]
                print(data_dir, basename, end=' ', flush=True)

                ##
                img = cv2.imread(os.path.join(data_dir, filename))
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                ##
                pred_map = self.__gen_prediction(img, predictor)
                sio.savemat(os.path.join(save_dir,'{}.mat'.format(basename)), {'result':[pred_map]})
                print(f"Finished. {datetime.now().strftime('%H:%M:%S.%f')}")
Example #8
File: misc.py  Project: murph3d/DLD
def save_model(model_paths, model, target="", compact=False):
    """Save a model to given dir"""
    from os import path
    from os import makedirs

    import tensorpack as tp

    from tensorpack.tfutils.varmanip import get_checkpoint_path
    from tensorpack.tfutils.export import ModelExporter

    import misc.logger as logger
    _L = logger.getLogger("Saver")

    # compare by value, not identity: save next to the model if no target given
    save_to_modeldir = target == ""

    for model_path in model_paths:
        # get model path
        real_path = get_checkpoint_path(model_path)
        abs_p = path.realpath(model_path)
        if (not path.isfile(abs_p)):
            _L.error("{} is not a model file".format(model_path))
            continue

        # save to same folder as model
        if (save_to_modeldir):
            target = path.dirname(abs_p)

        # make sure the folder exists
        if not path.exists(target):
            makedirs(target)

        conf = tp.PredictConfig(session_init=tp.get_model_loader(model_path),
                                model=model,
                                input_names=["input"],
                                output_names=["emb"])

        exporter = ModelExporter(conf)
        if (compact):
            out = path.join(target, "{}.pb".format(path.basename(real_path)))
            _L.info("saving {} to {}".format(path.basename(real_path), out))
            exporter.export_compact(out)
        else:
            _L.info("compact saving {} to {}".format(path.basename(real_path),
                                                     target))
            exporter.export_serving(target)
Example #9
def export_eval_protobuf_model(checkpoint_dir, model_name, dataset, quant_type,
                               output_file, batch_size):
    _, test_data, (img_shape, label_shape) = datasets.DATASETS[dataset]()

    model_func, input_spec, output_spec = get_model_func(
        "eval", model_name, quant_type, img_shape, label_shape[0])
    input_names = [i.name for i in input_spec]
    output_names = [o.name for o in output_spec]
    predictor_config = PredictConfig(session_init=SaverRestore(checkpoint_dir +
                                                               "/checkpoint"),
                                     tower_func=model_func,
                                     input_signature=input_spec,
                                     input_names=input_names,
                                     output_names=output_names,
                                     create_graph=False)

    print("Exporting optimised protobuf graph...")
    K.set_learning_phase(False)
    ModelExporter(predictor_config).export_compact(output_file, optimize=False)

    K.clear_session()
    pred = OfflinePredictor(predictor_config)

    test_data = BatchData(test_data, batch_size, remainder=True)
    test_data.reset_state()

    num_correct = 0
    num_processed = 0
    for img, label in tqdm(test_data):
        num_correct += sum(pred(img)[0].argmax(axis=1) == label.argmax(axis=1))
        num_processed += img.shape[0]

    print("Exported model has accuracy {:.4f}".format(num_correct /
                                                      num_processed))

    return input_names, output_names, {i.name: i.shape for i in input_spec}
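
A hypothetical call to the export/evaluation routine above; the dataset key, model name, and quantization type are assumptions that depend on the project's DATASETS and model registries.

input_names, output_names, input_shapes = export_eval_protobuf_model(
    checkpoint_dir='train_log/resnet20_quant',  # directory containing 'checkpoint'
    model_name='resnet20',
    dataset='cifar10',
    quant_type='binary',
    output_file='/tmp/resnet20_eval.pb',
    batch_size=128)
print(input_names, output_names, input_shapes)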
Example #10
    finalize_configs(is_training=False)

    if args.predict or args.visualize:
        cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS

    if args.visualize:
        do_visualize(MODEL, args.load)
    else:
        predcfg = PredictConfig(
            model=MODEL,
            session_init=get_model_loader(args.load),
            input_names=MODEL.get_inference_tensor_names()[0],
            output_names=MODEL.get_inference_tensor_names()[1])

        if args.compact:
            ModelExporter(predcfg).export_compact(args.compact, optimize=False)
        elif args.serving:
            ModelExporter(predcfg).export_serving(args.serving, optimize=False)

        if args.predict:
            predictor = OfflinePredictor(predcfg)
            for image_file in args.predict:
                do_predict(predictor, image_file)
        elif args.evaluate:
            assert args.evaluate.endswith('.json'), args.evaluate
            do_evaluate(predcfg, args.evaluate)
        elif args.benchmark:
            df = get_eval_dataflow(cfg.DATA.VAL[0])
            df.reset_state()
            predictor = OfflinePredictor(predcfg)
            for img in tqdm.tqdm(df, total=len(df)):
Example #11
    # let the output follow the same path logic as the checkpoint
    if args.predict_unlabeled:
        output_dir = args.predict_unlabeled
        predict_unlabeled(MODEL, args.load, output_dir=output_dir)

    if args.visualize:
        do_visualize(MODEL, args.load, output_dir=output_dir)
    else:
        predcfg = PredictConfig(
            model=MODEL,
            session_init=SmartInit(args.load),
            input_names=MODEL.get_inference_tensor_names()[0],
            output_names=MODEL.get_inference_tensor_names()[1])

        if args.output_pb:
            ModelExporter(predcfg).export_compact(args.output_pb,
                                                  optimize=False)
        elif args.output_serving:
            ModelExporter(predcfg).export_serving(args.output_serving,
                                                  optimize=False)

        if args.predict:
            predictor = OfflinePredictor(predcfg)
            for image_file in args.predict:
                do_predict(predictor, image_file)
        elif args.evaluate:
            assert args.evaluate.endswith('.json'), args.evaluate
            do_evaluate(predcfg, args.evaluate)
        elif args.eval_unlabeled:
            assert args.eval_unlabeled.endswith('.json'), args.eval_unlabeled
            do_evaluate_unlabeled(predcfg, args.eval_unlabeled)
        elif args.benchmark:
Example #12
    finalize_configs(is_training=False)

    if args.predict or args.visualize:
        cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS

    if args.visualize:
        do_visualize(MODEL, args.load)
    else:
        predcfg = PredictConfig(
            model=MODEL,
            session_init=SmartInit(args.load),
            input_names=MODEL.get_inference_tensor_names()[0],
            output_names=MODEL.get_inference_tensor_names()[1])

        if args.compact:
            ModelExporter(predcfg).export_compact(args.compact)
        elif args.serving:
            ModelExporter(predcfg).export_serving(
                args.serving, signature_name="serving_default")

        if args.predict:
            predictor = OfflinePredictor(predcfg)
            for image_file in args.predict:
                do_predict(predictor, image_file, visualize=True)
        elif args.evaluate:
            assert args.evaluate.endswith('.json'), args.evaluate
            do_evaluate(predcfg, args.evaluate)
        elif args.benchmark:
            df = get_eval_dataflow(cfg.DATA.VAL[0])
            df.reset_state()
            predictor = OfflinePredictor(predcfg)
Example #13
        cc = list(args.predict)
        cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS

    if args.visualize:
        do_visualize(MODEL, args.load)
    else:
        predcfg = PredictConfig(
            model=MODEL,
            session_init=SmartInit(args.load),
            input_names=MODEL.get_inference_tensor_names()[0],
            output_names=MODEL.get_inference_tensor_names()[1])

        if args.output_pb:
            output_pb_path = os.path.join(os.path.dirname(args.load),
                                          args.output_pb)
            ModelExporter(predcfg).export_compact(output_pb_path,
                                                  optimize=False)
        elif args.output_serving:
            output_serving_path = os.path.join(os.path.dirname(args.load),
                                               'export/Servo',
                                               args.output_serving)
            ModelExporter(predcfg).export_serving(
                output_serving_path,
                signature_name='serving_default')  #, optimize=False
        if args.predict:
            predictor = OfflinePredictor(predcfg)
            for image_file in args.predict:
                do_predict(predictor, image_file)
        elif args.sanity_check:
            predictor = OfflinePredictor(predcfg)
            do_sanity_check(pred_func=predictor,
                            output_dir=output_dir,
Example #14
    def export(self, path):
        ModelExporter(self.inference_config(self.args)).export_compact(path)