Example #1
import argparse

from hyperpose import Config, Model, Dataset

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # earlier add_argument calls (--model_name, --model_type, --model_backbone,
    # --dataset_type, --dataset_path, --dataset_version) are truncated in this excerpt
    parser.add_argument('--eval_num',
                        type=int,
                        default=10000,
                        help='number of images to evaluate')
    parser.add_argument('--vis_num',
                        type=int,
                        default=60,
                        help='number of evaluation images to visualize')
    parser.add_argument('--multiscale',
                        action='store_true',  # type=bool would treat any non-empty string as True
                        help='enable multiscale search')

    args = parser.parse_args()
    Config.set_model_name(args.model_name)
    Config.set_model_type(Config.MODEL[args.model_type])
    Config.set_model_backbone(Config.BACKBONE[args.model_backbone])
    Config.set_dataset_type(Config.DATA[args.dataset_type])
    Config.set_dataset_path(args.dataset_path)
    Config.set_dataset_version(args.dataset_version)

    config = Config.get_config()
    model = Model.get_model(config)
    evaluate = Model.get_evaluate(config)
    dataset = Dataset.get_dataset(config)

    evaluate(model,
             dataset,
             vis_num=args.vis_num,
             total_eval_num=args.eval_num,
             enable_multiscale_search=args.multiscale)
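Assuming the script above is saved as eval.py (the filename and flag values below are illustrative, and the model/dataset flags belong to the truncated add_argument calls), it might be invoked as:

    python eval.py --model_name=my_openpose --eval_num=10000 --vis_num=60 --multiscale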
Example #2
import cv2
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from hyperpose import Config, Model, Dataset
from hyperpose.Dataset import imread_rgb_float, imwrite_rgb_float
Config.set_model_name("new_opps")
Config.set_model_type(Config.MODEL.Openpose)
config = Config.get_config()

# get and load model
model = Model.get_model(config)
weight_path = f"{config.model.model_dir}/newest_model.npz"
model.load_weights(weight_path)

# infer on a single image
ori_image = cv2.cvtColor(cv2.imread("./sample.jpeg"), cv2.COLOR_BGR2RGB)
input_image = ori_image.astype(np.float32) / 255.0
if model.data_format == "channels_first":
    input_image = np.transpose(input_image, [2, 0, 1])
    img_c, img_h, img_w = input_image.shape
else:
    img_h, img_w, img_c = input_image.shape  # channels_last keeps HWC order
conf_map, paf_map = model.infer(input_image[np.newaxis, :, :, :])

# get the visualize function, which renders part and limb heatmap images from the inferred heatmaps
visualize = Model.get_visualize(Config.MODEL.Openpose)
vis_parts_heatmap, vis_limbs_heatmap = visualize(input_image,
                                                 conf_map[0],
                                                 paf_map[0],
                                                 save_tofile=False)
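matplotlib is imported with the Agg backend above but never used; a minimal sketch of writing the returned heatmap images to disk (the figure layout and filename are illustrative, not part of the original snippet):

# save the two returned heatmap images side by side; layout and filename are illustrative
fig, (ax_parts, ax_limbs) = plt.subplots(1, 2, figsize=(12, 6))
ax_parts.imshow(vis_parts_heatmap)
ax_parts.set_title("part confidence maps")
ax_limbs.imshow(vis_limbs_heatmap)
ax_limbs.set_title("limb PAF maps")
fig.savefig("./heatmap_vis.png")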
Example #3
import cv2
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from hyperpose import Config, Model, Dataset
from hyperpose.Dataset import imread_rgb_float, imwrite_rgb_float
Config.set_model_name("openpose")
Config.set_model_type(Config.MODEL.Openpose)
config = Config.get_config()

# get and load model
model = Model.get_model(config)
weight_path = f"{config.model.model_dir}/newest_model.npz"
model.load_weights(weight_path)

# infer on a single image
ori_image = cv2.cvtColor(cv2.imread("./sample.jpg"), cv2.COLOR_BGR2RGB)
input_image = ori_image.astype(np.float32) / 255.0
if model.data_format == "channels_first":
    input_image = np.transpose(input_image, [2, 0, 1])
    img_c, img_h, img_w = input_image.shape
else:
    img_h, img_w, img_c = input_image.shape  # channels_last keeps HWC order

conf_map, paf_map = model.infer(input_image[np.newaxis, :, :, :])

# get the visualize function, which renders part and limb heatmap images from the inferred heatmaps
visualize = Model.get_visualize(Config.MODEL.Openpose)
vis_parts_heatmap, vis_limbs_heatmap = visualize(input_image,
                                                 conf_map[0],
                                                 paf_map[0],
                                                 save_tofile=False)

# get the postprocess function, which assembles the detected parts from the inferred heatmaps into humans
postprocess = Model.get_postprocess(Config.MODEL.Openpose)
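The excerpt stops right after fetching postprocess. The call below is only a sketch of how it might consume the values computed above; the argument list is an assumption, not the documented hyperpose signature, so verify it against the library before use:

# hypothetical call: argument names and order are assumed, not taken from the hyperpose docs
humans = postprocess(conf_map[0], paf_map[0], img_h, img_w,
                     data_format=model.data_format)
print(f"detected {len(humans)} humans")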
Example #4
import pathlib
import tensorflow as tf
from functools import partial
from hyperpose import Config, Model, Dataset

# load model weights from hyperpose
Config.set_model_name("new_pifpaf")
Config.set_model_type(Config.MODEL.Pifpaf)
Config.set_dataset_type(Config.DATA.MSCOCO)
config = Config.get_config()
model = Model.get_model(config)
model.load_weights(f"{config.model.model_dir}/newest_model.npz")
model.eval()

# construct the representative dataset used for quantization (here the first 100 validation images)
scale_image_func = partial(Model.common.scale_image, hin=model.hin, win=model.win, scale_rate=0.95)

def decode_image(image_file, image_id):
    image = tf.io.read_file(image_file)
    image = tf.image.decode_jpeg(image, channels=3)                # uint8 RGB in [0, 255]
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)  # float32 RGB in [0, 1]
    scaled_image, pad = tf.py_function(scale_image_func, [image], [tf.float32, tf.float32])
    return scaled_image

dataset = Dataset.get_dataset(config)
val_dataset = dataset.get_eval_dataset()
rep_dataset = val_dataset.enumerate()
rep_dataset = rep_dataset.filter(lambda i, image_data: i < 100)  # keep only the first 100 samples
rep_dataset = rep_dataset.map(lambda i, image_data: image_data)
rep_dataset = rep_dataset.map(decode_image).batch(1)
print(f"test rep_dataset: {rep_dataset}")

# convert to TF Lite using int8-only quantization
input_signature = tf.TensorSpec(shape=(None, 3, None, None), dtype=tf.float32)  # NCHW float input
converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [model.infer.get_concrete_function(x=input_signature)])
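The snippet ends before the conversion itself. A minimal sketch of the remaining int8-only flow using the standard TF Lite converter API (the generator wrapper and output filename are assumptions, not part of the original):

# feed the representative dataset to the converter as a generator of input lists
def rep_dataset_gen():
    for image in rep_dataset:
        yield [image]

converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = rep_dataset_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8   # int8 end-to-end
converter.inference_output_type = tf.int8

tflite_model = converter.convert()
pathlib.Path("pifpaf_int8.tflite").write_bytes(tflite_model)  # output filename is illustrative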