Example #3
def run_engine():
    batch_size = 2
    input_shape = (batch_size, 1, 2, 8)
    # Fill the input with 0..n-1 so the output is easy to verify by eye
    n = int(np.prod(input_shape))
    input_data = np.arange(n, dtype=np.float32).reshape(input_shape)

    # Name the instance trt_lite, not trt, to avoid shadowing the
    # tensorrt module alias
    trt_lite = TrtLite(build_engine, (input_shape[1:], ))
    trt_lite.print_info()

    # Allocate input/output buffers on the GPU (True = as PyTorch tensors)
    d_buffers = trt_lite.allocate_io_buffers(batch_size, True)

    d_buffers[0].copy_(torch.from_numpy(input_data))
    trt_lite.execute([t.data_ptr() for t in d_buffers], batch_size)
    output_data = d_buffers[1].cpu().numpy()

    print(output_data)
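
The run_engine example above relies on a build_engine callback that is not shown. In the TrtLite samples this constructor pattern hands the callback a tensorrt.Builder plus the parameter tuple given to TrtLite; the following is only a hedged sketch under that assumption, with an identity layer standing in for whatever network the original built:

def build_engine(builder, input_shape):
    # Assumed signature: TrtLite calls build_engine(builder, *params)
    builder.max_batch_size = 2
    builder.max_workspace_size = 1 << 30
    # Implicit-batch network, matching the int batch_size run_engine passes
    network = builder.create_network()
    tensor = network.add_input('data', trt.float32, input_shape)
    # Placeholder body: one identity layer, so output == input
    tensor = network.add_identity(tensor).get_output(0)
    network.mark_output(tensor)
    return builder.build_cuda_engine(network)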
class PyTorchTensorHolder(pycuda.driver.PointerHolderBase):
    # Wraps a PyTorch tensor so pycuda treats it as a device pointer
    # (PointerHolderBase is pycuda's standard hook for foreign allocations)
    def __init__(self, tensor):
        super(PyTorchTensorHolder, self).__init__()
        self.tensor = tensor

    def get_pointer(self):
        return self.tensor.data_ptr()
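
Because get_pointer exposes the tensor's device address, an instance can be handed to pycuda wherever a device allocation is expected. A minimal sketch, assuming a pycuda context is active (names below are illustrative):

import numpy as np
import torch
import pycuda.autoinit  # creates a CUDA context on import
import pycuda.driver as cuda

host_array = np.arange(16, dtype=np.float32)
gpu_tensor = torch.empty(16, dtype=torch.float32, device='cuda')

# pycuda accepts the holder as the destination device pointer
cuda.memcpy_htod(PyTorchTensorHolder(gpu_tensor), host_array)
print(gpu_tensor.cpu())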


# Benchmark both the FP32 and FP16 engines built by trtexec.
# input_data and nRound are assumed to be defined earlier in the script.
for engine_file_path in ['resnet50.trt', 'resnet50_fp16.trt']:
    if not os.path.exists(engine_file_path):
        print('Engine file', engine_file_path,
              'doesn\'t exist. Please run trtexec and re-run this script.')
        exit(1)

    print('====', engine_file_path, '====')
    # Name the instance trt_lite to avoid shadowing the tensorrt module alias
    trt_lite = TrtLite(engine_file_path=engine_file_path)
    trt_lite.print_info()
    # Map binding index 0 to the actual input shape (dynamic-shape engine)
    i2shape = {0: (1, 3, 1080, 1920)}
    io_info = trt_lite.get_io_info(i2shape)
    d_buffers = trt_lite.allocate_io_buffers(i2shape, True)

    d_buffers[0].copy_(input_data.reshape(d_buffers[0].size()))
    trt_lite.execute([t.data_ptr() for t in d_buffers], i2shape)
    output_data_trt = d_buffers[1].cpu().numpy()

    # Warmed up above; now time nRound inferences
    torch.cuda.synchronize()
    t0 = time.time()
    for i in range(nRound):
        trt_lite.execute([t.data_ptr() for t in d_buffers], i2shape)
    torch.cuda.synchronize()
    print('Time per inference: {:.3f} ms'.format(
        (time.time() - t0) / nRound * 1000))
class YoLov5TRT(object):
    """
    description: A YOLOv5 class that wraps TensorRT ops, preprocess and postprocess ops.
    """
    def __init__(self, engine_file_path, batch_size=1):

        TRT_LOGGER = trt.Logger(trt.Logger.INFO)

        self.batch_size = batch_size
        input_volume = trt.volume(INPUT_SHAPE)
        self.numpy_array = np.zeros((self.batch_size, input_volume),
                                    dtype=np.float32)  # float32 to match the engine input

        trt_yolo = TrtLite(engine_file_path=engine_file_path)
        trt_yolo.print_info()

        self.buffers = trt_yolo.allocate_io_buffers(1, True)
        self.trt_yolo = trt_yolo

        # Detect the 21 hand keypoints
        self.trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21)
        self.trt_lite21.print_info()

        # Recognize the gesture
        self.trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE)
        self.trt_lite_gesture.print_info()

    def _load_imgs(self, image_paths):
        for idx, image_path in enumerate(image_paths):
            img_np, image_raw, h, w = self.preprocess_image(image_path)
            # print("---------------------------------",img_np.shape, image_raw.shape)
            self.numpy_array[idx] = img_np.ravel()
        return self.numpy_array

    def infer_batch(self, image_paths):

        # Load all images to CPU...
        imgs = self._load_imgs(image_paths)

        # -------------------- hand detect start
        t1 = time_synchronized()

        # ...then copy them into the input device buffer
        # (self.buffers was returned earlier by allocate_io_buffers())
        self.buffers[0] = torch.from_numpy(imgs.ravel()).cuda()

        bindings = [t.data_ptr() for t in self.buffers]

        self.trt_yolo.execute(bindings, BATCH_SIZE)

        host_outputs = self.buffers[1].clone().cpu().detach().numpy()

        torch.cuda.synchronize()

        t2 = time_synchronized()
        latency_in_sec = (t2 - t1)

        print("Latency - {:.2f} ms, handle_size: {}".format(
            latency_in_sec * 1000, len(image_paths)))

        # -------------------- hand detect end

        return latency_in_sec

    def doInference(self, image_path):

        # Do image preprocess
        input_image, image_raw, origin_h, origin_w = self.preprocess_image(
            image_path)

        self.buffers[0] = torch.from_numpy(input_image.ravel()).cuda()

        bindings = [t.data_ptr() for t in self.buffers]

        self.trt_yolo.execute(bindings, BATCH_SIZE)

        host_outputs = self.buffers[1].clone().cpu().detach().numpy()

        torch.cuda.synchronize()

        print(host_outputs.shape)
        output = host_outputs.ravel()
        # Do postprocess
        result_boxes, result_scores, result_classid = self.post_process(
            output, origin_h, origin_w)

        print(output.shape, len(result_boxes))
        # Draw rectangles and labels on the original image
        for i in range(len(result_boxes)):
            box = result_boxes[i]
            print("box>>>", box)

            # Crop the hand region
            image_hand = image_raw[int(box[1]):int(box[3]),
                                   int(box[0]):int(box[2])]

            # Infer the 21 hand keypoints
            hand_data = self.preprocess_hand(image_hand)
            output21 = self.doInference_resnet(self.trt_lite21,
                                               hand_data.ravel())

            # Infer the gesture
            output_gesture = self.doInference_resnet(self.trt_lite_gesture,
                                                     hand_data.ravel())
            print("gesture:", output_gesture)
            index = np.argmax(output_gesture)
            label = labels[index]

            hand_width = int(box[2]) - int(box[0])
            hand_height = int(box[3]) - int(box[1])
            drawhand(image_hand, output21, hand_width, hand_height)

            print("w,h:", hand_width, hand_height)
            cv2.imwrite("hand_11.jpg", image_hand)

            plot_one_box(
                box,
                image_raw,
                label="{}:{:.2f}".format(label, result_scores[i]),
            )
        parent, filename = os.path.split(image_path)
        save_name = os.path.join(parent, "output_" + filename)
        # Save the annotated image next to the input
        cv2.imwrite(save_name, image_raw)

        print("Saved result to", save_name)

    def doInference_resnet(self, trt_engine, data):
        # Batch size 1, given in the int form accepted by TrtLite
        i2shape = 1
        io_info = trt_engine.get_io_info(i2shape)
        print(io_info)
        d_buffers = trt_engine.allocate_io_buffers(i2shape, True)
        print(io_info[1][2])

        d_buffers[0] = data.cuda()

        bindings = [t.data_ptr() for t in d_buffers]

        # Run inference
        trt_engine.execute(bindings, i2shape)

        # Copy the result back to the host
        output_data_trt = d_buffers[1].clone().cpu().detach().numpy()

        torch.cuda.synchronize()

        host_out = output_data_trt.ravel()

        return host_out

    def preprocess_hand(self, img):
        img_width = img.shape[1]
        img_height = img.shape[0]
        print(img.shape)
        # Preprocess the input image
        img_ = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)
        img_ = img_.astype(np.float32)
        img_ = (img_ - 128.) / 256.

        img_ = img_.transpose(2, 0, 1)
        img_ = torch.from_numpy(img_)
        img_ = img_.unsqueeze_(0)
        return img_

    def preprocess_image(self, input_image_path):
        """
        description: Read an image from image path, convert it to RGB,
                     resize and pad it to target size, normalize to [0,1],
                     transform to NCHW format.
        param:
            input_image_path: str, image path
        return:
            image:  the processed image
            image_raw: the original image
            h: original height
            w: original width
        """
        image_raw = cv2.imread(input_image_path)
        h, w, c = image_raw.shape
        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
        # Calculate width, height and paddings
        r_w = INPUT_W / w
        r_h = INPUT_H / h
        if r_h > r_w:
            tw = INPUT_W
            th = int(r_w * h)
            tx1 = tx2 = 0
            ty1 = int((INPUT_H - th) / 2)
            ty2 = INPUT_H - th - ty1
        else:
            tw = int(r_h * w)
            th = INPUT_H
            tx1 = int((INPUT_W - tw) / 2)
            tx2 = INPUT_W - tw - tx1
            ty1 = ty2 = 0
        # Resize the image with long side while maintaining ratio
        image = cv2.resize(image, (tw, th))
        # Pad the short side with (128,128,128)
        image = cv2.copyMakeBorder(image, ty1, ty2, tx1, tx2,
                                   cv2.BORDER_CONSTANT, (128, 128, 128))
        image = image.astype(np.float32)
        # Normalize to [0,1]
        image /= 255.0
        # HWC to CHW format:
        image = np.transpose(image, [2, 0, 1])
        # CHW to NCHW format
        image = np.expand_dims(image, axis=0)
        # Convert the image to row-major order, also known as "C order":
        image = np.ascontiguousarray(image)
        return image, image_raw, h, w

    def xywh2xyxy(self, origin_h, origin_w, x):
        """
        description:    Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
        param:
            origin_h:   height of original image
            origin_w:   width of original image
            x:          A boxes tensor, each row is a box [center_x, center_y, w, h]
        return:
            y:          A boxes tensor, each row is a box [x1, y1, x2, y2]
        """
        y = torch.zeros_like(x) if isinstance(
            x, torch.Tensor) else np.zeros_like(x)
        r_w = INPUT_W / origin_w
        r_h = INPUT_H / origin_h
        if r_h > r_w:
            y[:, 0] = x[:, 0] - x[:, 2] / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2 - (INPUT_H - r_w * origin_h) / 2
            y /= r_w
        else:
            y[:, 0] = x[:, 0] - x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2
            y[:, 2] = x[:, 0] + x[:, 2] / 2 - (INPUT_W - r_h * origin_w) / 2
            y[:, 1] = x[:, 1] - x[:, 3] / 2
            y[:, 3] = x[:, 1] + x[:, 3] / 2
            y /= r_h

        return y
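
    # A quick sanity check of the un-letterboxing above, using hypothetical
    # values INPUT_W = INPUT_H = 640 and a 1920x1080 source image:
    # r_w = 1/3 < r_h ~= 0.593, so the first branch applies and the vertical
    # padding is (640 - 1/3 * 1080) / 2 = 140 px. A network-space center of
    # (320, 320) therefore maps back to (320 / (1/3), (320 - 140) / (1/3))
    # = (960, 540), the exact center of the original image, as expected.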

    def post_process(self, output, origin_h, origin_w):
        """
        description: postprocess the prediction
        param:
            output:     A tensor like [num_boxes, cx,cy,w,h,conf,cls_id, cx,cy,w,h,conf,cls_id, ...]
            origin_h:   height of original image
            origin_w:   width of original image
        return:
            result_boxes: final boxes, a tensor where each row is a box [x1, y1, x2, y2]
            result_scores: final scores, a tensor where each element is the score corresponding to a box
            result_classid: final class ids, a tensor where each element is the class id corresponding to a box
        """
        # Get the num of boxes detected
        num = int(output[0])
        # Reshape to a two-dimensional ndarray
        pred = np.reshape(output[1:], (-1, 6))[:num, :]
        # to a torch Tensor
        pred = torch.Tensor(pred).cuda()
        # Get the boxes
        boxes = pred[:, :4]
        # Get the scores
        scores = pred[:, 4]
        # Get the classid
        classid = pred[:, 5]
        # Choose those boxes that score > CONF_THRESH
        si = scores > CONF_THRESH
        boxes = boxes[si, :]
        scores = scores[si]
        classid = classid[si]
        # Transform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2]
        boxes = self.xywh2xyxy(origin_h, origin_w, boxes)
        # Do nms
        indices = torchvision.ops.nms(boxes,
                                      scores,
                                      iou_threshold=IOU_THRESHOLD).cpu()
        result_boxes = boxes[indices, :].cpu()
        result_scores = scores[indices].cpu()
        result_classid = classid[indices].cpu()
        return result_boxes, result_scores, result_classid
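
A minimal usage sketch for the class above. The globals (INPUT_W, INPUT_H, BATCH_SIZE, CONF_THRESH, IOU_THRESHOLD, labels, the engine paths) are assumed to be configured elsewhere in the script; the file names below are hypothetical:

if __name__ == '__main__':
    # Hand-detector engine built beforehand, e.g. with trtexec
    yolo = YoLov5TRT('yolov5_hand.trt', batch_size=1)
    # Single image: detect hands, then run the keypoint and gesture models
    yolo.doInference('images/1.jpg')
    # Batched path: reports end-to-end latency for a list of images
    yolo.infer_batch(['images/1.jpg'])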
Example #6
    else:

        img = cv.imread("images/1.jpg")
        img_width = img.shape[1]
        img_height = img.shape[0]
        # Preprocess the input image
        # Note: cv.resize takes dsize as (width, height)
        img_ = cv.resize(img, (INPUT_W, INPUT_H), interpolation=cv.INTER_CUBIC)
        img_ = img_.astype(np.float32)
        img_ = (img_ - 128.) / 256.

        img_ = img_.transpose(2, 0, 1)
        img_ = torch.from_numpy(img_)
        img_ = img_.unsqueeze_(0)

        # Detect the 21 hand keypoints
        trt_lite21 = TrtLite(engine_file_path=ENGINE_PATH_21)
        trt_lite21.print_info()

        # Recognize the gesture
        trt_lite_gesture = TrtLite(engine_file_path=ENGINE_PATH_GESTURE)
        trt_lite_gesture.print_info()
        # data = np.ones(BATCH_SIZE * 3 * INPUT_H * INPUT_W,dtype=np.float32)
        # host_in = cuda.pagelocked_empty(BATCH_SIZE * 3 * INPUT_H * INPUT_W,dtype = np.float32)
        #
        # # np.copyto(host_in,data.ravel())
        # np.copyto(host_in, img_.ravel())
        #
        # host_out = cuda.pagelocked_empty(OUTPUT_SIZE,dtype = np.float32)
        #
        # doInference(context,host_in,host_out,BATCH_SIZE)
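
For reference, the doInference referenced by the commented-out block above usually follows the classic TensorRT/pycuda sample shape. A hedged sketch, assuming an implicit-batch engine with one float32 input and one float32 output binding (the pycuda import and buffer names are illustrative):

import pycuda.driver as cuda

def doInference(context, host_in, host_out, batch_size):
    d_in = cuda.mem_alloc(host_in.nbytes)
    d_out = cuda.mem_alloc(host_out.nbytes)
    stream = cuda.Stream()
    # Host -> device copy, execution and device -> host copy share one stream
    cuda.memcpy_htod_async(d_in, host_in, stream)
    context.execute_async(batch_size, [int(d_in), int(d_out)], stream.handle)
    cuda.memcpy_dtoh_async(host_out, d_out, stream)
    stream.synchronize()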