# All examples below assume these imports, plus the project helpers
# `normalize` and `pad_width` (sketched after the first example):
import cv2
import numpy as np
import torch

def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu,
               pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256):
    height, width, _ = img.shape
    scale = net_input_height_size / height

    scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    scaled_img = normalize(scaled_img, img_mean, img_scale)
    min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
    padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)

    tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
    if not cpu:
        tensor_img = tensor_img.cuda()

    stages_output = net(tensor_img)

    stage2_heatmaps = stages_output[-2]
    heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
    heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)

    stage2_pafs = stages_output[-1]
    pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
    pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)

    return heatmaps, pafs, scale, pad
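
The helpers `normalize` and `pad_width` used above come from the surrounding project and are not shown in the listing. Below is a minimal sketch consistent with their call sites; the exact signatures and the [top, left, bottom, right] layout of `pad` are inferred from how the results are used, not confirmed by this listing:

import math

import cv2
import numpy as np

def normalize(img, img_mean, img_scale):
    # Subtract the per-channel mean, then scale; yields float32 values
    # roughly in [-0.5, 0.5) for the defaults above.
    return (np.float32(img) - img_mean) * img_scale

def pad_width(img, stride, pad_value, min_dims):
    # Grow the image to at least `min_dims` (height, width), rounded up to a
    # multiple of `stride`, centering the original content. Returns the padded
    # image and the per-side padding as [top, left, bottom, right].
    h, w, _ = img.shape
    target_h = math.ceil(max(min_dims[0], h) / stride) * stride
    target_w = math.ceil(max(min_dims[1], w) / stride) * stride
    top, left = (target_h - h) // 2, (target_w - w) // 2
    pad = [top, left, target_h - h - top, target_w - w - left]
    padded_img = cv2.copyMakeBorder(img, pad[0], pad[2], pad[1], pad[3],
                                    cv2.BORDER_CONSTANT, value=pad_value)
    return padded_img, pad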
Example #2
def infer_fast(net, img, net_input_height_size, stride, upsample_ratio, cpu,
               pad_value=(0, 0, 0), img_mean=(128, 128, 128), img_scale=1/256, openvino=False):
    height, width, _ = img.shape
    scale = net_input_height_size / height

    scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    if not openvino:  # OpenVINO model doesn't require input to be normalized
        scaled_img = normalize(scaled_img, img_mean, img_scale)
    min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
    padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)

    if openvino:
        reshaped_img = np.transpose(padded_img, (2, 0, 1))
        res = net.infer(inputs={next(iter(net.inputs)): reshaped_img})
        pafs = res['Mconv7_stage2_L1']
        heatmaps = res['Mconv7_stage2_L2']
        pafs = np.transpose(pafs.squeeze(), (1, 2, 0))
        heatmaps = np.transpose(heatmaps.squeeze(), (1, 2, 0))
    else:
        tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float()
        if not cpu:
            tensor_img = tensor_img.cuda()

        stages_output = net(tensor_img)

        stage2_heatmaps = stages_output[-2]
        heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))

        stage2_pafs = stages_output[-1]
        pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))

    heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
    pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)

    return heatmaps, pafs, scale, pad
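
Both backends return the same quadruple, and the extra `scale` and `pad` values exist so that coordinates found in the upsampled heatmaps can be mapped back onto the original image. A sketch of that inverse mapping, matching how such outputs are typically consumed (the [top, left, bottom, right] pad layout is the same assumption as above):

def to_original_coords(x, y, stride, upsample_ratio, pad, scale):
    # One heatmap pixel covers stride / upsample_ratio pixels of the padded,
    # scaled input; undo the upsampling, then the padding, then the resize.
    x_input = x * stride / upsample_ratio - pad[1]  # pad[1] = left padding
    y_input = y * stride / upsample_ratio - pad[0]  # pad[0] = top padding
    return x_input / scale, y_input / scale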
Example #3
def infer_fast(net,
               img,
               net_input_height_size,
               stride,
               upsample_ratio,
               cpu,
               pad_value=(0, 0, 0),
               img_mean=(128, 128, 128),
               img_scale=1 / 256):
    height, width, _ = img.shape
    scale = net_input_height_size / height

    scaled_img = cv2.resize(img, (0, 0),
                            fx=scale,
                            fy=scale,
                            interpolation=cv2.INTER_CUBIC)
    scaled_img = normalize(scaled_img, img_mean, img_scale)
    min_dims = [
        net_input_height_size,
        max(scaled_img.shape[1], net_input_height_size)
    ]
    padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)

    tensor_img = torch.from_numpy(padded_img).permute(2, 0,
                                                      1).unsqueeze(0).float()
    if not cpu:
        tensor_img = tensor_img.cuda()

    stages_output = net(tensor_img)

    stage2_heatmaps = stages_output[-2]
    heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(),
                            (1, 2, 0))
    heatmaps = cv2.resize(heatmaps, (0, 0),
                          fx=upsample_ratio,
                          fy=upsample_ratio,
                          interpolation=cv2.INTER_CUBIC)

    # Optional debug view: overlay each keypoint heatmap on the input image
    # (assumes a `kp_names` list of keypoint names defined elsewhere).
    # for i in range(heatmaps.shape[-1]):
    #     heatmap = heatmaps[:,:,i]
    #     heat_map = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
    #     heat_map = np.fmin(np.fmax(heat_map, 0.0), 1.0)
    #     heat_map = np.expand_dims(np.uint8(heat_map * 255), -1)
    #     debug_map = img // 2 + heat_map // 2
    #     win_name = 'keypoint: {}'.format(kp_names[i])
    #     cv2.imshow(win_name, debug_map)
    #     cv2.moveWindow(win_name, 0, 0)
    #     cv2.waitKey(0)
    #     cv2.destroyWindow(win_name)

    stage2_pafs = stages_output[-1]
    pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
    pafs = cv2.resize(pafs, (0, 0),
                      fx=upsample_ratio,
                      fy=upsample_ratio,
                      interpolation=cv2.INTER_CUBIC)

    # Optional debug view: render each PAF pair as color channels over the
    # input image (assumes `BODY_PARTS_PAF_IDS` and `pf_names` defined elsewhere).
    # for i in range(pafs.shape[-1] // 2):
    #     paf = np.zeros((pafs.shape[0], pafs.shape[1], 3), dtype=np.float32)
    #     pair = BODY_PARTS_PAF_IDS[i]
    #     paf[:,:,2] = np.abs(pafs[:,:,pair[0]])
    #     paf[:,:,1] = np.abs(pafs[:,:,pair[1]])
    #     paf = np.fmin(np.fmax(paf, -1.0), 1.0)
    #     paf = cv2.resize(paf, (img.shape[1], img.shape[0]))
    #     paf = np.uint8(abs(paf) * 255)
    #     debug_map = img // 2 + paf // 2
    #     win_name = 'part: {}'.format(pf_names[i])
    #     cv2.imshow(win_name, debug_map)
    #     cv2.moveWindow(win_name, 0, 0)
    #     cv2.waitKey(0)
    #     cv2.destroyWindow(win_name)

    return heatmaps, pafs, scale, pad
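
The commented-out blocks above only visualize raw outputs. To turn the returned heatmaps into candidate keypoints, a simple peak-picking pass per channel is enough; the sketch below uses SciPy's maximum_filter, and the 0.1 threshold is an illustrative choice, not a value from this listing. In OpenPose-style models the last heatmap channel is usually a background map, so callers typically skip it.

import numpy as np
from scipy.ndimage import maximum_filter

def extract_peaks(heatmaps, threshold=0.1):
    # For each channel, keep pixels that are above `threshold` and equal to
    # the maximum of their 3x3 neighborhood. Returns (x, y, score) triples.
    peaks_per_channel = []
    for c in range(heatmaps.shape[-1]):
        hm = heatmaps[:, :, c]
        is_peak = (maximum_filter(hm, size=3) == hm) & (hm > threshold)
        ys, xs = np.nonzero(is_peak)
        peaks_per_channel.append(list(zip(xs, ys, hm[ys, xs])))
    return peaks_per_channel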
Example #4
def infer_fast(model,
               img,
               net_input_height_size,
               stride,
               upsample_ratio,
               cpu,
               pad_value=(0, 0, 0),
               img_mean=(128, 128, 128),
               img_scale=1 / 256):
    """[summary]

    Args:
        model ([type]): [description]
        img ([type]): [description]
        net_input_height_size ([type]): [description]
        stride ([type]): [description]
        upsample_ratio ([type]): [description]
        cpu ([type]): [description]
        pad_value (tuple, optional): [description]. Defaults to (0, 0, 0).
        img_mean (tuple, optional): [description]. Defaults to (128, 128, 128).
        img_scale ([type], optional): [description]. Defaults to 1/256.

    Returns:
        [type]: [description]
    """
    height, _, _ = img.shape
    scale = net_input_height_size / height

    scaled_img = cv2.resize(img, (0, 0),
                            fx=scale,
                            fy=scale,
                            interpolation=cv2.INTER_CUBIC)
    scaled_img = normalize(scaled_img, img_mean, img_scale)
    min_dims = [
        net_input_height_size,
        max(scaled_img.shape[1], net_input_height_size)
    ]
    padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)

    tensor_img = torch.from_numpy(padded_img).permute(2, 0,
                                                      1).unsqueeze(0).float()
    if not cpu:
        tensor_img = tensor_img.cuda()

    stages_output = model(tensor_img)

    stage2_heatmaps = stages_output[-2]
    heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(),
                            (1, 2, 0))
    heatmaps = cv2.resize(heatmaps, (0, 0),
                          fx=upsample_ratio,
                          fy=upsample_ratio,
                          interpolation=cv2.INTER_CUBIC)

    stage2_pafs = stages_output[-1]
    pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
    pafs = cv2.resize(pafs, (0, 0),
                      fx=upsample_ratio,
                      fy=upsample_ratio,
                      interpolation=cv2.INTER_CUBIC)

    return heatmaps, pafs, scale, pad
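
Tying it together, a call site looks roughly like the following. The stand-in network and the chosen values (256-pixel input height, stride 8, 4x upsampling, 19 heatmap / 38 PAF channels) reflect common OpenPose-style defaults and are assumptions here, not values taken from this listing:

import numpy as np
import torch

class DummyPoseNet(torch.nn.Module):
    # Stand-in with the interface infer_fast expects: the last two stage
    # outputs are heatmaps and PAFs at 1/8 of the input resolution.
    def forward(self, x):
        n, _, h, w = x.shape
        heatmaps = torch.zeros(n, 19, h // 8, w // 8)  # 18 keypoints + background
        pafs = torch.zeros(n, 38, h // 8, w // 8)      # x/y field per limb
        return [heatmaps, pafs]

net = DummyPoseNet().eval()
img = np.uint8(np.random.rand(480, 640, 3) * 255)  # stand-in for cv2.imread(...)
heatmaps, pafs, scale, pad = infer_fast(net, img, net_input_height_size=256,
                                        stride=8, upsample_ratio=4, cpu=True)
print(heatmaps.shape, pafs.shape, scale, pad)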