Example #1
File: vis.py  Project: Mrggggg/DensePose
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    """Visualizes keypoints (adapted from vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
    """
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)

    # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)

    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (
        kps[:2, dataset_keypoints.index('right_shoulder')] +
        kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (
        kps[:2, dataset_keypoints.index('right_hip')] +
        kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(
        kps[2, dataset_keypoints.index('right_hip')],
        kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
            color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(mid_hip),
            color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)

    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = kps[0, i1], kps[1, i1]
        p2 = kps[0, i2], kps[1, i2]
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(
                kp_mask, p1, p2,
                color=colors[l], thickness=2, lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(
                kp_mask, p1,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(
                kp_mask, p2,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)

    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
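A quick way to exercise vis_keypoints is to pass it a keypoint array of the documented shape. The sketch below is illustrative only: it assumes the function is importable from Detectron's detectron.utils.vis (adjust the import to your checkout), that keypoints follow the 17-point COCO layout, and that the image paths are placeholders.

import cv2
import numpy as np
# Hypothetical import path; point it at wherever vis_keypoints lives in your checkout.
from detectron.utils.vis import vis_keypoints

img = cv2.imread('person.jpg')          # BGR image, as OpenCV loads it
num_keypoints = 17                      # COCO person keypoints (assumed layout)
# Rows are (x, y, logit, prob) per the docstring; all-zero logits stay below
# kp_thresh, so nothing is drawn, but the call shows the expected shapes.
kps = np.zeros((4, num_keypoints), dtype=np.float32)

overlay = vis_keypoints(img, kps, kp_thresh=2, alpha=0.7)
cv2.imwrite('person_keypoints.jpg', overlay)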
Example #2
File: vis.py  Project: Mrggggg/DensePose
def vis_one_image(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, body_uv=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(
                        kps[0, i1], kps[1, i1], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(
                        kps[0, i2], kps[1, i2], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                    alpha=0.7)
                
    # DensePose visualization: composite the per-box IUV outputs into one full-size image.
    IUV_fields = body_uv[1]

    All_Coords = np.zeros(im.shape)
    All_inds = np.zeros([im.shape[0], im.shape[1]])
    K = 26

    inds = np.argsort(boxes[:, 4])
    for i, ind in enumerate(inds):
        entry = boxes[ind, :]
        if entry[4] > 0.65:
            entry = entry[0:4].astype(int)
            output = IUV_fields[ind]
            # Paste this box's IUV patch, keeping pixels already filled by earlier boxes.
            All_Coords_Old = All_Coords[entry[1]:entry[1] + output.shape[1],
                                        entry[0]:entry[0] + output.shape[2], :]
            All_Coords_Old[All_Coords_Old == 0] = output.transpose(
                [1, 2, 0])[All_Coords_Old == 0]
            All_Coords[entry[1]:entry[1] + output.shape[1],
                       entry[0]:entry[0] + output.shape[2], :] = All_Coords_Old
            # Instance index map; note enumerate starts at 0, so the first instance
            # shares index 0 with the background (later examples use i + 1 instead).
            CurrentMask = (output[0, :, :] > 0).astype(np.float32)
            All_inds_old = All_inds[entry[1]:entry[1] + output.shape[1],
                                    entry[0]:entry[0] + output.shape[2]]
            All_inds_old[All_inds_old == 0] = CurrentMask[All_inds_old == 0] * i
            All_inds[entry[1]:entry[1] + output.shape[1],
                     entry[0]:entry[0] + output.shape[2]] = All_inds_old

    # Scale the U/V channels to [0, 255] and clamp before saving as uint8 images.
    All_Coords[:, :, 1:3] = 255. * All_Coords[:, :, 1:3]
    All_Coords[All_Coords > 255] = 255.
    All_Coords = All_Coords.astype(np.uint8)
    All_inds = All_inds.astype(np.uint8)

    IUV_SaveName = os.path.basename(im_name).split('.')[0] + '_IUV.png'
    INDS_SaveName = os.path.basename(im_name).split('.')[0] + '_INDS.png'
    cv2.imwrite(os.path.join(output_dir, '{}'.format(IUV_SaveName)), All_Coords)
    cv2.imwrite(os.path.join(output_dir, '{}'.format(INDS_SaveName)), All_inds)
    print('IUV written to: ', os.path.join(output_dir, '{}'.format(IUV_SaveName)))
    # DensePose visualization done.
    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
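The heart of the IUV compositing above is a "paste only where still empty" update. The following self-contained numpy sketch, with made-up shapes and box coordinates, shows that single step in isolation: a per-box (3, h, w) patch is written into the full-image canvas only at pixels the canvas has not filled yet, so earlier boxes keep their values.

import numpy as np

canvas = np.zeros((480, 640, 3), dtype=np.float32)      # full-image IUV accumulator
patch = np.random.rand(3, 100, 80).astype(np.float32)   # fake network output for one box
x1, y1 = 200, 150                                        # assumed box top-left corner

region = canvas[y1:y1 + patch.shape[1], x1:x1 + patch.shape[2], :]
# Only zero (still unfilled) pixels receive the new patch values.
region[region == 0] = patch.transpose([1, 2, 0])[region == 0]
canvas[y1:y1 + patch.shape[1], x1:x1 + patch.shape[2], :] = region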
Example #3
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    """Visualizes keypoints (adapted from vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
    """
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)

    # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)

    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (kps[:2, dataset_keypoints.index('right_shoulder')] +
                    kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
               kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(kps[2, dataset_keypoints.index('right_hip')],
                            kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(kp_mask,
                 tuple(mid_shoulder),
                 tuple(kps[:2, nose_idx]),
                 color=colors[len(kp_lines)],
                 thickness=2,
                 lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(kp_mask,
                 tuple(mid_shoulder),
                 tuple(mid_hip),
                 color=colors[len(kp_lines) + 1],
                 thickness=2,
                 lineType=cv2.LINE_AA)

    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = kps[0, i1], kps[1, i1]
        p2 = kps[0, i2], kps[1, i2]
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(kp_mask,
                     p1,
                     p2,
                     color=colors[l],
                     thickness=2,
                     lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(kp_mask,
                       p1,
                       radius=3,
                       color=colors[l],
                       thickness=-1,
                       lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(kp_mask,
                       p2,
                       radius=3,
                       color=colors[l],
                       thickness=-1,
                       lineType=cv2.LINE_AA)

    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
Example #4
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='pdf',
                  out_when_no_box=False):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if (boxes is None or boxes.shape[0] == 0
            or max(boxes[:, 4]) < thresh) and not out_when_no_box:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    if boxes is None:
        sorted_inds = []  # avoid crash when 'boxes' is None
    else:
        # Display in largest to smallest order to reduce occlusion
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string(classes[i], score, dataset),
                    fontsize=3,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
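Note that the "_, contour, hier = cv2.findContours(...)" unpacking in this example assumes OpenCV 3.x, where the call returns three values; OpenCV 2.x and 4.x return only the contour list and the hierarchy. Example #5 below sidesteps this by indexing [-2], which works on all three; a minimal version-agnostic sketch with a toy mask:

import cv2
import numpy as np

mask = np.zeros((64, 64), dtype=np.uint8)
cv2.rectangle(mask, (10, 10), (40, 40), 255, -1)   # filled square as a toy binary mask

# The contour list is always the second-to-last returned item, regardless of version.
contours = cv2.findContours(mask.copy(), cv2.RETR_CCOMP,
                            cv2.CHAIN_APPROX_NONE)[-2]
print(len(contours))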
Example #5
File: vis.py  Project: tebin/DensePose
def vis_one_image(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, body_uv=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='pdf', out_when_no_box=False):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if (boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh) and not out_when_no_box:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    if boxes is None:
        sorted_inds = [] # avoid crash when 'boxes' is None
    else:
        # Display in largest to smallest order to reduce occlusion
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            contour = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)[-2]

            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(
                        kps[0, i1], kps[1, i1], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(
                        kps[0, i2], kps[1, i2], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                    alpha=0.7)

    ### DensePose Visualization Starts!!
    # get full IUV image outputs for all bboxes
    IUV_fields = body_uv[1]
    # initialize IUV output and INDS output images with zeros
    All_coords = np.zeros(im.shape, dtype=np.float32)  # shape: (im_height, im_width, 3)
    All_inds = np.zeros([im.shape[0], im.shape[1]], dtype=np.float32)  # shape: (im_height, im_width)

    # Process detections in ascending score order. Pixels already filled by a lower-scoring
    # (and possibly less accurate) bbox are kept, so the overlapping output of a more precise,
    # higher-scoring bbox is discarded there, which can cause artifacts on some body parts.
    inds = np.argsort(boxes[:, 4])
    for i, ind in enumerate(inds):
        score = boxes[ind, 4]
        bbox = boxes[ind, :4]
        if score > 0.65:
            # top left corner (x1, y1) of current bbox in image space
            x1, y1 = boxes[ind, :2].astype(int)
            # get IUV output for current bbox
            output = IUV_fields[ind]
            out_height, out_width = output.shape[1:3]

            # first, locate the region of current bbox on final IUV output image
            All_coords_tmp = All_coords[y1:y1 + out_height, x1:x1 + out_width]
            # then, fill only the pixels in this bbox that have not already been filled by another bbox's IUV output
            All_coords_tmp[All_coords_tmp == 0] = output.transpose([1, 2, 0])[All_coords_tmp == 0]
            # update final IUV output image
            All_coords[y1:y1 + out_height, x1:x1 + out_width] = All_coords_tmp

            # get (distinct) human-body FG mask indices for each bbox
            index_UV = output[0]  # predicted part index: 0 ~ 24
            CurrentMask = (index_UV > 0).astype(np.float32)
            All_inds_tmp = All_inds[y1:y1 + out_height, x1:x1 + out_width]
            All_inds_tmp[All_inds_tmp == 0] = CurrentMask[All_inds_tmp == 0] * (i + 1)  # avoid `i` starting with 0
            All_inds[y1:y1 + out_height, x1:x1 + out_width] = All_inds_tmp

    # scale predicted UV coordinates to [0, 255]
    All_coords[:, :, 1:3] = All_coords[:, :, 1:3] * 255.
    All_coords[All_coords > 255] = 255.
    All_coords = All_coords.astype(np.uint8)
    All_inds = All_inds.astype(np.uint8)
    # save IUV images into files
    IUV_SaveName = os.path.basename(im_name).split('.')[0] + '_IUV.png'
    INDS_SaveName = os.path.basename(im_name).split('.')[0] + '_INDS.png'
    cv2.imwrite(os.path.join(output_dir, '{}'.format(IUV_SaveName)), All_coords)
    cv2.imwrite(os.path.join(output_dir, '{}'.format(INDS_SaveName)), All_inds)
    print('IUV written to: ', os.path.join(output_dir, '{}'.format(IUV_SaveName)))
    ### DensePose Visualization Done!!

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
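The _IUV.png written here packs the predicted part index in channel 0 and the U/V coordinates, scaled to [0, 255], in channels 1 and 2, while _INDS.png stores a per-pixel instance index. A hedged read-back sketch (file names are placeholders; 8-bit PNGs round-trip through OpenCV with channels unchanged):

import cv2

iuv = cv2.imread('demo_IUV.png')            # (H, W, 3) uint8
inds = cv2.imread('demo_INDS.png', 0)       # single-channel instance index map

part_index = iuv[:, :, 0]    # 0 = background, 1..24 = body part, as produced above
u_coord = iuv[:, :, 1] / 255.0
v_coord = iuv[:, :, 2] / 255.0
person_mask = inds > 0       # pixels covered by any detected person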
Example #6
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  body_uv=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string(classes[i], score, dataset),
                    fontsize=3,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

    #   DensePose Visualization Starts!!
    ##  Get full IUV image out
    IUV_fields = body_uv[1]
    #
    All_Coords = np.zeros(im.shape)
    All_inds = np.zeros([im.shape[0], im.shape[1]])
    K = 26
    ##
    inds = np.argsort(-boxes[:, 4])
    ##
    for i, ind in enumerate(inds):
        entry = boxes[ind, :]
        if entry[4] >= thresh:
            entry = entry[0:4].astype(int)
            ####
            output = IUV_fields[ind]
            ####
            All_Coords_Old = All_Coords[entry[1]:entry[1] + output.shape[1],
                                        entry[0]:entry[0] + output.shape[2], :]
            All_Coords_Old[All_Coords_Old == 0] = output.transpose(
                [1, 2, 0])[All_Coords_Old == 0]
            All_Coords[entry[1]:entry[1] + output.shape[1],
                       entry[0]:entry[0] + output.shape[2], :] = All_Coords_Old
            ###
            CurrentMask = (output[0, :, :] > 0).astype(np.float32)
            All_inds_old = All_inds[entry[1]:entry[1] + output.shape[1],
                                    entry[0]:entry[0] + output.shape[2]]
            All_inds_old[All_inds_old ==
                         0] = CurrentMask[All_inds_old == 0] * (i + 1)
            All_inds[entry[1]:entry[1] + output.shape[1],
                     entry[0]:entry[0] + output.shape[2]] = All_inds_old
    #
    All_Coords[:, :, 1:3] = 255. * All_Coords[:, :, 1:3]
    All_Coords[All_Coords > 255] = 255.
    All_Coords = All_Coords.astype(np.uint8)
    All_inds = All_inds.astype(np.uint8)
    #
    IUV_SaveName = os.path.basename(im_name).split('.')[0] + '_IUV.png'
    INDS_SaveName = os.path.basename(im_name).split('.')[0] + '_INDS.png'
    cv2.imwrite(os.path.join(output_dir, '{}'.format(IUV_SaveName)),
                All_Coords)
    cv2.imwrite(os.path.join(output_dir, '{}'.format(INDS_SaveName)), All_inds)
    print('IUV written to: ',
          os.path.join(output_dir, '{}'.format(IUV_SaveName)))
    ###
    ### DensePose Visualization Done!!
    #
    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
Example #7
def vis_one_image_Tarik_skeleton(im,
                                 im_name,
                                 output_dir,
                                 boxes,
                                 segms=None,
                                 keypoints=None,
                                 thresh=0.9,
                                 kp_thresh=2,
                                 dpi=200,
                                 box_alpha=0.0,
                                 dataset=None,
                                 show_class=False,
                                 ext='pdf',
                                 out_when_no_box=False,
                                 is_color_skeleton=False,
                                 color_skeleton='w'):
    """Visual debugging of detections."""

    import matplotlib
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    from PIL import Image, ImageDraw

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if (boxes is None or boxes.shape[0] == 0
            or max(boxes[:, 4]) < thresh) and not out_when_no_box:
        print(
            "__________________________________________________________________________________________________________"
        )
        print("NO DETECTION")
        print(
            "__________________________________________________________________________________________________________"
        )
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    fig2 = plt.figure(frameon=False)
    fig2.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax2 = plt.Axes(fig2, [0., 0., 1., 1.])
    ax2.axis('off')
    fig2.add_axes(ax2)
    ax2.imshow(np.zeros_like(im))

    # fig3 = plt.figure(frameon=False)
    # fig3.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    # ax3 = plt.Axes(fig3, [0., 0., 1., 1.])
    # ax3.axis('off')
    # fig3.add_axes(ax3)

    fig_mask = Figure()
    canvas = FigureCanvas(fig_mask)
    ax_mask = fig_mask.gca()
    ax_mask.imshow(np.zeros_like(im))

    if boxes is None:
        sorted_inds = []  # avoid crash when 'boxes' is None
    else:
        # Display in largest to smallest order to reduce occlusion
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    print("TotalNumber of Objects detected:", sorted_inds.shape[-1])

    # sort indices from highest to lowest detection score (confidence)
    scores = boxes[:, -1]
    sorted_inds = np.argsort(-scores)

    # keep only the 'person' class detections
    idxs = []

    writeable_keypoints = []

    for i in sorted_inds:
        class_string_i = get_class_string(classes[i], scores[i], dataset)
        #print(class_string_i)
        if "person" in get_class_string(classes[i], scores[i], dataset):
            if scores[i] >= thresh:
                idxs.append(i)
            #print(class_string_i, i)

    if not idxs:
        print("###############")
        print(
            "No person objects detected with confidence higher than the threshold"
        )
        print("###############")
        return
    #thresh = 0.5
    # only process the person detection with the highest confidence
    for i in idxs[0:1]:
        #for i in sorted_inds[0:1]:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            print(score, '<', thresh, '=>skipping')
            continue

        print(get_class_string(classes[i], score, dataset), score)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            print("kps[", i, "].shape", kps.shape)
            plt.autoscale(False)
            tot_points = 0
            if not is_color_skeleton:
                colors = [color_skeleton] * 2 * len(kp_lines)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    tot_points += 1
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)
                    writeable_keypoints.append((kps[0, i1], kps[1, i1]))
                    #print("tot_points:",tot_points)
                if kps[2, i2] > kp_thresh:
                    tot_points += 1
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)
                    writeable_keypoints.append((kps[0, i2], kps[1, i2]))
                    #print("tot_points:",tot_points)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

    #output_name = os.path.basename(im_name) + '.' + ext
    #fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)

    output_name_skeleton = os.path.basename(im_name) + '_skeleton.' + ext
    fig2.savefig(os.path.join(output_dir, '{}'.format(output_name_skeleton)),
                 dpi=dpi)

    output_name_im_skeleton = os.path.basename(im_name) + '_im_skeleton.' + ext
    ax2.imshow(im)
    fig2.savefig(os.path.join(output_dir,
                              '{}'.format(output_name_im_skeleton)),
                 dpi=dpi)

    output_name_keypoints = os.path.basename(im_name) + '_keypoints.txt'
    with open(os.path.join(output_dir, '{}'.format(output_name_keypoints)),
              'w') as f:
        for pt in writeable_keypoints:
            print(pt[0], pt[1], file=f)  # one "x y" pair per line

    plt.close('all')
Example #8
File: vis.py  Project: ZiliPeng/DensePose
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  body_uv=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    # KP_data=[]

    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string(classes[i], score, dataset),
                    fontsize=3,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]

            # person = defaultdict(list)
            # person['person_id']=[i]
            # person['pose_keypoints_x']=kps[0].tolist()
            # person['pose_keypoints_y']=kps[1].tolist()
            # person['pose_keypoints_logit']=kps[2].tolist()
            # person['pose_keypoints_prob']=(kps[3]).tolist()
            # KP_data.append(person)

            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

    #   DensePose Visualization Starts!!
    ##  Get full IUV image out
    IUV_fields = body_uv[1]
    #
    All_Coords = np.zeros(im.shape)
    All_inds = np.zeros([im.shape[0], im.shape[1]])
    K = 26
    ##
    inds = np.argsort(boxes[:, 4])
    ##
    KP_data = []
    mask_color_id = 0

    for i, ind in enumerate(inds, 1):  # display all persons---zili

        entry = boxes[ind, :]
        if entry[4] > 0.9:
            entry = entry[0:4].astype(int)
            ####
            output = IUV_fields[ind]
            ####
            All_Coords_Old = All_Coords[entry[1]:entry[1] + output.shape[1],
                                        entry[0]:entry[0] + output.shape[2], :]
            All_Coords_Old[All_Coords_Old == 0] = output.transpose(
                [1, 2, 0])[All_Coords_Old == 0]
            All_Coords[entry[1]:entry[1] + output.shape[1],
                       entry[0]:entry[0] + output.shape[2], :] = All_Coords_Old
            ###
            CurrentMask = (output[0, :, :] > 0).astype(np.float32)
            All_inds_old = All_inds[entry[1]:entry[1] + output.shape[1],
                                    entry[0]:entry[0] + output.shape[2]]
            All_inds_old[All_inds_old ==
                         0] = CurrentMask[All_inds_old == 0] * i
            All_inds[entry[1]:entry[1] + output.shape[1],
                     entry[0]:entry[0] + output.shape[2]] = All_inds_old

            print(ind)
            if keypoints is not None and len(keypoints) > ind:
                kps = keypoints[ind]
                person = defaultdict(list)
                person['person_id'] = [i]
                person['pose_keypoints_x'] = kps[0].tolist()
                person['pose_keypoints_y'] = kps[1].tolist()
                person['pose_keypoints_logit'] = kps[2].tolist()
                KP_data.append(person)

            if segms is not None and len(segms) > ind:
                e = masks[:, :, ind]
                person_ind = np.zeros([im.shape[0], im.shape[1]],
                                      dtype=np.uint8)
                person_ind[e[:, :] != 0] = 255

                # Write each person's binary mask into a per-image subdirectory.
                OUT_Mask_path = os.path.join(
                    output_dir, os.path.basename(im_name).split('.')[0])
                if not os.path.exists(OUT_Mask_path):
                    os.makedirs(OUT_Mask_path)
                OUT_Mask_DIR = os.path.join(OUT_Mask_path,
                                            'person_' + str(i) + '.png')
                cv2.imwrite(OUT_Mask_DIR, person_ind)

    #
    All_Coords[:, :, 1:3] = 255. * All_Coords[:, :, 1:3]
    All_Coords[All_Coords > 255] = 255.
    All_Coords = All_Coords.astype(np.uint8)
    All_inds = All_inds.astype(np.uint8)
    #
    IUV_SaveName = os.path.basename(im_name).split('.')[0] + '_IUV.png'
    INDS_SaveName = os.path.basename(im_name).split('.')[0] + '_INDS.png'
    cv2.imwrite(os.path.join(output_dir, '{}'.format(IUV_SaveName)),
                All_Coords)
    cv2.imwrite(os.path.join(output_dir, '{}'.format(INDS_SaveName)), All_inds)
    print('IUV written to: ',
          os.path.join(output_dir, '{}'.format(IUV_SaveName)))
    IUV_inter = np.copy(All_Coords)
    INDS_inter = np.copy(All_inds)
    ###
    ### DensePose Visualization Done!!
    #

    # my code:---------------------start
    plt.imshow(im[:, :, :])
    plt.contour(All_Coords[:, :, 1] / 256., 10, linewidths=1)
    plt.contour(All_Coords[:, :, 2] / 256., 10, linewidths=1)
    plt.contour(All_inds, linewidths=2)

    IUVplusIMG_SaveName = os.path.basename(im_name).split(
        '.')[0] + '_IUVplusIMG.png'
    IUVplusBLK_SaveName = os.path.basename(im_name).split(
        '.')[0] + '_IUVplusBLK.png'

    plt.savefig(os.path.join(output_dir, '{}'.format(IUVplusIMG_SaveName)),
                dpi=dpi)

    mycanvas = np.zeros((im.shape[0], im.shape[1], 3), dtype=np.uint8)
    plt.imshow(mycanvas[:, :, :])
    plt.contour(All_Coords[:, :, 1] / 256., 10, linewidths=1)
    plt.contour(All_Coords[:, :, 2] / 256., 10, linewidths=1)
    plt.contour(All_inds, linewidths=2)
    plt.savefig(os.path.join(output_dir, '{}'.format(IUVplusBLK_SaveName)),
                dpi=dpi)
    # my code---------------------end

    # # # my code2---------------------start
    # # #draw frame and contours to canvas
    # plt.contour( All_Coords[:,:,1]/256.,10, linewidths = 1 )
    # plt.contour( All_Coords[:,:,2]/256.,10, linewidths = 1 )
    # plt.contour( All_inds, linewidths = 2 )
    # plt.axis('off')
    # fig.canvas.draw()
    # # # convert canvas to image
    # img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    # img  = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    # # # img is rgb, convert to opencv's default bgr
    # img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
    # # # display image with opencv
    # cv2.imshow("plot",img)
    # cv2.waitKey(1)
    # # IUVplusIMG_SaveName = os.path.basename(im_name).split('.')[0]+'_IUVplusIMG.png'
    # # cv2.imwrite(os.path.join(output_dir, '{}'.format(IUVplusIMG_SaveName)),img, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
    # # # my code2---------------------end

    # output_name = os.path.basename(im_name) + '.' + ext
    # fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')

    return IUV_inter, INDS_inter, KP_data
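Unlike the earlier variants, this one returns the composited IUV/INDS arrays and the keypoint records instead of only writing files, so a caller can post-process them. A hedged usage sketch; cls_boxes, cls_segms, cls_keyps, cls_bodys and dummy_coco_dataset stand in for whatever the detection step actually produced:

import json

iuv, inds, kp_data = vis_one_image(
    im, 'demo.jpg', '/tmp/out/', cls_boxes,
    segms=cls_segms, keypoints=cls_keyps, body_uv=cls_bodys,
    thresh=0.9, kp_thresh=2, dataset=dummy_coco_dataset, show_class=True)

# kp_data is a list of per-person dicts holding plain lists, so it serializes directly.
with open('/tmp/out/demo_keypoints.json', 'w') as f:
    json.dump(kp_data, f)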
Example #9
def vis_detbbox_one_image(im,
                          im_name,
                          output_dir,
                          boxes,
                          segms=None,
                          keypoints=None,
                          class_thresholds=None,
                          thresh=0.5,
                          FilterPrecScore=True,
                          FilterSize=True,
                          kp_thresh=2,
                          dpi=200,
                          box_alpha=0.0,
                          dataset=None,
                          show_class=True,
                          ext='png'):
    """Visual debugging of detections."""
    scoreTopK = 5
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in descending score order; only the top-scoring detections are drawn.
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    out_scores = boxes[:, -1]
    print("len(out_scores) = {}".format(len(out_scores)))
    #sorted_inds = np.argsort(-areas)
    sorted_inds = np.argsort(-out_scores)
    height, width, channels = im.shape
    size_ratio_thrsh = 0.15
    mask_color_id = 0
    for i in sorted_inds[:scoreTopK]:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        cls_name = dataset.classes[classes[i]]
        cls_thrsh = class_thresholds[
            cls_name] if class_thresholds is not None else thresh
        if score < thresh:
            continue

        # Default drawing style: solid red box with a green text label.
        lstyle = '-'
        ecolor = 'r'
        tcolor = 'g'  # text color

        # Detections below the per-class precision threshold are either
        # skipped or drawn in magenta.
        if score < cls_thrsh:
            if FilterPrecScore:
                continue
            ecolor = 'm'
            tcolor = 'm'

        box_width = bbox[2] - bbox[0]
        box_height = bbox[3] - bbox[1]
        # Detections that are small relative to the image are either skipped
        # or drawn with a dashed box.
        if float(box_width) / float(width) < size_ratio_thrsh or float(
                box_height) / float(height) < size_ratio_thrsh:
            if FilterSize:
                continue
            #print ("bbox width ratio = {}, height ratio = {}".format(float(box_width)/float(width), float(box_height)/float(height)))
            lstyle = '--'
        print("{}: ({:.3f}, {:.3f}, {:.3f}, {:.3f}), {:.3f}".format(
            dataset.classes[classes[i]], bbox[0], bbox[1], bbox[2], bbox[3],
            score))
        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor=ecolor,
                          linestyle=lstyle,
                          linewidth=1.0))

        if show_class:
            ax.text(
                bbox[0],
                bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=5,
                #family='serif',
                bbox=dict(facecolor=tcolor, alpha=0.9, edgecolor='none'),
                color='white')

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)),
                dpi=dpi,
                bbox_inches='tight',
                pad_inches=0)
    plt.close('all')
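For reference, a minimal usage sketch of vis_detbbox_one_image above; the Detectron-style inputs (im read with cv2.imread, the per-class cls_boxes list, and the dataset object) and the per-class threshold values are illustrative assumptions.

class_thresholds = {'person': 0.8, 'car': 0.7, 'bicycle': 0.65}  # illustrative values
vis_detbbox_one_image(
    im,                      # BGR image, e.g. from cv2.imread
    im_name='000001.jpg',
    output_dir='vis_out',
    boxes=cls_boxes,         # per-class list of [x1, y1, x2, y2, score] arrays
    class_thresholds=class_thresholds,
    thresh=0.5,
    FilterPrecScore=False,   # keep (magenta) boxes below the per-class threshold
    FilterSize=False,        # keep (dashed) boxes that are small relative to the image
    dataset=dataset,         # object exposing .classes, as used above
    show_class=True,
    ext='png')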
示例#10
0
def vis_one_image_Tarik(im,
                        im_name,
                        output_dir,
                        boxes,
                        segms=None,
                        keypoints=None,
                        thresh=0.9,
                        kp_thresh=2,
                        dpi=200,
                        box_alpha=0.0,
                        dataset=None,
                        show_class=False,
                        ext='pdf',
                        out_when_no_box=False):
    """Visual debugging of detections."""

    import matplotlib
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure
    from PIL import Image, ImageDraw

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if (boxes is None or boxes.shape[0] == 0
            or max(boxes[:, 4]) < thresh) and not out_when_no_box:
        print(
            "__________________________________________________________________________________________________________"
        )
        print("NO DETECTION")
        print(
            "__________________________________________________________________________________________________________"
        )
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    fig2 = plt.figure(frameon=False)
    fig2.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax2 = plt.Axes(fig2, [0., 0., 1., 1.])
    ax2.axis('off')
    fig2.add_axes(ax2)
    ax2.imshow(np.zeros_like(im))

    # fig3 = plt.figure(frameon=False)
    # fig3.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    # ax3 = plt.Axes(fig3, [0., 0., 1., 1.])
    # ax3.axis('off')
    # fig3.add_axes(ax3)

    fig_mask = Figure()
    canvas = FigureCanvas(fig_mask)
    ax_mask = fig_mask.gca()
    ax_mask.imshow(np.zeros_like(im))

    if boxes is None:
        sorted_inds = []  # avoid crash when 'boxes' is None
    else:
        # Display in largest to smallest order to reduce occlusion
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    print("Number of Objects detected:", sorted_inds.shape[-1])

    # sort indices from highest to lowest detection score (confidence)
    scores = boxes[:, -1]
    sorted_inds = np.argsort(-scores)

    # keep only the 'person' class detections
    idxs = []
    for i in sorted_inds:
        class_string_i = get_class_string(classes[i], scores[i], dataset)
        #print(class_string_i)
        if "person" in get_class_string(classes[i], scores[i], dataset):
            if scores[i] >= thresh:
                idxs.append(i)
            #print(class_string_i, i)

    if not idxs:
        print("###############")
        print(
            "No person objects detected with confidence higher than the threshold"
        )
        print("###############")
        return
    #thresh = 0.5
    # only process the person detection with the highest confidence
    for i in idxs[0:1]:
        #for i in sorted_inds[0:1]:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            print(score, '<', thresh, '=>skipping')
            continue

        print(get_class_string(classes[i], score, dataset), score)

        # show box (off by default)
        # ax.add_patch(
        #     plt.Rectangle((bbox[0], bbox[1]),
        #                   bbox[2] - bbox[0],
        #                   bbox[3] - bbox[1],
        #                   fill=False, edgecolor='g',
        #                   linewidth=0.5, alpha=box_alpha))

        # if show_class:
        #     ax.text(
        #         bbox[0], bbox[1] - 2,
        #         get_class_string(classes[i], score, dataset),
        #         fontsize=3,
        #         family='serif',
        #         bbox=dict(
        #             facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
        #         color='white')

        # show mask
        if segms is not None and len(segms) > i:
            print("segmenting")
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            contour = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                       cv2.CHAIN_APPROX_NONE)[-2]

            idx = 1
            mask_cv = np.zeros(im.shape[:2])

            for c in contour:
                idx += 1
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=.5)
                ax.add_patch(polygon)

                polygon_mask = Polygon(c.reshape((-1, 2)),
                                       fill=True,
                                       facecolor='w',
                                       edgecolor='w',
                                       linewidth=0,
                                       alpha=1.0)
                ax2.add_patch(polygon_mask)

                polygon_mask2 = Polygon(c.reshape((-1, 2)),
                                        fill=True,
                                        facecolor='w',
                                        edgecolor='w',
                                        linewidth=0,
                                        alpha=1.0)
                ax_mask.add_patch(polygon_mask2)
                # Fill with 1s so mask_cv can be used directly as a binary mask.
                cv2.fillPoly(mask_cv, pts=[c], color=1)
            im_masked = (im * mask_cv[:, :, np.newaxis]).astype(np.uint8)
            #ax_mask.axis('off')
            #canvas.draw()

            #img = Image.new('L', im.shape[:2], 0)
            #ImageDraw.Draw(img).polygon(polygon_mask2, outline=1, fill=1)
            #mask = numpy.array(img)

            #width, height = fig_mask.get_size_inches() * fig_mask.get_dpi()
            #image = np.fromstring(canvas.tostring_rgb()).reshape(height, width, 3)
            # polygon_mask2 = Polygon(
            #     c.reshape((-1, 2)),
            #     fill=True, facecolor='w',
            #     edgecolor='w', linewidth=0,
            #     alpha=0.5)
            # ax3.imshow(im)
            # ax3.add_patch(polygon_mask2)

            # imgs = [obj for obj in ax2.get_children() if isinstance(obj, matplotlib.image.AxesImage)]
            # print("imgs:",len(imgs))
            # print("ax.get_images():", ax2.get_images())
            # #ax3.imshow(im)

            #mask = np.zeros(im.shape, np.uint8)
            #cv2.drawContours(mask, contour, -1, (255,255,255),1)
            #im_masked = im*mask

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

    #from scipy.misc import imsave as imsave

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)

    output_name_mask = os.path.basename(im_name) + '_mask.' + ext
    fig2.savefig(os.path.join(output_dir, '{}'.format(output_name_mask)),
                 dpi=dpi)

    output_name3 = os.path.basename(im_name) + '_masked.' + ext
    # #fig3.savefig(os.path.join(output_dir, '{}'.format(output_name3)), dpi=dpi)
    plt.imsave(os.path.join(output_dir, '{}'.format(output_name3)),
               im_masked,
               cmap='gray')
    #print(image.shape)

    plt.close('all')
示例#11
0
文件: vis.py 项目: csjunxu/Detectron
def vis_one_image(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(
                        kps[0, i1], kps[1, i1], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(
                        kps[0, i2], kps[1, i2], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                    alpha=0.7)

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
示例#12
0
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  body_uv=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='jpg',
                  frame_no=None):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        print("Box:None, Shape:0 or MaxBox below thresh")
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    optime = time.time()
    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string(classes[i], score, dataset),
                    fontsize=10,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)
        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)
    #   DensePose Visualization Starts!!
    ##  Get full IUV image out
    IUV_fields = body_uv[1]
    #
    All_Coords = np.zeros(im.shape)
    All_inds = np.zeros([im.shape[0], im.shape[1]])
    K = 26
    ##
    inds = np.argsort(boxes[:, 4])
    ##
    for i, ind in enumerate(inds):
        entry = boxes[ind, :]
        if entry[4] > 0.65:
            entry = entry[0:4].astype(int)
            ####
            output = IUV_fields[ind]
            ####
            All_Coords_Old = All_Coords[entry[1]:entry[1] + output.shape[1],
                                        entry[0]:entry[0] + output.shape[2], :]
            All_Coords_Old[All_Coords_Old == 0] = output.transpose(
                [1, 2, 0])[All_Coords_Old == 0]
            All_Coords[entry[1]:entry[1] + output.shape[1],
                       entry[0]:entry[0] + output.shape[2], :] = All_Coords_Old
            ###
            CurrentMask = (output[0, :, :] > 0).astype(np.float32)
            All_inds_old = All_inds[entry[1]:entry[1] + output.shape[1],
                                    entry[0]:entry[0] + output.shape[2]]
            All_inds_old[All_inds_old ==
                         0] = CurrentMask[All_inds_old == 0] * i
            All_inds[entry[1]:entry[1] + output.shape[1],
                     entry[0]:entry[0] + output.shape[2]] = All_inds_old
    #
    All_Coords[:, :, 1:3] = 255. * All_Coords[:, :, 1:3]
    All_Coords[All_Coords > 255] = 255.
    All_Coords = All_Coords.astype(np.uint8)
    All_inds = All_inds.astype(np.uint8)
    # pdb.set_trace()
    #draw frame and contours to canvas
    plt.contour(All_Coords[:, :, 1] / 256., 10, linewidths=1)
    plt.contour(All_Coords[:, :, 2] / 256., 10, linewidths=1)
    plt.contour(All_inds, linewidths=3)
    plt.axis('off')
    fig.canvas.draw()
    # convert canvas to image
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    img = img.reshape(fig.canvas.get_width_height()[::-1] + (3, ))
    # img is rgb, convert to opencv's default bgr
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    # display image with opencv
    cv2.imshow("plot", img)
    cv2.waitKey(1)
    print('\t-Visualized in {: .3f}s'.format(time.time() - optime))
    output_name = os.path.basename(im_name) + '.' + ext
    filetime = time.time()
    out_dir = os.path.join(output_dir, 'vid')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    out_file = 'file%02d.png' % frame_no
    fig.savefig(os.path.join(out_dir, out_file), dpi=dpi)
    print('\t-Output file wrote in {: .3f}s'.format(time.time() - filetime))
    plt.close('all')
    return True
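As a minimal follow-up sketch (not part of the original example), the per-frame PNGs written above as 'file%02d.png' under output_dir/vid could be stitched into a video with OpenCV; the fps value, codec, and output path are assumptions.

import glob
import os

import cv2


def frames_to_video(frame_dir, out_path, fps=25):
    """Stitch the fileNN.png frames in frame_dir into a single video."""
    frame_paths = sorted(glob.glob(os.path.join(frame_dir, 'file*.png')))
    if not frame_paths:
        return
    h, w = cv2.imread(frame_paths[0]).shape[:2]
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    for p in frame_paths:
        frame = cv2.imread(p)
        # VideoWriter expects every frame to match the declared (w, h).
        writer.write(cv2.resize(frame, (w, h)))
    writer.release()


# e.g. frames_to_video(os.path.join(output_dir, 'vid'), 'detections.mp4')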
示例#13
0
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='pdf',
                  out_when_no_box=False):
    """Visual debugging of detections."""
    # dynamic objects from coco that makes sense for kitti dataset:
    # all types of vehicle and person
    dynamic_objs = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    #sys.exit(-1)
    # no bounding box information found
    if (boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh):
        detect_mask = np.zeros((im.shape[0], im.shape[1]), np.uint8)
        return detect_mask

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)
    #cv2.imwrite(os.path.join(output_dir, 'RGB.png'), masks[:, :, 1])

    if boxes is None:
        sorted_inds = []  # avoid crash when 'boxes' is None
    else:
        # Display in largest to smallest order to reduce occlusion
        areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        sorted_inds = np.argsort(-areas)

    mask_color_id = 0

    # Creating variables to store bbox and mask
    stored_class = []
    stored_mask = []
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string(classes[i], score, dataset),
                    fontsize=3,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show mask
        """
        This is where process of writing patches goes into action
        """
        if segms is not None and len(segms) > i:
            #print("class label:{}".format(classes[i]))

            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                #print("c shape: {}".format(c.reshape((-1, 2)).shape))
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

            # Create a binary mask hxw, cropped if bounding box info given
            binary_mask = create_binary_mask(im, contour)
            stored_mask.append(binary_mask)
            stored_class.append(classes[i])
            #cv2.imwrite('/detectron/result/blah.png', binary_mask)
            #sys.exit()

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

    detect_mask = np.zeros((im.shape[0], im.shape[1]), np.uint8)
    tmp_mask1 = np.zeros((im.shape[0], im.shape[1]), np.uint8)
    assert (len(stored_class) == len(stored_mask))
    for idx, mask in enumerate(stored_mask):
        label = stored_class[idx]
        if label not in dynamic_objs:
            continue
        tmp_mask1[mask > 0] = idx + 1
        # fix occlusion
        detect_mask[tmp_mask1 > 0] = 0
        detect_mask += tmp_mask1

    if np.unique(detect_mask).max() > 80:
        print("exceeded class label")
        fig.savefig("/detectron/result/blah.jpg", dpi=dpi)
        plt.close('all')
        sys.exit()
    """plt.close('all')
    cv2.imwrite('/detectron/result/blah.png', detect_mask)
    fig.savefig("/detectron/result/blah.jpg", dpi=dpi)
    plt.close('all')
    sys.exit()"""
    plt.close('all')
    return detect_mask
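A minimal sketch (illustrative only, with hypothetical variable names for the per-class inference outputs): colorize the instance-id mask returned above for quick visual inspection.

dyn_mask = vis_one_image(im, im_name, output_dir, cls_boxes, segms=cls_segms,
                         keypoints=cls_keyps, thresh=0.9, dataset=dataset)
if dyn_mask is not None and dyn_mask.max() > 0:
    scaled = (dyn_mask.astype(np.float32) * (255.0 / dyn_mask.max())).astype(np.uint8)
    colored = cv2.applyColorMap(scaled, cv2.COLORMAP_JET)
    cv2.imwrite(os.path.join(output_dir, im_name + '_dynamic_mask.png'), colored)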
示例#14
0
def vis_one_image(
        im, im_name, boxes, segm_contours, obj_names, keypoints=None,
        kp_thresh=2, dpi=200, box_alpha=0.0, show_class=True):
    """Visual debugging of detections. We assume that there are detections"""
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    assert boxes is not None
    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    for mask_color_id, i in enumerate(sorted_inds):
        bbox = boxes[i, :4]
        score = boxes[i, -1]

        # show box
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor=color_list[mask_color_id % len(color_list)],
                          linewidth=3, alpha=box_alpha))
        if show_class:
            # TODO: Make some boxes BIGGER if they are far from other things
            y_coord = bbox[1] - 2
            fontsize = max(min(bbox[2] - bbox[0], bbox[3] - bbox[1]) / 40, 5)
            if fontsize * 2 > y_coord:
                y_coord += fontsize * 2 + 2

            ax.text(
                bbox[0], y_coord,
                obj_names[i] + ' {:0.2f}'.format(score).lstrip('0'),
                fontsize=fontsize,
                family='serif',
                bbox=dict(
                    facecolor=color_list[mask_color_id % len(color_list)],
                    alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if len(segm_contours) > 0:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]

            for segm_part in segm_contours[i]:
                polygon = Polygon(
                    np.array(segm_part),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = np.array(keypoints[i])
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(
                        kps[0, i1], kps[1, i1], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(
                        kps[0, i2], kps[1, i2], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                    alpha=0.7)

    ext = im_name.split('.')[-1]
    rest_of_the_fn = im_name[:-(len(ext) + 1)]
    ext2use = 'png' if ext == 'jpg' else ext
    output_name = rest_of_the_fn + '.' + ext2use
    fig.savefig(output_name, dpi=dpi)
    plt.close('all')

    # Convert to JPG manually... ugh
    if ext == 'jpg':
        assert os.path.exists(output_name)
        png_img = cv2.imread(output_name)
        cv2.imwrite(rest_of_the_fn + '.' + ext, png_img, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
        os.remove(output_name)
示例#15
0
def vis_one_image_uv(im,
                     boxes,
                     segms=None,
                     keypoints=None,
                     body_uv=None,
                     thresh=0.9,
                     kp_thresh=2,
                     show_box=False,
                     dataset=None,
                     show_class=False):
    """Constructs a numpy array with the detections visualized."""
    im = im.astype(np.uint8)
    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return im

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)
    color_list = colormap(rgb=True) / 255
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    IUV_fields = body_uv[1]
    #
    All_Coords = np.zeros(im.shape)
    All_inds = np.zeros([im.shape[0], im.shape[1]])
    K = 26
    ##
    inds = np.argsort(boxes[:, 4])
    ##
    for i, ind in enumerate(inds):
        entry = boxes[ind, :]
        if entry[4] > 0.65:
            entry = entry[0:4].astype(int)
            ####
            output = IUV_fields[ind]
            ####
            All_Coords_Old = All_Coords[entry[1]:entry[1] + output.shape[1],
                                        entry[0]:entry[0] + output.shape[2], :]
            All_Coords_Old[All_Coords_Old == 0] = output.transpose(
                [1, 2, 0])[All_Coords_Old == 0]
            All_Coords[entry[1]:entry[1] + output.shape[1],
                       entry[0]:entry[0] + output.shape[2], :] = All_Coords_Old
            ###
            CurrentMask = (output[0, :, :] > 0).astype(np.float32)
            All_inds_old = All_inds[entry[1]:entry[1] + output.shape[1],
                                    entry[0]:entry[0] + output.shape[2]]
            All_inds_old[All_inds_old ==
                         0] = CurrentMask[All_inds_old == 0] * i
            All_inds[entry[1]:entry[1] + output.shape[1],
                     entry[0]:entry[0] + output.shape[2]] = All_inds_old
    #
    All_Coords[:, :, 1:3] = 255. * All_Coords[:, :, 1:3]
    All_Coords[All_Coords > 255] = 255.
    All_Coords = All_Coords.astype(np.uint8)
    All_inds = All_inds.astype(np.uint8)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        if show_box:
            im = vis_bbox(
                im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]))

        # show class (off by default)
        if show_class:
            class_str = get_class_string(classes[i], score, dataset)
            im = vis_class(im, (bbox[0], bbox[1] - 2), class_str)

        # show mask
        if segms is not None and len(segms) > i:
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1
            im = vis_mask(im, masks[..., i], color_mask)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            im = vis_keypoints(im, keypoints[i], kp_thresh)

    return im, All_Coords, All_inds
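A brief usage sketch for vis_one_image_uv above, assuming DensePose-style inference outputs (cls_boxes, cls_segms, cls_keyps, cls_bodys and dummy_coco_dataset are hypothetical names); note that the early return above yields only im when nothing passes the threshold.

result = vis_one_image_uv(im, cls_boxes, segms=cls_segms, keypoints=cls_keyps,
                          body_uv=cls_bodys, thresh=0.9,
                          dataset=dummy_coco_dataset, show_box=True)
if isinstance(result, tuple):
    vis_im, iuv, inds = result
    cv2.imwrite('frame_vis.png', vis_im)
    cv2.imwrite('frame_IUV.png', iuv)
    cv2.imwrite('frame_INDS.png', inds)
else:
    # No detection above thresh: only the untouched image is returned.
    cv2.imwrite('frame_vis.png', result)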