Example #1
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    """Visualizes keypoints (adapted from vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
    """
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)

    # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]

    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)

    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (
        kps[:2, dataset_keypoints.index('right_shoulder')] +
        kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (
        kps[:2, dataset_keypoints.index('right_hip')] +
        kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(
        kps[2, dataset_keypoints.index('right_hip')],
        kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
            color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(mid_hip),
            color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)

    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = kps[0, i1], kps[1, i1]
        p2 = kps[0, i2], kps[1, i2]
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(
                kp_mask, p1, p2,
                color=colors[l], thickness=2, lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(
                kp_mask, p1,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(
                kp_mask, p2,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)

    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
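A minimal sketch (not part of the example above): packing COCO-style (x, y, v) keypoint triplets into the (4, #keypoints) layout the docstring describes. The visibility flag only stands in for the logit and probability rows here; real values come from the keypoint head.

import numpy as np

# Three keypoints as COCO-style (x, y, visibility) triplets, all visible.
coco_kps = [102, 60, 2, 110, 55, 2, 95, 55, 2]
k = np.asarray(coco_kps, dtype=np.float32).reshape(-1, 3).T  # shape (3, K): x, y, v rows
# Rows expected by vis_keypoints: (x, y, logit, prob); v * 10 and v / 2 are placeholders.
kps = np.vstack([k[0], k[1], k[2] * 10.0, k[2] / 2.0])
print(kps.shape)  # (4, 3)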
Example #2
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]
    kp_mask = np.copy(img)
    mid_shoulder = (kps[:2, dataset_keypoints.index('right_shoulder')] +
                    kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
               kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
    sc_mid_hip = np.minimum(kps[2, dataset_keypoints.index('right_hip')],
                            kps[2, dataset_keypoints.index('left_hip')])
    nose_idx = dataset_keypoints.index('nose')
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(kp_mask,
                 tuple(mid_shoulder),
                 tuple(kps[:2, nose_idx]),
                 color=colors[len(kp_lines)],
                 thickness=2,
                 lineType=cv2.LINE_AA)
    if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
        cv2.line(kp_mask,
                 tuple(mid_shoulder),
                 tuple(mid_hip),
                 color=colors[len(kp_lines) + 1],
                 thickness=2,
                 lineType=cv2.LINE_AA)
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = kps[0, i1], kps[1, i1]
        p2 = kps[0, i2], kps[1, i2]
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(kp_mask,
                     p1,
                     p2,
                     color=colors[l],
                     thickness=2,
                     lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(kp_mask,
                       p1,
                       radius=3,
                       color=colors[l],
                       thickness=-1,
                       lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(kp_mask,
                       p2,
                       radius=3,
                       color=colors[l],
                       thickness=-1,
                       lineType=cv2.LINE_AA)
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
Example #3
def get_result_json(boxes, segms, keypoints, thresh=0.7, dataset=None):

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None:
        masks = mask_util.decode(segms)

    # Note: the area-based ordering computed first is immediately overridden,
    # so objects are emitted from highest to lowest score.
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)
    sorted_inds = np.argsort(-boxes[:, 4])

    results = {'mask_rle': segms, 'objects': []}
    for i in sorted_inds:
        score = boxes[i, -1]

        if score < thresh:
            continue

        bbox = boxes[i, :4]
        class_idx = classes[i]
        class_text = dataset.classes[class_idx]
        mask_idx = i
        mask = masks[:, :, mask_idx]
        #kps = keypoints[i]
        _, contour, hier = cv2.findContours(mask.copy(), cv2.RETR_CCOMP,
                                            cv2.CHAIN_APPROX_NONE)
        contours = [c.reshape((-1, 2)).tolist() for c in contour]
        obj = {
            'box': bbox.tolist(),
            'class': class_text,
            'mask_idx': mask_idx,
            'contours': contours,
            'score': float(score)
        }
        results['objects'].append(obj)

    return results
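A minimal sketch (assumed call site, not from the example): serializing the dictionary returned by get_result_json. Here boxes, segms, keypoints and dataset are assumed to come from the surrounding inference code; pycocotools RLE dicts store 'counts' as bytes in Python 3, so they are decoded before dumping.

import json

results = get_result_json(boxes, segms, keypoints, thresh=0.7, dataset=dataset)
if results is not None:
    for rle in (results.get('mask_rle') or []):
        # Decode byte-encoded RLE counts so the dict is JSON-serializable.
        if isinstance(rle, dict) and isinstance(rle.get('counts'), bytes):
            rle['counts'] = rle['counts'].decode('ascii')
    with open('detections.json', 'w') as f:
        # default=int guards against stray NumPy integers such as mask_idx.
        json.dump(results, f, default=int)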
Example #4
def vis_one_image_cvpr2018_wad(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='jpg'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)
    mask_color_id = 0
    # [35, 38, 36, 39, 40, 34, 33]

    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        json_id = dataset.contiguous_category_id_to_json_id[classes[i]]
        class_string = dataset.id_map_to_cat[json_id]
        print(class_string, score)
        # show box (off by default, box_alpha=0.0)
        ax.add_patch(plt.Rectangle((bbox[0], bbox[1]),
                                   bbox[2] - bbox[0],
                                   bbox[3] - bbox[1],
                                   fill=False, edgecolor='g',
                                   linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(bbox[0], bbox[1] - 2,
                    class_string + ' {:0.2f}'.format(score).lstrip('0'),
                    fontsize=10,
                    family='serif',
                    bbox=dict(facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                    color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)

        output_name = os.path.basename(im_name) + '.' + ext
        fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
        plt.close('all')
Example #5
def vis_one_image_car(im,
                      im_name,
                      output_dir,
                      boxes,
                      segms=None,
                      keypoints=None,
                      thresh=0.9,
                      kp_thresh=2,
                      dpi=200,
                      box_alpha=0.0,
                      dataset=None,
                      show_class=False,
                      ext='jpg'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections_car(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        print('vehicle', score)
        # show box (off by default, box_alpha=0.0)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string_car(classes[i], score, dataset),
                    fontsize=3,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if i1 > 13 or i2 > 13:
                    continue
                #print(kps[2],i1,i2)
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = ax.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    ax.plot(kps[0, i1],
                            kps[1, i1],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)
                if kps[2, i2] > kp_thresh:
                    ax.plot(kps[0, i2],
                            kps[1, i2],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)

        output_name = os.path.basename(im_name) + '.' + ext
        fig.savefig(os.path.join(output_dir, '{}'.format(output_name)),
                    dpi=dpi)
        plt.close('all')
Example #6
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes=None,
                  segms=None,
                  keypoints=None,
                  score_info=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='png'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    boxes, segms, keypoints, classes = convert_from_cls_format(
        boxes, segms, keypoints)

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    # areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    # sorted_inds = np.argsort(-areas)
    score_list = [sl for slist in score_info for sl in slist]
    # segms = [s for slist in cls_segms for s in slist]
    mask_color_id = 0
    for i in range(len(classes)):
        score = score_list[i]
        if score < thresh:
            continue

        print(dataset.classes[classes[i]], score)
        # show box (off by default, box_alpha=0.0)
        # ax.add_patch(
        #     plt.Rectangle((bbox[0], bbox[1]),
        #                   bbox[2] - bbox[0],
        #                   bbox[3] - bbox[1],
        #                   fill=False, edgecolor='g',
        #                   linewidth=0.5, alpha=box_alpha))

        # if show_class:
        #     ax.text(
        #         bbox[0], bbox[1] - 2,
        #         get_class_string(classes[i], score, dataset),
        #         fontsize=3,
        #         family='serif',
        #         bbox=dict(
        #             facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
        #         color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        output_name = os.path.basename(im_name) + '.' + ext
        fig.savefig(os.path.join(output_dir, '{}'.format(output_name)),
                    dpi=dpi)
        plt.close('all')

    print("Saved pdfs in {}/{}".format(output_dir, im_name))
Example #7
def vis_one_image_srishti(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

            iterator = 0
            for c in contour:
                iterator += 1
                if iterator > 2:
                    break
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)

    output_name_png = os.path.basename(im_name) + '.' + 'png'
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name_png)), dpi=dpi)

    plt.close('all')
Example #8
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.8,
                  dataset=None,
                  show_class=False,
                  ext='pdf',
                  labels=None):
    """Visual debugging of detections."""
    # if not os.path.exists ( output_dir ) :
    # 	os.makedirs ( output_dir )

    print("Processing image: {}".format(im_name))

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    # masks is a 3-D array: dimensions 1 and 2 are the image's row/column coordinates,
    # and dimension 3 indexes the currently predicted instances.
    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    # print ( np.unique ( classes ) )
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    # Sorted from largest to smallest by default: big instances are drawn first and
    # smaller ones drawn over them later, so both remain visible.
    sorted_inds = np.argsort(-areas)
    instance_cnt = defaultdict(int)

    labels_graph = np.zeros((im.shape[0], im.shape[1]))

    for sub_dir in ["instances_text", "instances", "labels"]:
        if not os.path.exists(os.path.join(output_dir, sub_dir)):
            os.makedirs(os.path.join(output_dir, sub_dir), 0o777)

    mask_file = open("{}/instances_text/{}.txt".format(output_dir, im_name),
                     "w")

    # output labels prediction
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        # Only proceed when the confidence is greater than 0.5
        if score > 0.5:
            single_mask = masks[:, :, i]
            instances_graph = np.zeros((im.shape[0], im.shape[1]))

            label_id = classes[i]
            instance_cnt[dataset.classes[label_id]] += 1
            instance_id = instance_cnt[dataset.classes[label_id]]
            labels_graph[single_mask == 1] = label_id
            instances_graph[single_mask == 1] = 255
            scipy.misc.imsave(
                '{}/instances/{}_{}.png'.format(output_dir, im_name,
                                                label_id * 256 + instance_id),
                instances_graph)

            # Each line is written as: mask_file_path class_name score
            mask_file.write("{}/instances/{}_{}.png {} {}\n".format(
                output_dir, im_name, label_id * 256 + instance_id, classes[i],
                score))

    # # current_graph stores the relatively raw label values
    # scipy.misc.imsave ( '/nfs/project/libo_i/mask-rcnn.pytorch/map_evaluation_format/raw/{}.jpg'.format ( im_name ) ,
    #                     labels_graph )
    # print ( np.unique ( labels_graph ) )
    # colored_graph stores the values after applying the cmap from the config.
    # colored_graph = apply_color_map ( labels_graph , labels )
    # gray_graph = rgb2gray ( colored_graph )
    scipy.misc.imsave('{}/labels/{}.png'.format(output_dir, im_name),
                      labels_graph)

    mask_file.close()
Example #9
def vis_one_image(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=1.2,
                    alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(
                        kps[0, i1], kps[1, i1], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(
                        kps[0, i2], kps[1, i2], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                    alpha=0.7)

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
Example #10
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  body_uv=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        print(dataset.classes[classes[i]], score)
        # show box (off by default, box_alpha=0.0)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string(classes[i], score, dataset),
                    fontsize=3,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = ax.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    ax.plot(kps[0, i1],
                            kps[1, i1],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)
                if kps[2, i2] > kp_thresh:
                    ax.plot(kps[0, i2],
                            kps[1, i2],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

    #   DensePose Visualization Starts!!
    ##  Get full IUV image out
    if body_uv is not None and len(body_uv) > 1:
        IUV_fields = body_uv[1]
        #
        All_Coords = np.zeros(im.shape)
        All_inds = np.zeros([im.shape[0], im.shape[1]])
        K = 26
        ##
        inds = np.argsort(boxes[:, 4])
        ##
        for i, ind in enumerate(inds):
            entry = boxes[ind, :]
            if entry[4] > 0.65:
                entry = entry[0:4].astype(int)
                ####
                output = IUV_fields[ind]
                ####
                All_Coords_Old = All_Coords[entry[1]:entry[1] +
                                            output.shape[1],
                                            entry[0]:entry[0] +
                                            output.shape[2], :]
                All_Coords_Old[All_Coords_Old == 0] = output.transpose(
                    [1, 2, 0])[All_Coords_Old == 0]
                All_Coords[entry[1]:entry[1] + output.shape[1],
                           entry[0]:entry[0] +
                           output.shape[2], :] = All_Coords_Old
                ###
                CurrentMask = (output[0, :, :] > 0).astype(np.float32)
                All_inds_old = All_inds[entry[1]:entry[1] + output.shape[1],
                                        entry[0]:entry[0] + output.shape[2]]
                All_inds_old[All_inds_old ==
                             0] = CurrentMask[All_inds_old == 0] * i
                All_inds[entry[1]:entry[1] + output.shape[1],
                         entry[0]:entry[0] + output.shape[2]] = All_inds_old
        #
        All_Coords[:, :, 1:3] = 255. * All_Coords[:, :, 1:3]
        All_Coords[All_Coords > 255] = 255.
        All_Coords = All_Coords.astype(np.uint8)
        All_inds = All_inds.astype(np.uint8)
        #
        IUV_SaveName = os.path.basename(im_name).split('.')[0] + '_IUV.png'
        INDS_SaveName = os.path.basename(im_name).split('.')[0] + '_INDS.png'
        cv2.imwrite(os.path.join(output_dir, '{}'.format(IUV_SaveName)),
                    All_Coords)
        cv2.imwrite(os.path.join(output_dir, '{}'.format(INDS_SaveName)),
                    All_inds)
        print('IUV written to: ',
              os.path.join(output_dir, '{}'.format(IUV_SaveName)))
        ###
        ### DensePose Visualization Done!!
    #
    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')

    #   SMPL Visualization
    if body_uv is not None and len(body_uv) > 2:
        smpl_fields = body_uv[2]
        #
        All_Coords = np.zeros(im.shape)
        # All_inds = np.zeros([im.shape[0], im.shape[1]])
        K = 26
        ##
        inds = np.argsort(boxes[:, 4])
        ##
        for i, ind in enumerate(inds):
            entry = boxes[ind, :]
            if entry[4] > 0.75:
                entry = entry[0:4].astype(int)
                center_roi = [(entry[2] + entry[0]) / 2.,
                              (entry[3] + entry[1]) / 2.]
                ####
                output, center_out = smpl_fields[ind]
                ####
                x1_img = max(int(center_roi[0] - center_out[0]), 0)
                y1_img = max(int(center_roi[1] - center_out[1]), 0)

                x2_img = min(
                    int(center_roi[0] - center_out[0]) + output.shape[2],
                    im.shape[1])
                y2_img = min(
                    int(center_roi[1] - center_out[1]) + output.shape[1],
                    im.shape[0])

                All_Coords_Old = All_Coords[y1_img:y2_img, x1_img:x2_img, :]

                x1_out = max(int(center_out[0] - center_roi[0]), 0)
                y1_out = max(int(center_out[1] - center_roi[1]), 0)

                x2_out = x1_out + (x2_img - x1_img)
                y2_out = y1_out + (y2_img - y1_img)

                output = output[:, y1_out:y2_out, x1_out:x2_out]

                # All_Coords_Old = All_Coords[entry[1]: entry[1] + output.shape[1], entry[0]:entry[0] + output.shape[2],
                #                  :]
                All_Coords_Old[All_Coords_Old == 0] = output.transpose(
                    [1, 2, 0])[All_Coords_Old == 0]
                All_Coords[y1_img:y2_img, x1_img:x2_img, :] = All_Coords_Old
                ###
                # CurrentMask = (output[0, :, :] > 0).astype(np.float32)
                # All_inds_old = All_inds[entry[1]: entry[1] + output.shape[1], entry[0]:entry[0] + output.shape[2]]
                # All_inds_old[All_inds_old == 0] = CurrentMask[All_inds_old == 0] * i
                # All_inds[entry[1]: entry[1] + output.shape[1], entry[0]:entry[0] + output.shape[2]] = All_inds_old
        #
        All_Coords = 255. * All_Coords
        All_Coords[All_Coords > 255] = 255.
        All_Coords = All_Coords.astype(np.uint8)

        image_stacked = im[:, :, ::-1]
        image_stacked[All_Coords > 20] = All_Coords[All_Coords > 20]
        # All_inds = All_inds.astype(np.uint8)
        #
        SMPL_SaveName = os.path.basename(im_name).split('.')[0] + '_SMPL.png'
        smpl_image_SaveName = os.path.basename(im_name).split(
            '.')[0] + '_SMPLimg.png'
        # INDS_SaveName = os.path.basename(im_name).split('.')[0] + '_INDS.png'
        cv2.imwrite(os.path.join(output_dir, '{}'.format(SMPL_SaveName)),
                    All_Coords)
        cv2.imwrite(os.path.join(output_dir, '{}'.format(smpl_image_SaveName)),
                    image_stacked)
        # cv2.imwrite(os.path.join(output_dir, '{}'.format(INDS_SaveName)), All_inds)
        print('SMPL written to: ',
              os.path.join(output_dir, '{}'.format(SMPL_SaveName)))
        ###
        ### SMPL Visualization Done!!
    #
    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
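A minimal sketch (assumption): reading back an IUV image written by the code above and splitting it into the part-index and UV channels, following the layout used when All_Coords is assembled (I in channel 0, U and V scaled by 255 in channels 1 and 2). The file name is hypothetical.

import cv2

iuv = cv2.imread('output/frame_0001_IUV.png')
if iuv is not None:
    part_index = iuv[:, :, 0]      # DensePose body-part index per pixel
    U = iuv[:, :, 1] / 255.0       # U coordinate, rescaled back to [0, 1]
    V = iuv[:, :, 2] / 255.0       # V coordinate, rescaled back to [0, 1]
    print(part_index.max(), U.max(), V.max())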
Example #11
def extract_bbox(im,
                 im_name,
                 output_dir,
                 boxes,
                 segms=None,
                 keypoints=None,
                 thresh=0.9,
                 kp_thresh=2,
                 dpi=200,
                 box_alpha=0.0,
                 dataset=None,
                 show_class=False,
                 ext='pdf'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    """
    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)
    """

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    results = {}
    det_results = []

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        result = {}
        result['location'] = bbox.tolist()
        result['label'] = classes[i]
        result['score'] = score.tolist()

        det_results.append(result)

        # show box (off by default)
        """
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')
        """

    print('# of bbox for img {} is {}'.format(im_name, len(det_results)))
    results['detection'] = det_results
    img_id = im_name.split('/')[-1][:-4]
    results['image_id'] = img_id

    # output_name = os.path.basename(im_name) + '.' + ext
    # fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    # plt.close('all')

    return results
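A minimal sketch (assumption, not from the example): persisting the per-image dictionary returned by extract_bbox to its own JSON file, named after the 'image_id' field it fills in.

import json
import os

def save_detections(results, out_dir='det_json'):
    # extract_bbox returns None when no detection passes the threshold.
    if results is None:
        return
    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, results['image_id'] + '.json')
    with open(out_path, 'w') as f:
        json.dump(results, f, default=int)  # default=int guards against NumPy integers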
Example #12
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='pdf'):
    # Convert the image from OpenCV (BGR) format to PIL (RGB) format
    img_PIL = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))

    # Font: *.ttc files usually live under /usr/share/fonts/opentype/noto/; find them with `locate *.ttc`
    font = ImageFont.truetype('NotoSansCJK-Black.ttc', 40)
    # Font color
    fillColor = (255, 0, 0)
    # Position of the text
    position = (100, 100)
    # Text to draw (the sample string is Chinese for "draw Chinese text on the image");
    # renamed from `str` to avoid shadowing the built-in.
    demo_text = '在图片上输出中文'

    draw = ImageDraw.Draw(img_PIL)
    draw.text(position, demo_text, font=font, fill=fillColor)
    # Save the image locally with PIL's save method (kept commented out)
    # img_PIL.save('02.jpg', 'jpeg')

    # Convert back to OpenCV (BGR) format
    im = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(  # -> n x 5, _, _, [c1, c2, ...]
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        print(dataset.classes[classes[i]], score)
        # show box (off by default, box_alpha=0.0)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        text = get_class_string(classes[i], score, dataset)

        if show_class:  # show class name
            ax.text(bbox[0],
                    bbox[1] - 2,
                    text,
                    fontsize=3,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = ax.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    ax.plot(kps[0, i1],
                            kps[1, i1],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)
                if kps[2, i2] > kp_thresh:
                    ax.plot(kps[0, i2],
                            kps[1, i2],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

        output_name = os.path.basename(im_name) + '.' + ext
        fig.savefig(os.path.join(output_dir, '{}'.format(output_name)),
                    dpi=dpi)
        plt.close('all')
Example #13
def vis_one_image_orig(im,
                       im_name,
                       output_dir,
                       boxes,
                       segms=None,
                       keypoints=None,
                       thresh=0.9,
                       kp_thresh=2,
                       dpi=200,
                       box_alpha=0.0,
                       dataset=None,
                       show_class=False,
                       ext='pdf',
                       entry=None):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # pdb.set_trace()

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(  # -> n x 5, _, _, [c1, c2, ...]
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    #LJ
    imdir = '/data1/liujingyu/DR/fold_all'
    img_path = os.path.join(imdir, im_name.split('_')[1] + '.png')
    # pdb.set_trace()
    assert os.path.exists(img_path)
    img_ori = cv2.imread(img_path, 1)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    Flag = False
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # LJ: commented out
        # print(dataset.eng_classes[classes[i]], score)

        # show box (off by default, box_alpha=0.0)
        # LJ: do not draw the box
        if False:
            ax.add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                              bbox[2] - bbox[0],
                              bbox[3] - bbox[1],
                              fill=False,
                              edgecolor='g',
                              linewidth=0.5,
                              alpha=box_alpha))

        if False:
            # if show_class:
            ax.text(
                bbox[0],
                bbox[1] - 2,
                # LJ: commented out
                get_class_eng_string(classes[i], score, dataset),
                fontsize=12,
                family='serif',
                bbox=dict(facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                # LJ
                #bbox = None,
                color='white')

        # show mask
        # pdb.set_trace()
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            # LJ: with this OpenCV version, findContours returns three values
            bin, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                  cv2.CHAIN_APPROX_NONE)
            # Draw onto the original-size image (resized to 512x512 when saved)
            color_edge = [0, 0, 0]
            for cm in range(3):
                color_edge[cm] = int(color_mask[cm] * 255)
            # pdb.set_trace()
            point0 = (int(bbox[0]), int(bbox[1]))
            text = dataset.eng_classes[classes[i]]
            if classes[i] in entry['cls_list']:
                Flag = True
                cv2.putText(img_ori, text, point0, cv2.FONT_HERSHEY_SIMPLEX, 2,
                            (250, 250, 250), 3)
                img_vis = cv2.drawContours(
                    img_ori, contour, -1,
                    (color_edge[2], color_edge[1], color_edge[0]), 8)

            # Generate a semi-transparent mask (disabled below)
            # for c in contour:
            #     polygon = Polygon(
            #         c.reshape((-1, 2)),
            #         fill=True, facecolor=color_mask,
            #         edgecolor='w', linewidth=1.2,
            #         alpha=0.5)
            #     ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = ax.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    ax.plot(kps[0, i1],
                            kps[1, i1],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)
                if kps[2, i2] > kp_thresh:
                    ax.plot(kps[0, i2],
                            kps[1, i2],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

        # pdb.set_trace()
        # output_name = os.path.basename(im_name) + '.' + ext
        # fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
        # plt.close('all')

        # LJ
        if Flag:
            output_name = os.path.basename(im_name) + '.jpg'
            img_vis = cv2.resize(img_vis, (512, 512),
                                 interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(os.path.join(output_dir, '{}'.format(output_name)),
                        img_vis)
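A note on the comment above: the tuple returned by cv2.findContours depends on the OpenCV version (three values in OpenCV 3.x, two in 2.4 and 4.x). A small compatibility wrapper, sketched below under that assumption (the helper name is my own, not part of the snippet), keeps code like this portable:

import cv2

def find_contours_compat(mask, mode=cv2.RETR_CCOMP, method=cv2.CHAIN_APPROX_NONE):
    # Normalize the version-dependent return value of cv2.findContours.
    ret = cv2.findContours(mask.copy(), mode, method)
    if len(ret) == 3:
        _, contours, hierarchy = ret   # OpenCV 3.x
    else:
        contours, hierarchy = ret      # OpenCV 2.4 / 4.x
    return contours, hierarchy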
Exemplo n.º 14
0
def vis_one_image(
        im, other_im, im_name, output_dir, boxes, gt_boxes, segms=None, keypoints=None, thresh=0.9,
        kp_thresh=2, dpi=300, box_alpha=0.0, dataset=None, show_class=False,
        ext='png', gt_classes=None):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if isinstance(gt_boxes[0], list):
        box_list = [b for b in gt_boxes[0]]
        if len(box_list) > 0:
            gt_boxes[0] = np.vstack(box_list)
        else:
            gt_boxes[0] = None
            
    if isinstance(gt_boxes[1], list):
        box_list = [b for b in gt_boxes[1]]
        if len(box_list) > 0:
            gt_boxes[1] = np.vstack(box_list)
        else:
            gt_boxes[1] = None
            
    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    #if segms is not None:
    #    masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    if cfg.DATA_SOURCE == 'mammo':
        im = np.hstack([im,im,other_im])
    else:
        im = np.hstack([im,im])
    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    if cfg.DATA_SOURCE == 'mammo':
        fsize = 20
        lwidth = 2
    else:
        fsize = 3
        lwidth = 0.5
 
    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue


        # show box (off by default, box_alpha=0.0)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=lwidth, alpha=box_alpha))
        if show_class:
            show_text = get_class_string(classes[i], score, dataset)
        else:
            show_text = '%.2f' % score
        ax.text(
            bbox[0], bbox[1] - 2,
            show_text,
            fontsize=fsize,
            family='sans-serif',
            bbox=dict(
                facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
            color='white')

    # Draw GTs
    
    if gt_boxes[0] is not None:
        for i in range(gt_boxes[0].shape[0]):

            bbox = gt_boxes[0][i, :4]

            # show box (off by default, box_alpha=0.0)
            ax.add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='r',
                          linewidth=lwidth, alpha=box_alpha))
            if gt_classes is not None:
                show_text = get_gt_class_string(gt_classes[0][i], dataset)
                ax.text(
                    bbox[0], bbox[3] + 10,
                    show_text,
                    fontsize=fsize,
                    family='sans-serif',
                    bbox=dict(
                        facecolor='r', alpha=0.4, pad=0, edgecolor='none'),
                    color='white')

    if gt_boxes[1] is not None:
        for i in range(gt_boxes[1].shape[0]):

            bbox = gt_boxes[1][i, :4]

            # show box (off by default, box_alpha=0.0)
            ax.add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='y',
                          linewidth=lwidth, alpha=box_alpha))
            if gt_classes is not None:
                show_text = get_gt_class_string(gt_classes[1][i], dataset)
                ax.text(
                    bbox[0], bbox[3] + 10,
                    show_text,
                    fontsize=fsize,
                    family='sans-serif',
                    bbox=dict(
                        facecolor='y', alpha=0.4, pad=0, edgecolor='none'),
                    color='white')

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
Exemplo n.º 15
0
def vis_one_image2(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='jpg'):
    """Visual debugging of detections."""

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    output_name = os.path.basename(im_name) + '.txt'
    file = open(os.path.join(output_dir, '{}'.format(output_name)), 'w')
    class_list = []

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # masks and keypoints are not drawn in this variant; only record the
        # detected class strings
        class_list.append(get_class_string(classes[i], score, dataset))
        file.write(get_class_string(classes[i], score, dataset) + '\n')

    file.close()
    return class_list
Exemplo n.º 16
0
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='pdf'):
    """Visual debugging of detections."""
    if output_dir is not None:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    if output_dir is not None:
        fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # preprocess the boxes
    if thresh < 0:
        # When VIS_TH is less than zero, keep only the -thresh highest-scoring
        # boxes (e.g. thresh=-5 keeps the top 5 detections)
        sorted_inds = np.argsort(-boxes[:, -1])
        boxes = boxes[sorted_inds[:-int(thresh)]]
        classes = [classes[_] for _ in sorted_inds[:-int(thresh)]]

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    _boxes = []
    texts = []
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue
        if len(_boxes) > 0 and (bbox == _boxes[-1][:4]).all():
            # Same box, merge prediction
            texts[-1] += '/' + get_class_string(classes[i], score, dataset)
        else:
            _boxes.append(boxes[i])
            texts.append(get_class_string(classes[i], score, dataset))
    boxes = np.stack(_boxes)

    mask_color_id = 0

    for i in range(len(boxes)):
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # print(dataset.classes[classes[i]], score)
        print(texts[i])
        # show box (off by default, box_alpha=0.0)
        ax.add_patch(
            plt.Rectangle(
                (bbox[0], bbox[1]),
                bbox[2] - bbox[0],
                bbox[3] - bbox[1],
                fill=False,
                edgecolor='r',  # '#66FF66' if '@' in texts[i] else '#0099FF'
                linewidth=5,
                alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0],
                bbox[1] + 6,
                texts[i].split(' ')[0],  # get_class_string(classes[i], score, dataset)
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='#66FF66' if '@' in texts[i] else '#0099FF',
                    alpha=0.4,
                    pad=0,
                    edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = ax.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    ax.plot(kps[0, i1],
                            kps[1, i1],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)
                if kps[2, i2] > kp_thresh:
                    ax.plot(kps[0, i2],
                            kps[1, i2],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

        if output_dir is not None:
            output_name = os.path.basename(im_name) + '.' + ext
            fig.savefig(os.path.join(output_dir, '{}'.format(output_name)),
                        dpi=dpi)
            plt.close('all')
        else:
            plt.plot()
Exemplo n.º 17
0
def vis_one_image(im,
                  im_name,
                  boxes,
                  segms=None,
                  keypoints=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext=None):
    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)
    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return
    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)
    color_list = colormap(rgb=True) / 255
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    print("areas%%%%%%%%%%%%%%%%%")
    print(areas)
    sorted_inds = np.argsort(-areas)
    print("sorted_inds&&&&&&&&&&&&&&&&&&")
    print(sorted_inds)
    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        print("zhege IIIIIIIIIII")
        print(i)
        if score < thresh:
            continue
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))
        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string(classes[i], score, dataset),
                    fontsize=3,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')
        if segms is not None and len(segms) > i:
            print("duoshaogeIIIIIIIIIIIIIIIIIIIIIIIIIII")
            print(i)
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1
            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]
            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)
            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=0.2,
                                  alpha=0.5)
                ax.add_patch(polygon)
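                # The block below expands every repeated contour coordinate into
                # full horizontal / vertical runs of integer points, dedupes them
                # into B, and finally keeps a small sample of them (Bao).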
                woca = c.reshape((-1, 2))
                arr = np.array(woca)
                key = np.unique(woca)
                result = {}
                k_qu = []
                arr_end = []
                arr_end2 = []
                arr_endlast = []
                A = []
                B = []
                Bplus = []
                jjj = []
                Bwanmei = []
                Bcao = []
                Bao = []

                for k in key:
                    mask = (arr == k)
                    arr_new = arr[mask]
                    v = arr_new.size
                    result[k] = v
                    x = np.argwhere(arr == k)
                    if v > 1:
                        x = np.argwhere(arr == k)
                        x = np.array(x)
                        x0 = arr[:, 0]
                        y0 = arr[:, 1]
                        y0lie = []
                        for i in range(0, len(x0)):
                            if x0[i] != k:
                                pass
                            if x0[i] == k:
                                y0lie.append(y0[i])
                        y0lienew = []
                        arr_new = []
                        arr_new_2 = []
                        if y0lie == []:
                            pass
                        else:
                            miny0 = np.min(y0lie)
                            maxy0 = np.max(y0lie)
                            for i in range(miny0, maxy0 + 1):
                                y0lienew.append(i)
                            y0liezuizhong = []
                            if y0lienew == []:
                                pass
                            else:
                                miny0lienew = np.min(y0lienew)
                                maxy0lienew = np.max(y0lienew)
                                for i in range(miny0lienew, maxy0lienew + 1):
                                    y0liezuizhong.append(i)
                            for i in range(0, len(y0liezuizhong)):
                                arr_temp = [k, y0liezuizhong[i]]
                                arr_new.append(arr_temp)
                            arr_end.append(arr_new)
                        x0lie = []
                        for i in range(0, len(y0)):
                            if y0[i] != k:
                                pass
                            if y0[i] == k:
                                x0lie.append(x0[i])
                        x0lienew = []
                        arr_new2 = []
                        if x0lie == []:
                            pass
                        else:
                            minx0 = np.min(x0lie)
                            maxx0 = np.max(x0lie)
                            for i in range(minx0, maxx0 + 1):
                                x0lienew.append(i)
                            x0liezuizhong = []
                            if x0lienew == []:
                                pass
                            else:
                                minx0lienew = np.min(x0lienew)
                                maxx0lienew = np.max(x0lienew)
                                for i in range(minx0lienew, maxx0lienew + 1):
                                    x0liezuizhong.append(i)
                            for i in range(0, len(x0liezuizhong)):
                                arr_temp = [x0liezuizhong[i], k]
                                arr_new2.append(arr_temp)
                            arr_end2.append(arr_new2)
                arr_endlast = arr_end + arr_end2
                A = list(chain(*arr_endlast))
                B = np.array(list(set([tuple(t) for t in A])))
                if len(B) > 4:
                    Bplus = random.sample(B, 2)
                if len(B) < 4:
                    jjj = arr_endlast
                Bwanmei = Bplus + jjj
                Bcaotmp = list(chain(*Bwanmei))
                Bcao = np.array(Bcaotmp)
                Bao = Bcao.reshape(-1, 2)
                print("#####################################")
                print(Bao)
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)
                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)
    plt.close('all')
Exemplo n.º 18
0
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='pdf',
                  gt_entry=None):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    matches = []
    wrong_classes = []

    if gt_entry is not None:
        gt_boxes = gt_entry['boxes']
        gt_classes = gt_entry['gt_classes']

        areas_gt = (gt_boxes[:, 2] - gt_boxes[:, 0]) * (gt_boxes[:, 3] -
                                                        gt_boxes[:, 1])
        sorted_inds_gt = np.argsort(-areas_gt)

        matches, wrong_classes, matches_gt = match_gt_dt(
            boxes, sorted_inds, gt_boxes, sorted_inds_gt, classes, gt_classes,
            thresh)

        for i in sorted_inds_gt:
            bbox = gt_boxes[i, :]

            # only add ground-truth box if not matched
            if matches_gt[i] == 0 and not cfg.VIS.ONLY_DETS:
                ax.add_patch(
                    plt.Rectangle((bbox[0], bbox[1]),
                                  bbox[2] - bbox[0],
                                  bbox[3] - bbox[1],
                                  fill=False,
                                  edgecolor=cfg.VIS.GT_COLOR,
                                  linewidth=cfg.VIS.BOX.LINEWIDTH,
                                  alpha=box_alpha))
                if show_class or cfg.VIS.GT_SHOW_CLASS:
                    ax.text(bbox[0] + 1,
                            bbox[1] - 6,
                            get_class_string(gt_classes[i], 1.0, dataset),
                            fontsize=cfg.VIS.LABEL.FONTSIZE,
                            family=cfg.VIS.LABEL.FAMILY,
                            weight=cfg.VIS.LABEL.WEIGHT,
                            bbox=dict(facecolor=cfg.VIS.LABEL.GT_TEXTCOLOR,
                                      alpha=cfg.VIS.LABEL.ALPHA,
                                      pad=cfg.VIS.LABEL.PAD,
                                      edgecolor='none'),
                            color=cfg.VIS.LABEL.GT_TEXTCOLOR)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        edge_color = 'b'
        text_color = 'white'
        if gt_entry is not None and not cfg.VIS.ONLY_DETS:
            if matches[i] == -1:
                edge_color = cfg.VIS.FP_COLOR
                text_color = cfg.VIS.LABEL.FP_TEXTCOLOR
            elif matches[i] == 0:
                edge_color = cfg.VIS.FP_COLOR
                text_color = cfg.VIS.LABEL.FP_TEXTCOLOR
            elif matches[i] == 1:
                edge_color = cfg.VIS.DT_COLOR
                text_color = cfg.VIS.LABEL.DT_TEXTCOLOR

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor=edge_color,
                          linewidth=cfg.VIS.BOX.LINEWIDTH,
                          alpha=cfg.VIS.BOX.ALPHA))

        # do not show label of not matched detections
        # if gt-boxes drawn: show_classes always for wrong (red) detections
        if not cfg.VIS.ONLY_DETS and (gt_entry is not None and matches[i] == 0
                                      and cfg.VIS.FP_SHOW_CLASS):
            if cfg.VIS.FP_SHOW_CORRECT_CLASS:
                ax.text(bbox[0] + 11,
                        bbox[1] - 6,
                        get_class_string(classes[i], score, dataset) +
                        '\n({})'.format(
                            get_class_string(wrong_classes[i], 1.0, dataset)),
                        fontsize=cfg.VIS.LABEL.FONTSIZE,
                        family=cfg.VIS.LABEL.FAMILY,
                        weight=cfg.VIS.LABEL.WEIGHT,
                        bbox=dict(facecolor=edge_color,
                                  alpha=cfg.VIS.LABEL.ALPHA,
                                  pad=cfg.VIS.LABEL.PAD,
                                  edgecolor='none'),
                        color=text_color)
            else:
                ax.text(bbox[0] + 1,
                        bbox[1] - 6,
                        get_class_string(classes[i], score, dataset),
                        fontsize=cfg.VIS.LABEL.FONTSIZE,
                        family=cfg.VIS.LABEL.FAMILY,
                        weight=cfg.VIS.LABEL.WEIGHT,
                        bbox=dict(facecolor=edge_color,
                                  alpha=cfg.VIS.LABEL.ALPHA,
                                  pad=cfg.VIS.LABEL.PAD,
                                  edgecolor='none'),
                        color=text_color)
        elif not cfg.VIS.ONLY_DETS and (gt_entry is not None and matches[i]
                                        == 1 and cfg.VIS.DT_SHOW_CLASS):
            ax.text(bbox[0] + 1,
                    bbox[1] - 6,
                    get_class_string(classes[i], score, dataset),
                    fontsize=cfg.VIS.LABEL.FONTSIZE,
                    family=cfg.VIS.LABEL.FAMILY,
                    weight=cfg.VIS.LABEL.WEIGHT,
                    bbox=dict(facecolor=edge_color,
                              alpha=cfg.VIS.LABEL.ALPHA,
                              pad=cfg.VIS.LABEL.PAD,
                              edgecolor='none'),
                    color=text_color)
        elif show_class and cfg.VIS.ONLY_DETS:
            ax.text(bbox[0] + 1,
                    bbox[1] - 6,
                    get_class_string(classes[i], score, dataset),
                    fontsize=cfg.VIS.LABEL.FONTSIZE,
                    family=cfg.VIS.LABEL.FAMILY,
                    weight=cfg.VIS.LABEL.WEIGHT,
                    bbox=dict(facecolor=edge_color,
                              alpha=cfg.VIS.LABEL.ALPHA,
                              pad=cfg.VIS.LABEL.PAD,
                              edgecolor='none'),
                    color=text_color)

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = ax.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    ax.plot(kps[0, i1],
                            kps[1, i1],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)
                if kps[2, i2] > kp_thresh:
                    ax.plot(kps[0, i2],
                            kps[1, i2],
                            '.',
                            color=colors[l],
                            markersize=3.0,
                            alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = ax.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

        output_name = os.path.basename(im_name) + '.' + ext
        fig.savefig(os.path.join(output_dir, '{}'.format(output_name)),
                    dpi=dpi)
        plt.close('all')
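This variant takes all of its styling from a cfg.VIS config node and colors each detection by how it matched the ground truth (via match_gt_dt, which is defined elsewhere in the repository). For reference, the fields it reads are collected below as an illustrative sketch; the values are placeholders of my own, not the repository's defaults:

from easydict import EasyDict as edict

VIS = edict()
VIS.ONLY_DETS = False              # if True, skip the GT overlay and match coloring
VIS.GT_COLOR = 'g'                 # unmatched ground-truth boxes
VIS.FP_COLOR = 'r'                 # unmatched or wrongly classified detections
VIS.DT_COLOR = 'b'                 # correctly matched detections
VIS.GT_SHOW_CLASS = True
VIS.FP_SHOW_CLASS = True
VIS.FP_SHOW_CORRECT_CLASS = True   # also print the GT class next to a wrong prediction
VIS.DT_SHOW_CLASS = True
VIS.BOX = edict(LINEWIDTH=1.0, ALPHA=0.8)
VIS.LABEL = edict(FONTSIZE=6, FAMILY='serif', WEIGHT='normal', ALPHA=0.4, PAD=0,
                  GT_TEXTCOLOR='white', FP_TEXTCOLOR='white', DT_TEXTCOLOR='white')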
Exemplo n.º 19
0
    'left_hip',
    'left_knee',
    'left_ankle',
    'left_eye',
    'right_eye',
    'left_ear',
    'right_ear',
    'bkg',  # special case for bkg
]


def get_json(data):
    return json.dumps(data, indent=None, separators=(',', ':'))
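get_json produces the most compact JSON encoding (no indentation, no spaces after the separators), presumably to keep the serialized pose records small. For example:

>>> get_json({'x': 1.0, 'keyps': [1, 2]})
'{"x":1.0,"keyps":[1,2]}'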


coco_keyps, _ = keypoint_utils.get_keypoints()

BOX_THRESH = 0.9
KEYP_THRESH = 2
MAX_X = 720
MAX_Y = 1280


def convert_pose_data(data):
    new_data = np.full(38, np.nan)

    for i, keyp in enumerate(KEYPS_IDX):
        idx = i * 2
        if keyp == 'bkg':
            pass
        elif keyp == 'neck':
Exemplo n.º 20
0
def vis_one_image(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext=None):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None and len(segms) > 0:
        masks = mask_util.decode(segms)
    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    print ("arv9")
    print('canshu%s:%s' % (9, sys.argv[9]))
    C=[]
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False, edgecolor='g',
                          linewidth=0.5, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]
            _, contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask,
                    edgecolor='w', linewidth=0.2,
                    alpha=0.5)
                ax.add_patch(polygon)
                # Expand every repeated contour coordinate into full horizontal /
                # vertical runs of integer points and dedupe them into B.
                woca = c.reshape((-1, 2))
                arr = np.array(woca)
                key = np.unique(woca)
                result = {}
                k_qu = []
                arr_end = []
                arr_end2 = []
                arr_endlast = []
                A = []
                B = []
                Bplus = []
                jjj = []
                Bwanmei = []
                Bcao = []
                Bao = []
                for k in key:
                    mask = (arr == k)
                    arr_new = arr[mask]
                    v = arr_new.size
                    result[k] = v
                    x = np.argwhere(arr == k)
                    if v > 1:
                        x = np.argwhere(arr == k)
                        x = np.array(x)
                        x0 = arr[:, 0]
                        y0 = arr[:, 1]
                        y0lie = []
                        for i in range(0, len(x0)):
                            if x0[i] != k:
                                pass
                            if x0[i] == k:
                                y0lie.append(y0[i])
                        y0lienew = []
                        arr_new = []
                        arr_new_2 = []
                        if y0lie == []:
                            pass
                        else:
                            miny0 = np.min(y0lie)
                            maxy0 = np.max(y0lie)
                            for i in range(miny0, maxy0 + 1):
                                y0lienew.append(i)
                            y0liezuizhong = []
                            if y0lienew == []:
                                pass
                            else:
                                miny0lienew = np.min(y0lienew)
                                maxy0lienew = np.max(y0lienew)
                                for i in range(miny0lienew, maxy0lienew + 1):
                                    y0liezuizhong.append(i)
                            for i in range(0, len(y0liezuizhong)):
                                arr_temp = [k, y0liezuizhong[i]]
                                arr_new.append(arr_temp)
                            arr_end.append(arr_new)
                        x0lie = []
                        for i in range(0, len(y0)):
                            if y0[i] != k:
                                pass
                            if y0[i] == k:
                                x0lie.append(x0[i])
                        x0lienew = []
                        arr_new2 = []
                        if x0lie == []:
                            pass
                        else:
                            minx0 = np.min(x0lie)
                            maxx0 = np.max(x0lie)
                            for i in range(minx0, maxx0 + 1):
                                x0lienew.append(i)
                            x0liezuizhong = []
                            if x0lienew == []:
                                pass
                            else:
                                minx0lienew = np.min(x0lienew)
                                maxx0lienew = np.max(x0lienew)
                                for i in range(minx0lienew, maxx0lienew + 1):
                                    x0liezuizhong.append(i)
                            for i in range(0, len(x0liezuizhong)):
                                arr_temp = [x0liezuizhong[i], k]
                                arr_new2.append(arr_temp)
                            arr_end2.append(arr_new2)
                arr_endlast = arr_end + arr_end2
                A = list(chain(*arr_endlast))
                B = np.array(list(set([tuple(t) for t in A])))
                print("********************************")

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(
                        kps[0, i1], kps[1, i1], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(
                        kps[0, i2], kps[1, i2], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                    alpha=0.7)
    #print ("CCCCCCCCCCCCCCCCCCC*********************")
    #C=np.array(C)
    #print(C)
    output_name = os.path.basename(im_name)
    fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
Exemplo n.º 21
0
def vis_one_image(im,
                  im_name,
                  output_dir,
                  boxes,
                  segms=None,
                  keypoints=None,
                  thresh=0.9,
                  kp_thresh=2,
                  dpi=200,
                  box_alpha=0.0,
                  dataset=None,
                  show_class=False,
                  ext='jpg'):
    """Visual debugging of detections."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return

    dataset_keypoints, _ = keypoint_utils.get_keypoints()

    if segms is not None:
        masks = mask_util.decode(segms)

    color_list = colormap(rgb=True) / 255

    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    ax.imshow(im)

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1],
                          fill=False,
                          edgecolor='g',
                          linewidth=0.5,
                          alpha=box_alpha))

        if show_class:
            ax.text(bbox[0],
                    bbox[1] - 2,
                    get_class_string(classes[i], score, dataset),
                    fontsize=10,
                    family='serif',
                    bbox=dict(facecolor='g',
                              alpha=0.4,
                              pad=0,
                              edgecolor='none'),
                    color='white')

        # show mask
        if segms is not None and len(segms) > i:
            img = np.ones(im.shape)
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1

            w_ratio = .4
            for c in range(3):
                color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
            for c in range(3):
                img[:, :, c] = color_mask[c]
            e = masks[:, :, i]

            _, contour, hier = cv2.findContours(e.copy(), cv2.RETR_CCOMP,
                                                cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(c.reshape((-1, 2)),
                                  fill=True,
                                  facecolor=color_mask,
                                  edgecolor='w',
                                  linewidth=1.2,
                                  alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = plt.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    plt.plot(kps[0, i1],
                             kps[1, i1],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

                if kps[2, i2] > kp_thresh:
                    plt.plot(kps[0, i2],
                             kps[1, i2],
                             '.',
                             color=colors[l],
                             markersize=3.0,
                             alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (kps[:2, dataset_keypoints.index('right_hip')] +
                       kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh
                    and kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines)],
                         linewidth=1.0,
                         alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = plt.plot(x, y)
                plt.setp(line,
                         color=colors[len(kp_lines) + 1],
                         linewidth=1.0,
                         alpha=0.7)

    image = fig2data(fig)
    image = image[..., ::-1]

    cv2.imshow("img", image)
    cv2.waitKey(1)

    # output_name = os.path.basename(im_name) + '.' + ext
    # fig.savefig(os.path.join(output_dir, '{}'.format(output_name)), dpi=dpi)
    plt.close('all')
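This variant never writes to disk; it converts the rendered figure to an array with fig2data (not shown in the snippet) and displays it with cv2.imshow, flipping RGB to BGR on the way. A minimal sketch of such a helper, assuming the Agg backend (the actual fig2data used here may differ):

import numpy as np

def fig2data(fig):
    # Render the figure and copy the RGB canvas into a (H, W, 3) uint8 array.
    fig.canvas.draw()
    w, h = fig.canvas.get_width_height()
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    return img.reshape(h, w, 3)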
Exemplo n.º 22
0
def vis_one_image(
        im, im_name, output_dir, boxes, segms=None, keypoints=None, thresh=0.9,
        kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
        ext='png', class_name=None, save=False, draw_bbox=False, gray_masking=True):
    """Visual debugging of detections."""
    
    if save:
        if class_name is not None:
            output_dir = os.path.join(output_dir, class_name)

        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        
    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        fig = plt.figure(frameon=False)
        fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
        ax = plt.Axes(fig, [0., 0., 1., 1.])
        ax.axis('off')
        fig.add_axes(ax)
        if gray_masking:
            im = create_grayscale_image(im)
        ax.imshow(im)
        buffer = io.BytesIO()
        output_name = os.path.basename(im_name) + '.' + ext
        fig.savefig(buffer, dpi=dpi)
        buffer.seek(0)
        pil_image = Image.open(buffer).convert("RGB")
        if save:
            pil_image.save(os.path.join(output_dir, '{}'.format(output_name)), 'png')
        plt.close('all')
        buffer.close()
        return pil_image

    if segms is not None:
        masks = mask_util.decode(segms)

    dataset_keypoints, _ = keypoint_utils.get_keypoints()
    kp_lines = kp_connections(dataset_keypoints)
    cmap = plt.get_cmap('rainbow')
    colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    valid_masks = []
    for i in sorted_inds:
        score = boxes[i, -1]
        if score < thresh:
            continue
        if segms is not None and len(segms) > i:
            e = masks[:, :, i]
            valid_masks.append(e)
    valid_masks = np.stack(valid_masks, axis=2)
    fig = plt.figure(frameon=False)
    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.axis('off')
    fig.add_axes(ax)
    if gray_masking:
        im = create_grayscale_image(im, np.sum(valid_masks, axis=2)>0)
    ax.imshow(im)
    buffer = io.BytesIO()

    # Generate random colors
    color_list = random_colors(len(sorted_inds))
    mask_color_id = 0
    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue
        # show box (off by default, box_alpha=0.0)
        colorval = "#%02x%02x%02x" % (255, 255, 110)
        if draw_bbox:
            ax.add_patch(
                plt.Rectangle((bbox[0], bbox[1]),
                            bbox[2] - bbox[0],
                            bbox[3] - bbox[1],
                            fill=False, edgecolor=colorval,
                            linewidth=0.8, alpha=box_alpha))

        if show_class:
            ax.text(
                bbox[0], bbox[1] - 2,
                get_class_string(classes[i], score, dataset),
                fontsize=3,
                family='serif',
                bbox=dict(
                    facecolor='g', alpha=0.4, pad=0, edgecolor='none'),
                color='white')

        # show mask
        if segms is not None and len(segms) > i:
            color_mask = color_list[mask_color_id]
            mask_color_id += 1

            e = masks[:, :, i]

            contour, hier = cv2.findContours(
                e.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)

            for c in contour:
                polygon = Polygon(
                    c.reshape((-1, 2)),
                    fill=True, facecolor=color_mask, edgecolor=color_mask, linewidth=1.0,
                    alpha=0.5)
                ax.add_patch(polygon)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            kps = keypoints[i]
            plt.autoscale(False)
            for l in range(len(kp_lines)):
                i1 = kp_lines[l][0]
                i2 = kp_lines[l][1]
                if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
                    x = [kps[0, i1], kps[0, i2]]
                    y = [kps[1, i1], kps[1, i2]]
                    line = ax.plot(x, y)
                    plt.setp(line, color=colors[l], linewidth=1.0, alpha=0.7)
                if kps[2, i1] > kp_thresh:
                    ax.plot(
                        kps[0, i1], kps[1, i1], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)
                if kps[2, i2] > kp_thresh:
                    ax.plot(
                        kps[0, i2], kps[1, i2], '.', color=colors[l],
                        markersize=3.0, alpha=0.7)

            # add mid shoulder / mid hip for better visualization
            mid_shoulder = (
                kps[:2, dataset_keypoints.index('right_shoulder')] +
                kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
            sc_mid_shoulder = np.minimum(
                kps[2, dataset_keypoints.index('right_shoulder')],
                kps[2, dataset_keypoints.index('left_shoulder')])
            mid_hip = (
                kps[:2, dataset_keypoints.index('right_hip')] +
                kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
            sc_mid_hip = np.minimum(
                kps[2, dataset_keypoints.index('right_hip')],
                kps[2, dataset_keypoints.index('left_hip')])
            if (sc_mid_shoulder > kp_thresh and
                    kps[2, dataset_keypoints.index('nose')] > kp_thresh):
                x = [mid_shoulder[0], kps[0, dataset_keypoints.index('nose')]]
                y = [mid_shoulder[1], kps[1, dataset_keypoints.index('nose')]]
                line = ax.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines)], linewidth=1.0, alpha=0.7)
            if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
                x = [mid_shoulder[0], mid_hip[0]]
                y = [mid_shoulder[1], mid_hip[1]]
                line = ax.plot(x, y)
                plt.setp(
                    line, color=colors[len(kp_lines) + 1], linewidth=1.0,
                    alpha=0.7)

    output_name = os.path.basename(im_name) + '.' + ext
    fig.savefig(buffer, dpi=dpi)
    buffer.seek(0)
    pil_image = Image.open(buffer).convert("RGB")
    if save:
        pil_image.save(os.path.join(output_dir, '{}'.format(output_name)), 'png')
    plt.close('all')
    buffer.close()
    return pil_image
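Since this version returns a PIL image (and only saves when save=True), it drops easily into notebooks or downstream pipelines. A hypothetical call, with the detection inputs (cls_boxes, cls_segms, cls_keyps) and the dataset object named purely for illustration:

vis = vis_one_image(
    im, 'demo', '/tmp/vis', cls_boxes,
    segms=cls_segms, keypoints=cls_keyps,
    thresh=0.7, dataset=dummy_dataset, show_class=True,
    save=False, draw_bbox=True, gray_masking=True)
vis.save('demo_overlay.png')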