Example no. 1
0
def test():
    """Manual smoke-test: draw the GT boxes of one COCO image and inspect
    which candidate anchors best overlap the first GT box."""
    base_path = '/Users/shuzhiliu/Google Drive/KyoceraRobotAI/mmdetection_tools/data'
    imagefolder_path = '/Users/shuzhiliu/Google Drive/KyoceraRobotAI/mmdetection_tools/LocalData_Images'
    dataset_id = '1940091026744'
    image_id = '20191119T063709-cca043ed-32fe-4da0-ba75-e4a12b88eef4'
    coco = CocoTools(
        json_file=f"{base_path}/{dataset_id}/annotations/train.json",
        image_folder_path=imagefolder_path)
    image = coco.get_original_image(image_id=image_id)
    print(coco.images)
    gt_boxes = coco.get_original_bboxes_list(image_id=image_id)
    print(gt_boxes)
    for box in gt_boxes:
        # Random colour per box; boxes appear to be stored (y1, x1, y2, x2)
        # given the coordinate swap below — TODO confirm against CocoTools.
        colour = (random.randint(0, 255), random.randint(0, 255),
                  random.randint(0, 255))
        cv.rectangle(image, (box[1], box[0]), (box[3], box[2]), colour, 4)
    plt.imshow(image)
    plt.show()

    anchor_gen = GenCandidateAnchors()
    print(len(anchor_gen.anchor_candidates_list))
    ious = BboxTools.ious(anchor_gen.anchor_candidates_list, gt_boxes[0])
    # Force the best-overlapping anchor to be marked positive.
    ious[np.argmax(ious)] = 1
    print(len(ious))
    # Hard-coded grid: 23x40 feature-map cells with 9 anchors per cell.
    ious_grid = np.reshape(ious, newshape=(23, 40, 9))
    positive_index = np.where(ious_grid == 1)
    print(positive_index)
Example no. 2
0
    def gen_target_anchor_bboxes_classes_for_debug(self,
                                                   image_id,
                                                   debuginfo=False):
        """Return (target_anchor_bboxes, target_classes) for *image_id*.

        For every ground-truth box, candidate anchors with IoU above
        ``self.threshold_iou_rpn`` — plus the single best-matching anchor,
        even if below the threshold — are selected as positives, and each
        selected anchor is paired with the sparse class label of its GT box.

        Args:
            image_id: COCO image id looked up in ``self.dataset_coco``.
            debuginfo: accepted for API compatibility; unused here.

        Returns:
            Tuple (target_anchor_bboxes, target_classes) as parallel lists.
        """
        bboxes = self.dataset_coco.get_original_bboxes_list(image_id=image_id)
        sparse_targets = self.dataset_coco.get_original_category_sparse_list(
            image_id=image_id)

        bboxes_ious = []  # per GT box: IoU-based labels over all candidates
        for bbox in bboxes:
            ious = BboxTools.ious(
                self.gen_candidate_anchors.anchor_candidates_list, bbox)
            ious_np = np.asarray(ious)  # hoisted: used by both np.where calls
            # 0.5 marks "ignore" (other implementations use -1).  np.float was
            # removed in NumPy 1.24; use the explicit float64 dtype instead.
            ious_temp = np.full(len(ious), 0.5, dtype=np.float64)
            ious_temp = np.where(ious_np > self.threshold_iou_rpn, 1, ious_temp)
            ious_temp = np.where(ious_np < 0.3, 0, ious_temp)
            # Best-matching anchor is always positive, even below threshold.
            ious_temp[np.argmax(ious)] = 1
            bboxes_ious.append(ious_temp)

        # For each GT box, gather its positive anchors and class targets.
        target_anchor_bboxes = []
        target_classes = []
        for index, bbox_ious in enumerate(bboxes_ious):
            ious_grid = np.reshape(
                bbox_ious,
                newshape=(self.gen_candidate_anchors.h,
                          self.gen_candidate_anchors.w,
                          self.gen_candidate_anchors.n_anchors))
            candidate_boxes = self.gen_candidate_anchors.anchor_candidates[
                np.where(ious_grid == 1)]
            for i in range(candidate_boxes.shape[0]):
                target_anchor_bboxes.append(candidate_boxes[i])
                target_classes.append(sparse_targets[index])
        return target_anchor_bboxes, target_classes
Example no. 3
0
def test():
    """Manual check: overlay GT boxes on one image and probe anchor IoUs
    against the first GT box (paths/ids left blank to fill in)."""
    base_path = ''
    imagefolder_path = ''
    dataset_id = ''
    image_id = ''
    coco_data = CocoTools(json_file=f"{base_path}/{dataset_id}/annotations/train.json",
                          image_folder_path=imagefolder_path)
    frame = coco_data.get_original_image(image_id=image_id)
    print(coco_data.images)
    gt_bboxes = coco_data.get_original_bboxes_list(image_id=image_id)
    print(gt_bboxes)
    for gt in gt_bboxes:
        # One random colour per box; boxes appear to be (y1, x1, y2, x2)
        # given the coordinate swap below — TODO confirm against CocoTools.
        rand_color = (random.randint(0, 255), random.randint(0, 255),
                      random.randint(0, 255))
        cv.rectangle(frame, (gt[1], gt[0]), (gt[3], gt[2]), rand_color, 4)
    plt.imshow(frame)
    plt.show()

    anchors = GenCandidateAnchors()
    print(len(anchors.anchor_candidates_list))
    overlap = BboxTools.ious(anchors.anchor_candidates_list, gt_bboxes[0])
    # Force the best-overlapping anchor to be marked positive.
    overlap[np.argmax(overlap)] = 1
    print(len(overlap))
    # Hard-coded grid: 23x40 feature-map cells with 9 anchors per cell.
    overlap_grid = np.reshape(overlap, newshape=(23, 40, 9))
    hit_index = np.where(overlap_grid == 1)
    print(hit_index)
Example no. 4
0
    def gen_train_target_anchor_boxreg_for_rpn(self, image_id, debuginfo=False):
        """Build RPN classification and box-regression targets for *image_id*.

        Args:
            image_id: COCO image id looked up in ``self.dataset_coco``.
            debuginfo: when True, print summary statistics of the targets.

        Returns:
            Tuple (anchors_target, bbox_reg_target) where
            anchors_target is an (h, w, n_anchors) array of per-anchor labels
            — 1 positive, 0 negative, 0.5 ignore — and bbox_reg_target is an
            (h, w, n_anchors, 4) array, non-zero only at positive anchors.
        """
        bboxes = self.dataset_coco.get_original_bboxes_list(image_id=image_id)

        # === resize ===

        bboxes_ious = []  # per GT box: IoU-based labels over all candidates
        for bbox in bboxes:
            ious = BboxTools.ious(self.gen_candidate_anchors.anchor_candidates_list, bbox)
            ious_np = np.asarray(ious)  # hoisted: used by both np.where calls
            # 0.5 marks "ignore" (other implementations use -1).  np.float was
            # removed in NumPy 1.24; use the explicit float64 dtype instead.
            ious_temp = np.full(len(ious), 0.5, dtype=np.float64)
            ious_temp = np.where(ious_np > self.threshold_iou_rpn, 1, ious_temp)
            ious_temp = np.where(ious_np < 0.3, 0, ious_temp)
            # Best-matching anchor is always positive, even below threshold.
            ious_temp[np.argmax(ious)] = 1
            bboxes_ious.append(ious_temp)

        # For each candidate anchor keep the strongest label over all GT boxes.
        anchors_target = np.max(np.array(bboxes_ious), axis=0)
        anchors_target = np.reshape(anchors_target, newshape=(
            self.gen_candidate_anchors.h, self.gen_candidate_anchors.w,
            self.gen_candidate_anchors.n_anchors))
        if debuginfo:
            print(f"[Debug INFO] Number of total gt bboxes :{len(bboxes)}")
            print(
                f"[Debug INFO] Number of total target anchors: {anchors_target[np.where(anchors_target == 1)].shape[0]}")
            print(f"[Debug INFO] Shape of anchors_target: {anchors_target.shape}")
            print(
                f"[Debug INFO] Selected anchors: \n {self.gen_candidate_anchors.anchor_candidates[np.where(anchors_target == 1)]}")

        # For each GT box, write its regression target into every anchor
        # that was marked positive for that box.
        bbox_reg_target = np.zeros(
            shape=(self.gen_candidate_anchors.h, self.gen_candidate_anchors.w,
                   self.gen_candidate_anchors.n_anchors, 4),
            dtype=np.float64)
        for index, bbox_ious in enumerate(bboxes_ious):
            ious_grid = np.reshape(bbox_ious, newshape=(
                self.gen_candidate_anchors.h, self.gen_candidate_anchors.w,
                self.gen_candidate_anchors.n_anchors))
            gt_box = bboxes[index]
            positive_idx = np.where(ious_grid == 1)  # reused for read & write
            candidate_boxes = self.gen_candidate_anchors.anchor_candidates[positive_idx]
            box_reg = BboxTools.bbox_regression_target(candidate_boxes, gt_box)
            bbox_reg_target[positive_idx] = box_reg

        return anchors_target, bbox_reg_target
Example no. 5
0
    def gen_train_data_roi_one(self, image_id, bbox_list):
        """Build RoI-head training samples for a single image.

        Proposals in *bbox_list* whose IoU with a ground-truth box exceeds
        ``self.threshold_iou_roi`` — plus the best-matching proposal per GT
        box — become positives paired with that box's class label and a
        regression target towards it.  The GT boxes themselves are appended
        as extra positives with zero regression targets.

        Args:
            image_id: COCO image id looked up in ``self.dataset_coco``.
            bbox_list: candidate boxes (e.g. RPN proposals) to match.

        Returns:
            Tuple of numpy arrays (input_images, input_box_filtered_by_iou,
            target_classes, target_bbox_reg), one entry per selected sample.
        """
        gt_bboxes = self.dataset_coco.get_original_bboxes_list(
            image_id=image_id)
        sparse_targets = self.dataset_coco.get_original_category_sparse_list(
            image_id=image_id)

        bboxes_ious = []  # per GT box: 0/1 positive mask over bbox_list
        for bbox in gt_bboxes:
            ious = BboxTools.ious(bbox_list, bbox)
            # np.float was removed in NumPy 1.24; use the explicit dtype.
            ious_temp = np.zeros(shape=(len(ious),), dtype=np.float64)
            ious_temp = np.where(
                np.asarray(ious) > self.threshold_iou_roi, 1, ious_temp)
            # Best-matching proposal is always positive, even below threshold.
            ious_temp[np.argmax(ious)] = 1
            bboxes_ious.append(ious_temp)

        original_img = self.gen_train_input_one(image_id)
        input_images = []
        input_box_filtered_by_iou = []
        target_classes = []
        target_bbox_reg = []
        # Positive proposals: class label + regression target towards GT box.
        for index_gt, bbox_ious in enumerate(bboxes_ious):
            candidate_boxes = np.asarray(bbox_list)[np.where(bbox_ious == 1)]
            for i in range(candidate_boxes.shape[0]):
                input_box_filtered_by_iou.append(
                    candidate_boxes[i].astype(np.float64))
                box_reg = BboxTools.bbox_regression_target(
                    pred_boxes=candidate_boxes[i].reshape((1, 4)),
                    gt_box=gt_bboxes[index_gt])
                target_bbox_reg.append(box_reg.ravel())
                target_classes.append(sparse_targets[index_gt])
                input_images.append(original_img.astype(np.float64))
        # GT boxes are perfect positives: zero regression offsets.
        for index_gt, bbox_gt in enumerate(gt_bboxes):
            input_images.append(original_img.astype(np.float64))
            input_box_filtered_by_iou.append(bbox_gt.astype(np.float64))
            target_classes.append(sparse_targets[index_gt])
            target_bbox_reg.append(np.zeros(4, dtype=np.float64))
        return np.asarray(input_images).astype(
            np.float64), np.asarray(input_box_filtered_by_iou), np.asarray(
                target_classes), np.asarray(target_bbox_reg)