Example #1
    def test_compute_gt(self):
        level = 3
        ds = voc.build_dataset('test/data/VOC2007', im_input_size=(512, 512))

        anchors = self.generate_anchors(config.AnchorsConfig(), 512)
        im, (l, bbs) = next(iter(ds.take(1)))

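        # Compute per-anchor regression targets and classification labels
        # for the single image (a batch dimension is added with expand_dims).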
        gt_reg, gt_labels = utils.anchors.anchor_targets_bbox(
            anchors, tf.expand_dims(im, 0), tf.expand_dims(bbs, 0),
            tf.expand_dims(l, 0), len(voc.IDX_2_LABEL))

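        # The last column of gt_reg flags anchors matched to a ground-truth box;
        # draw those anchors together with the ground-truth boxes.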
        nearest_anchors = anchors[gt_reg[0, :, -1] == 1].numpy()
        im_random = unnormalize_image(im)
        im_random = visualizer.draw_boxes(im_random, nearest_anchors)
        im_random = visualizer.draw_boxes(im_random, bbs, colors=[0, 0, 255])

        for label in l:
            print(voc.IDX_2_LABEL[int(label)])

        plt.imshow(im_random)
        plt.show(block=True)

        print('GT shapes:', gt_labels.shape, gt_reg.shape)
        print('Found any overlapping anchor?',
              np.any(gt_labels[:, :, -1] == 1.))
Example #2
    def test_regress_boxes(self):
        print('Regress anchors test')

        ds = voc.build_dataset('test/data/VOC2007', im_input_size=(512, 512))

        anchors = self.generate_anchors(config.AnchorsConfig(), 512)
        im, (l, bbs) = next(iter(ds.take(1)))

        gt_reg, gt_labels = utils.anchors.anchor_targets_bbox(
            anchors, tf.expand_dims(im, 0), tf.expand_dims(bbs, 0),
            tf.expand_dims(l, 0), len(voc.IDX_2_LABEL))

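        # Keep only the positive anchors and their regression targets
        # (the trailing match-flag column is dropped).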
        near_mask = gt_reg[0, :, -1] == 1
        nearest_regressors = tf.expand_dims(
            tf.boolean_mask(gt_reg[0], near_mask)[:, :-1], 0)
        nearest_anchors = tf.expand_dims(anchors[near_mask], 0)

        # apply regression to boxes
        regressed_boxes = utils.bndbox.regress_bndboxes(
            nearest_anchors, nearest_regressors)

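        # Draw the regressed boxes on the unnormalized image for visual inspection.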
        im_random = unnormalize_image(im)
        im_random = visualizer.draw_boxes(im_random, regressed_boxes[0])

        plt.imshow(im_random)
        plt.show(block=True)
Example #3
    def test_regress_boxes(self):
        print('Regress anchors test')

        level = 3
        ds = voc.build_dataset('test/data/VOC2007',
                               im_input_size=(512, 512))

        anchors = self.generate_anchors(config.AnchorsConfig(), 512)

        for im, (l, bbs) in ds.take(1):

            gt_reg, gt_labels = utils.anchors.anchor_targets_bbox(
                anchors.numpy(), im.numpy(), bbs.numpy(), l.numpy(),
                len(voc.IDX_2_LABEL))
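            # Select the positive anchors and their regression targets
            # (the last column is the match flag).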
            near_mask = gt_reg[0, :, -1] == 1
            nearest_regressors = tf.expand_dims(gt_reg[0, near_mask][:, :-1], 0)
            nearest_anchors = tf.expand_dims(anchors[near_mask], 0)

            # apply regression to boxes
            regressed_boxes = utils.bndbox.regress_bndboxes(nearest_anchors, 
                                                            nearest_regressors)

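            # Draw each regressed box with OpenCV and display the result.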
            im_random = im[0].numpy()
            for box in regressed_boxes[0].numpy():
                box = box.astype('int32')
                cv2.rectangle(im_random, (box[0], box[1]), (box[2], box[3]),
                              (0, 255, 0), 1)

            cv2.imshow('', im_random)
            cv2.waitKey()
Example #4
    def test_compute_gt(self):
        level = 3
        ds = voc.build_dataset('test/data/VOC2007', im_input_size=(512, 512))

        anchors = self.generate_anchors(config.AnchorsConfig(), 512)

        for im, (l, bbs) in ds.take(1):

            gt_reg, gt_labels = utils.anchors.anchor_targets_bbox(
                anchors, im, bbs, l, len(voc.IDX_2_LABEL))

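            # Draw the anchors matched to a ground-truth box (thin lines)
            # and the ground-truth boxes themselves (thick lines).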
            nearest_anchors = anchors[gt_reg[0, :, -1] == 1].numpy()
            im_random = im[0].numpy()
            for box in nearest_anchors:
                box = box.astype('int32')
                cv2.rectangle(im_random, (box[0], box[1]), (box[2], box[3]),
                              (0, 255, 0), 1)

            for box in bbs.numpy()[0]:
                box = box.astype('int32')
                cv2.rectangle(im_random, (box[0], box[1]), (box[2], box[3]),
                              (0, 0, 255), 3)

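            # Print the class name of every ground-truth label in the image.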
            for label in l[0]:
                print(voc.IDX_2_LABEL[int(label)])

            plt.imshow(im_random)
            plt.show(block=True)

            print('GT shapes:', gt_labels.shape, gt_reg.shape)
            print('Found any overlapping anchor?',
                  np.any(gt_labels[:, :, -1] == 1.))
Example #5
    def test_compute_gt(self):
        ds = voc.build_dataset('test/data/VOC2007',
                               im_input_size=(512, 512),
                               shuffle=False)
        ds = ds.skip(1).batch(1)

        wrapped_ds = utils.training.wrap_detection_dataset(ds, (512, 512), 20)
        anchors = self.generate_anchors(config.AnchorsConfig(), 512)

        im, (regressors, l) = next(iter(wrapped_ds.take(1)))

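        # Undo the input normalization for display, then select the anchors
        # flagged as positive by the wrapped dataset targets.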
        im = unnormalize_image(im[0])
        near_mask = regressors[0, :, -1] == 1
        nearest_regressors = tf.expand_dims(
            tf.boolean_mask(regressors[0], near_mask)[:, :-1], 0)
        nearest_anchors = tf.expand_dims(anchors[near_mask], 0)

        # apply regression to boxes
        regressed_boxes = utils.bndbox.regress_bndboxes(
            nearest_anchors, nearest_regressors)

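        # Draw the positive anchors and their regressed boxes in different
        # colors so they can be compared visually.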
        im = utils.visualizer.draw_boxes(im,
                                         nearest_anchors[0],
                                         colors=[(255, 255, 0)])
        im = utils.visualizer.draw_boxes(im,
                                         regressed_boxes[0],
                                         colors=[(0, 255, 255)])

        plt.imshow(im)
        plt.axis('off')
        plt.show(block=True)

        print('GT shapes:', l.shape, regressors.shape)
        print('Found any overlapping anchor?',
              tf.reduce_any(tf.equal(l[:, :, -1], 1.)))
Example #6
    def test_nms(self):
        n_classes = len(voc.LABEL_2_IDX)
        anchors_config = config.AnchorsConfig()

        ds = voc.build_dataset('test/data/VOC2007', im_input_size=(512, 512))

        anchors_gen = [
            utils.anchors.AnchorGenerator(size=anchors_config.sizes[i - 3],
                                          aspect_ratios=anchors_config.ratios,
                                          stride=anchors_config.strides[i - 3])
            for i in range(3, 8)
        ]

        sizes = (80, 40, 20, 10, 5)
        im, (l, bbs) = next(iter(ds.take(1)))

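        # Generate the anchors for every feature level and concatenate them.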
        anchors = [
            anchor_gen((size, size, 3))
            for anchor_gen, size in zip(anchors_gen, sizes)
        ]
        anchors = tf.concat(anchors, axis=0)

        gt_reg, gt_labels = utils.anchors.anchor_targets_bbox(
            anchors, tf.expand_dims(im, 0), tf.expand_dims(bbs, 0),
            tf.expand_dims(l, 0), n_classes)

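        # Keep only the anchors flagged as positive together with their
        # class scores; these are fed to NMS below.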
        box_score = gt_labels[0]
        true_idx = tf.reshape(tf.where(box_score[:, -1] == 1), [-1])

        box_score = tf.gather(box_score, true_idx)
        anchors = tf.gather(anchors, true_idx)

        before_nms_shape = anchors.shape

        anchors = tf.expand_dims(anchors, 0)
        box_score = tf.expand_dims(box_score[:, :-1], 0)
        boxes, labels, scores = bb_utils.nms(anchors, box_score)
        after_nms_shape = boxes[0].shape

        if before_nms_shape[0] != 0:
            self.assertTrue(after_nms_shape[0] < before_nms_shape[0],
                            'After nms boxes should be reduced')
        else:
            print('No ground truth anchors')

        im_random = utils.visualizer.draw_boxes(im, boxes[0])
        plt.imshow(im_random)
        plt.axis('off')
        plt.show(block=True)
Example #7
    def test_forward_inference(self):
        batch_size = 2
        num_classes = len(voc.IDX_2_LABEL)
        model = models.EfficientDet(num_classes=num_classes, D=0, weights=None)

        input_size = model.config.input_size

        ds = voc.build_dataset('test/data/VOC2007', im_input_size=input_size)
        images, annotations = next(iter(ds.take(1)))
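        # Run a single image through the detector in inference mode.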
        boxes, labels, scores = model(tf.expand_dims(images, 0),
                                      training=False)

        # TODO: migrate to tensors when output is padded
        for i in range(len(boxes)):
            self._compare_shapes(boxes[i].shape, [None, 4])
            self._compare_shapes(labels[i].shape, [None])