Example 1
    def test_object_detection_data_shape(self, config):
        """Check the tensor shape of classification dataset"""
        db = DBLoader()
        db.load_built_in(config.name)

        train_example = db.train.get_sample()
        test_example = db.test.get_sample()

        img_info = config.info["image"]
        expected_img_shape = (
            img_info["height"], img_info["width"], img_info["channel"])

        box_info = config.info["boxes"]
        nClass = box_info["classes"]
        nBox = max(
            box_info["train"]["max boxes of single data"],
            box_info["test"]["max boxes of single data"],
        )
        expected_box_shape = nBox, 4 + nClass

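        # A None in the expected image shape means that dimension may vary.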
        for example in [train_example, test_example]:
            image = example["image"].numpy()
            boxes = example["boxes"].numpy()
            for img_shp, expected in zip(image.shape, expected_img_shape):
                assert (expected is None) or img_shp == expected
            assert boxes.shape == expected_box_shape
Example 2
def _check_acc_on_imagenet():
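    # Sanity check: run a ResNet-50 over the built-in ImageNet test split and report accuracy.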
    loader = DBLoader()
    loader.load_built_in("imagenet", parser=ReshapeImageNet())
    imagenet = loader.test.to_tfdataset(1, 1)
    resnet50 = get_resnet()
    resnet50.compile(metrics=["acc"])
    resnet50.evaluate(x=imagenet)
Example 3
    def test_examine_image(self, tmp_path):
        imagenet = DBLoader()
        imagenet.load_built_in("imagenet", parser=RemapImagenet())
        dataset = imagenet.test.to_tfdataset(batch=1, epoch=1)
        dataset = dataset.take(20)
        for idx, data in enumerate(dataset):
            image, label = data
            # Reverse the channel order (RGB -> BGR) before writing with OpenCV.
            img = image[0, ..., ::-1].numpy()
            cv2.imwrite(str(tmp_path.joinpath("{:02d}.bmp".format(idx))), img)
Example 4
def load_simpson(tmpdir):
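    # Load a local dataset directory with the Simpson-format parser and print one training example.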
    parser = SimpsonFMT()
    loader = DBLoader()
    loader.load(str(tmpdir), parser=parser)

    train = loader.train.to_tfdataset(epoch=1, batch=1)
    for example in train:
        print(example)
        break
Example 5
def load_mvtectad():
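    # Load the dataset under DST_DIR with the MVTec-format parser and print one training example.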
    parser = MVTectFMT()
    loader = DBLoader()

    loader.load(DST_DIR, parser=parser)
    train = loader.train.to_tfdataset(1, 1)
    for example in train:
        print(example)
        break
Example 6
    def test_loadable(self, config):
        """Can load all built-in datasets"""
        if config.name == "imagenet":
            pytest.skip(msg="counting ImageNet takes too much time")

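        # A freshly constructed loader has no info string and no train/test splits yet.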
        db = DBLoader()
        assert db.info == ""
        assert db.train is None
        assert db.test is None

        db.load_built_in(config.name)
        assert db.train.count == config.train["count"]
        assert db.test.count == config.test["count"]
        assert db.info != ""
Example 7
    def test_inference_speed(self, model):
        if model == "resnet":
            model = tf.keras.applications.ResNet50V2(
                include_top=True,
                input_shape=(256, 256, 3),
                weights=None,
                classes=1000,
            )
        elif model == "resnext":
            model = ResNeXt50(input_shape=(256, 256, 3),
                              include_fc=True,
                              n_fc=1000,
                              load_pretrained=True)
            print(model.count_params())

        imagenet = DBLoader()
        imagenet.load_built_in("imagenet", parser=RemapImagenet())

        dataset = imagenet.test.to_tfdataset(batch=1, epoch=1)
        dataset = dataset.take(1000)
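        # Time 1000 single-image forward passes.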
        start = time.time()
        model.predict(dataset)
        print(time.time() - start)
Example 8
    def test_classification_data_shape(self, config):
        """Check the tensor shape of classification dataset"""
        db = DBLoader()
        db.load_built_in(config.name)

        train_example = db.train.get_sample()
        test_example = db.test.get_sample()

        info = config.info["image"]
        if config.name == "imagenet":
            expected_shape = (256, 256, 3)
        else:
            expected_shape = info["height"], info["width"], info["channel"]

        for example in [train_example, test_example]:
            image = example["image"].numpy()
            label = example["label"].numpy()
            assert image.shape == expected_shape

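            # ImageNet labels are one-hot over 1000 classes; the other datasets use scalar labels.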
            if config.name == "imagenet":
                assert label.shape == (1000, )
            else:
                assert label.shape == ()
Example 9
    def manually_examine_seg(
        self,
        nImage: int,
        out_dir: pathlib.Path,
        resize_shape=None,
    ):
        parser = CoCoSeg(one_hot_class=False, resize_shape=resize_shape)

        db = DBLoader()
        db.load_built_in("coco-seg", parser=parser)

        ds = db.train.to_tfdataset(epoch=1, batch=1)
        for cnt, example in enumerate(ds):
            image = example["image"].numpy()[0, ...].astype(np.uint8)
            image = image[..., ::-1]
            image = np.array(image)

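            # Drop the batch dimension and the trailing channel dimension of the mask.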
            mask = example["segment_mask"].numpy()[0, ..., 0]

            img = self.get_masked_image(image, mask)
            img.save(str(out_dir.joinpath("train_{}.bmp".format(cnt))))
            if cnt == nImage:
                break

        ds = db.test.to_tfdataset(epoch=1, batch=1)
        for cnt, example in enumerate(ds):
            image = example["image"].numpy()[0, ...].astype(np.uint8)
            image = image[..., ::-1]
            image = np.array(image)

            mask = example["segment_mask"].numpy()[0, ..., 0]

            img = self.get_masked_image(image, mask)
            img.save(str(out_dir.joinpath("test_{}.bmp".format(cnt))))
            if cnt == nImage:
                break
Example 10
    def manually_examine_bbox(
        self,
        nImage: int,
        out_dir: pathlib.Path,
        resize_shape=None,
    ):
        parser = CoCoObjDet(one_hot_class=False, resize_shape=resize_shape)

        db = DBLoader()
        db.load_built_in("coco-objdet", parser=parser)

        ds = db.train.to_tfdataset(epoch=1, batch=1)
        for cnt, example in enumerate(ds):
            image = example["image"].numpy()[0, ...].astype(np.uint8)
            image = image[..., ::-1]
            image = np.array(image)

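            # Overlay the example's boxes on the image before writing it to disk.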
            boxes = example["boxes"].numpy()[0, ...]
            self.draw_boxes(image, boxes)
            cv2.imwrite(str(out_dir.joinpath("train_{}.bmp".format(cnt))),
                        image)
            if cnt == nImage:
                break

        ds = db.test.to_tfdataset(epoch=1, batch=1)
        for cnt, example in enumerate(ds):
            image = example["image"].numpy()[0, ...].astype(np.uint8)
            image = image[..., ::-1]
            image = np.array(image)

            boxes = example["boxes"].numpy()[0, ...]
            self.draw_boxes(image, boxes)
            cv2.imwrite(str(out_dir.joinpath("test_{}.bmp".format(cnt))),
                        image)
            if cnt == nImage:
                break
Example 11
            image, (224, 224),
            preserve_aspect_ratio=False,
            antialias=False
        )
        label = example.pop("label")
        return image, label


if __name__ == "__main__":
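    # Set up training of a ResNeXt-50 from scratch on ImageNet (SGD with momentum, categorical cross-entropy loss).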
    model = ResNeXt50(
        input_shape=(224, 224, 3),
        include_fc=True, n_fc=1000,
        load_pretrained=False
    )

    imagenet = DBLoader()
    imagenet.load_built_in("imagenet", parser=None)

    optimizer_params = {
        "learning_rate": 0.0025, "momentum": 0.9
    }
    hyper = {
        "SGD": str(optimizer_params),
        "lr_decay": 0.1,
        "loss": keras.losses.CategoricalCrossentropy(from_logits=True),
        "batch": 32,
    }
    logging.info("hyper info: {}".format(hyper))

    trainer = KerasBaseTrainer(
        model=model,