Example #1
    def test_from_folders(self, tmpdir):
        tmp_dir = Path(tmpdir)

        # create random dummy data

        os.makedirs(str(tmp_dir / "images"))
        os.makedirs(str(tmp_dir / "targets"))

        images = [
            str(tmp_dir / "images" / "img1.png"),
            str(tmp_dir / "images" / "img2.png"),
            str(tmp_dir / "images" / "img3.png"),
        ]

        targets = [
            str(tmp_dir / "targets" / "img1.png"),
            str(tmp_dir / "targets" / "img2.png"),
            str(tmp_dir / "targets" / "img3.png"),
        ]

        num_classes: int = 2
        img_size: Tuple[int, int] = (196, 196)
        create_random_data(images, targets, img_size, num_classes)

        # instantiate the data module

        dm = SemanticSegmentationData.from_folders(
            train_folder=str(tmp_dir / "images"),
            train_target_folder=str(tmp_dir / "targets"),
            val_folder=str(tmp_dir / "images"),
            val_target_folder=str(tmp_dir / "targets"),
            test_folder=str(tmp_dir / "images"),
            test_target_folder=str(tmp_dir / "targets"),
            batch_size=2,
            num_workers=0,
            num_classes=num_classes,
        )
        assert dm is not None
        assert dm.train_dataloader() is not None
        assert dm.val_dataloader() is not None
        assert dm.test_dataloader() is not None

        # check training data
        data = next(iter(dm.train_dataloader()))
        imgs, labels = data[DefaultDataKeys.INPUT], data[DefaultDataKeys.TARGET]
        assert imgs.shape == (2, 3, 196, 196)
        assert labels.shape == (2, 196, 196)

        # check val data
        data = next(iter(dm.val_dataloader()))
        imgs, labels = data[DefaultDataKeys.INPUT], data[DefaultDataKeys.TARGET]
        assert imgs.shape == (2, 3, 196, 196)
        assert labels.shape == (2, 196, 196)

        # check test data
        data = next(iter(dm.test_dataloader()))
        imgs, labels = data[DefaultDataKeys.INPUT], data[DefaultDataKeys.TARGET]
        assert imgs.shape == (2, 3, 196, 196)
        assert labels.shape == (2, 196, 196)
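The create_random_data helper called above is not part of this excerpt. A minimal sketch of what such a helper could look like, assuming PIL and numpy are available and that the targets are single-channel PNG masks holding integer class indices (only the signature comes from the call above; everything else is an assumption):

import numpy as np
from PIL import Image


def create_random_data(image_files, target_files, img_size, num_classes):
    """Write random RGB images and matching integer-class masks to disk (sketch)."""
    height, width = img_size
    for image_file, target_file in zip(image_files, target_files):
        # random RGB image
        rgb = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8)
        Image.fromarray(rgb).save(image_file)
        # random single-channel mask with values in [0, num_classes)
        mask = np.random.randint(0, num_classes, (height, width), dtype=np.uint8)
        Image.fromarray(mask).save(target_file)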
Example #2

import torch

import flash
from flash.core.data.utils import download_data
from flash.image import SemanticSegmentation, SemanticSegmentationData

# 1. Create the DataModule
# The data was generated with the CARLA self-driving simulator as part of the Kaggle Lyft Udacity Challenge.
# More info here: https://www.kaggle.com/kumaresanmanickavelu/lyft-udacity-challenge
download_data(
    "https://github.com/ongchinkiat/LyftPerceptionChallenge/releases/download/v0.1/carla-capture-20180513A.zip",
    "./data",
)

datamodule = SemanticSegmentationData.from_folders(
    train_folder="data/CameraRGB",
    train_target_folder="data/CameraSeg",
    val_split=0.1,
    transform_kwargs=dict(image_size=(256, 256)),
    num_classes=21,
    batch_size=4,
)

# 2. Build the task
model = SemanticSegmentation(
    backbone="mobilenetv3_large_100",
    head="fpn",
    num_classes=datamodule.num_classes,
)

# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
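Scripts like the one above typically continue with prediction and checkpointing. A hedged continuation under the same setup; the predict file names below are illustrative and assume images from the downloaded CameraRGB folder:

# 4. Segment a few images (file names are illustrative)
predict_datamodule = SemanticSegmentationData.from_files(
    predict_files=[
        "data/CameraRGB/F61-1.png",
        "data/CameraRGB/F62-1.png",
        "data/CameraRGB/F63-1.png",
    ],
    batch_size=3,
)
predictions = trainer.predict(model, datamodule=predict_datamodule)
print(predictions)

# 5. Save the finetuned model
trainer.save_checkpoint("semantic_segmentation_model.pt")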
Example #3
    def test_smoke(self):
        dm = SemanticSegmentationData()
        assert dm is not None
Example #4
    def test_map_labels(self, tmpdir):
        tmp_dir = Path(tmpdir)

        # create random dummy data

        images = [
            str(tmp_dir / "img1.png"),
            str(tmp_dir / "img2.png"),
            str(tmp_dir / "img3.png"),
        ]

        targets = [
            str(tmp_dir / "labels_img1.png"),
            str(tmp_dir / "labels_img2.png"),
            str(tmp_dir / "labels_img3.png"),
        ]

        labels_map: Dict[int, Tuple[int, int, int]] = {
            0: (0, 0, 0),
            1: (255, 255, 255),
        }

        num_classes: int = len(labels_map.keys())
        img_size: Tuple[int, int] = (196, 196)
        create_random_data(images, targets, img_size, num_classes)

        # instantiate the data module

        dm = SemanticSegmentationData.from_files(
            train_files=images,
            train_targets=targets,
            val_files=images,
            val_targets=targets,
            batch_size=2,
            num_workers=0,
            num_classes=num_classes,
        )
        assert dm is not None
        assert dm.train_dataloader() is not None

        # disable visualisation for testing
        assert dm.data_fetcher.block_viz_window is True
        dm.set_block_viz_window(False)
        assert dm.data_fetcher.block_viz_window is False

        dm.show_train_batch("load_sample")
        dm.show_train_batch("to_tensor_transform")

        # check training data
        data = next(iter(dm.train_dataloader()))
        imgs, labels = data[DefaultDataKeys.INPUT], data[DefaultDataKeys.TARGET]
        assert imgs.shape == (2, 3, 196, 196)
        assert labels.shape == (2, 196, 196)
        assert labels.min().item() == 0
        assert labels.max().item() == 1
        assert labels.dtype == torch.int64

        # now train with `fast_dev_run`
        model = SemanticSegmentation(num_classes=2,
                                     backbone="torchvision/fcn_resnet50")
        trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
        trainer.finetune(model, dm, strategy="freeze_unfreeze")
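Example #4 builds a labels_map from class index to RGB colour but never shows how such a map is applied. A minimal sketch, assuming RGB-encoded target masks stored as (H, W, 3) numpy arrays, of converting a colour-encoded mask into the integer class mask the assertions above expect:

import numpy as np


def rgb_mask_to_class_indices(rgb_mask, labels_map):
    """Map an (H, W, 3) RGB mask to an (H, W) int64 class-index mask (sketch)."""
    class_mask = np.zeros(rgb_mask.shape[:2], dtype=np.int64)
    for class_index, color in labels_map.items():
        # pixels whose colour matches this class get the corresponding index
        matches = np.all(rgb_mask == np.asarray(color, dtype=rgb_mask.dtype), axis=-1)
        class_mask[matches] = class_index
    return class_mask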
Example #5

import torch

import flash
from flash.core.data.utils import download_data
from flash.image import SemanticSegmentation, SemanticSegmentationData

# 1. Create the DataModule
# The data was generated with the CARLA self-driving simulator as part of the Kaggle Lyft Udacity Challenge.
# More info here: https://www.kaggle.com/kumaresanmanickavelu/lyft-udacity-challenge
download_data(
    "https://github.com/ongchinkiat/LyftPerceptionChallenge/releases/download/v0.1/carla-capture-20180513A.zip",
    "./data",
)

datamodule = SemanticSegmentationData.from_folders(
    train_folder="data/CameraRGB",
    train_target_folder="data/CameraSeg",
    val_split=0.1,
    image_size=(256, 256),
    num_classes=21,
)

# 2. Build the task
model = SemanticSegmentation(
    backbone="mobilenetv3_large_100",
    head="fpn",
    num_classes=datamodule.num_classes,
)

# 3. Create the trainer and finetune the model
trainer = flash.Trainer(max_epochs=3, gpus=torch.cuda.device_count())
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
Example #6
def test_smoke():
    dm = SemanticSegmentationData(batch_size=1)
    assert dm is not None
Example #7
    def test_from_fiftyone(tmpdir):
        tmp_dir = Path(tmpdir)

        # create random dummy data

        images = [
            str(tmp_dir / "img1.png"),
            str(tmp_dir / "img2.png"),
            str(tmp_dir / "img3.png"),
        ]

        num_classes: int = 2
        img_size: Tuple[int, int] = (128, 128)

        for img_file in images:
            _rand_image(img_size).save(img_file)

        targets = [
            np.array(_rand_labels(img_size, num_classes)) for _ in range(3)
        ]

        dataset = fo.Dataset.from_dir(
            str(tmp_dir),
            dataset_type=fo.types.ImageDirectory,
        )

        for idx, sample in enumerate(dataset):
            sample["ground_truth"] = fo.Segmentation(mask=targets[idx][:, :,
                                                                       0])
            sample.save()

        # instantiate the data module

        dm = SemanticSegmentationData.from_fiftyone(
            train_dataset=dataset,
            val_dataset=dataset,
            test_dataset=dataset,
            predict_dataset=dataset,
            batch_size=2,
            num_workers=0,
            num_classes=num_classes,
        )
        assert dm is not None
        assert dm.train_dataloader() is not None
        assert dm.val_dataloader() is not None
        assert dm.test_dataloader() is not None

        # check training data
        data = next(iter(dm.train_dataloader()))
        imgs, labels = data[DataKeys.INPUT], data[DataKeys.TARGET]
        assert imgs.shape == (2, 3, 128, 128)
        assert labels.shape == (2, 128, 128)

        # check val data
        data = next(iter(dm.val_dataloader()))
        imgs, labels = data[DataKeys.INPUT], data[DataKeys.TARGET]
        assert imgs.shape == (2, 3, 128, 128)
        assert labels.shape == (2, 128, 128)

        # check test data
        data = next(iter(dm.test_dataloader()))
        imgs, labels = data[DataKeys.INPUT], data[DataKeys.TARGET]
        assert imgs.shape == (2, 3, 128, 128)
        assert labels.shape == (2, 128, 128)

        # check predict data
        data = next(iter(dm.predict_dataloader()))
        imgs = data[DataKeys.INPUT]
        assert imgs.shape == (2, 3, 128, 128)
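The _rand_image and _rand_labels helpers used in Example #7 are not included in the excerpt. One plausible sketch, assuming _rand_image returns a PIL image and _rand_labels returns a (height, width, 3) class-index array (the test only reads channel 0 of each target):

import numpy as np
from PIL import Image


def _rand_image(size):
    """Return a random RGB PIL image of the given (height, width) size (sketch)."""
    data = np.random.randint(0, 255, (*size, 3), dtype=np.uint8)
    return Image.fromarray(data)


def _rand_labels(size, num_classes):
    """Return a random (height, width, 3) array of class indices in [0, num_classes) (sketch)."""
    data = np.random.randint(0, num_classes, (*size, 1), dtype=np.uint8)
    return np.repeat(data, 3, axis=-1)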