Example #1: chip classification
# Imports assumed for this example (module layout as of Raster Vision ~0.13;
# exact paths can differ between versions):
from os.path import join, dirname, basename

from rastervision.core.rv_pipeline import ChipClassificationConfig
from rastervision.core.data import (
    ClassConfig, ChipClassificationLabelSourceConfig, GeoJSONVectorSourceConfig,
    RasterioSourceConfig, StatsTransformerConfig, SceneConfig, DatasetConfig)
from rastervision.pytorch_backend import PyTorchChipClassificationConfig
from rastervision.pytorch_learner import (
    Backbone, SolverConfig, ClassificationModelConfig,
    ClassificationImageDataConfig)


def get_config(runner, root_uri, data_uri=None, full_train=False):
    def get_path(part):
        if full_train:
            return join(data_uri, part)
        else:
            return join(dirname(__file__), part)

    class_config = ClassConfig(names=['car', 'building', 'background'],
                               colors=['red', 'blue', 'black'])

    def make_scene(img_path, label_path):
        scene_id = basename(img_path)  # avoid shadowing the id() builtin
        label_source = ChipClassificationLabelSourceConfig(
            vector_source=GeoJSONVectorSourceConfig(uri=label_path,
                                                    default_class_id=None,
                                                    ignore_crs_field=True),
            ioa_thresh=0.5,
            use_intersection_over_cell=False,
            pick_min_class_id=True,
            background_class_id=2,
            infer_cells=True)

        raster_source = RasterioSourceConfig(
            channel_order=[0, 1, 2],
            uris=[img_path],
            transformers=[StatsTransformerConfig()])

        return SceneConfig(id=scene_id,
                           raster_source=raster_source,
                           label_source=label_source)

    scenes = [
        make_scene(get_path('scene/image.tif'), get_path('scene/labels.json')),
        make_scene(get_path('scene/image2.tif'),
                   get_path('scene/labels2.json'))
    ]
    scene_dataset = DatasetConfig(class_config=class_config,
                                  train_scenes=scenes,
                                  validation_scenes=scenes)

    chip_sz = 200
    img_sz = chip_sz
    data = ClassificationImageDataConfig(img_sz=img_sz, augmentors=[])

    if full_train:
        model = ClassificationModelConfig(backbone=Backbone.resnet18)
        solver = SolverConfig(lr=1e-4,
                              num_epochs=300,
                              batch_sz=8,
                              one_cycle=True,
                              sync_interval=300)
    else:
        pretrained_uri = (
            'https://github.com/azavea/raster-vision-data/releases/download/v0.12/'
            'chip-classification.pth')
        model = ClassificationModelConfig(backbone=Backbone.resnet18,
                                          init_weights=pretrained_uri)
        solver = SolverConfig(lr=1e-9,
                              num_epochs=1,
                              batch_sz=2,
                              one_cycle=True,
                              sync_interval=200)
    backend = PyTorchChipClassificationConfig(data=data,
                                              model=model,
                                              solver=solver,
                                              log_tensorboard=False,
                                              run_tensorboard=False)

    config = ChipClassificationConfig(root_uri=root_uri,
                                      dataset=scene_dataset,
                                      backend=backend,
                                      train_chip_sz=chip_sz,
                                      predict_chip_sz=chip_sz)

    return config
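
Since get_config only builds a configuration object, it can be smoke-tested
without running the pipeline. A minimal sketch (root_uri here is an arbitrary
local path, and runner is unused by the function body, so None suffices):

config = get_config(runner=None, root_uri='/tmp/rv_output')
print(config.dataset.class_config.names)  # ['car', 'building', 'background']
print(config.train_chip_sz)  # 200

In normal use the file is not called directly: the Raster Vision CLI imports
it and calls get_config, forwarding any "-a key value" pairs as keyword
arguments, e.g. "rastervision run local <this file> -a root_uri /tmp/rv_output".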
Example #2: semantic segmentation with vector outputs
# Imports assumed for this example (Raster Vision ~0.13; paths may vary):
from os.path import join, dirname

from rastervision.core.rv_pipeline import (
    SemanticSegmentationConfig, SemanticSegmentationChipOptions,
    SemanticSegmentationWindowMethod)
from rastervision.core.data import (
    ClassConfig, RasterioSourceConfig, SemanticSegmentationLabelSourceConfig,
    SemanticSegmentationLabelStoreConfig, PolygonVectorOutputConfig,
    BuildingVectorOutputConfig, SceneConfig, DatasetConfig)
from rastervision.pytorch_backend import PyTorchSemanticSegmentationConfig
from rastervision.pytorch_learner import (
    Backbone, SolverConfig, SemanticSegmentationModelConfig,
    SemanticSegmentationImageDataConfig)


def get_config(runner, root_uri, data_uri=None, full_train=False):
    def get_path(part):
        if full_train:
            return join(data_uri, part)
        else:
            return join(dirname(__file__), part)

    class_config = ClassConfig(names=['red', 'green'], colors=['red', 'green'])

    def make_scene(scene_id, img_path, label_path):
        raster_source = RasterioSourceConfig(channel_order=[0, 1, 2],
                                             uris=[img_path])
        label_source = SemanticSegmentationLabelSourceConfig(
            rgb_class_config=class_config,
            raster_source=RasterioSourceConfig(uris=[label_path]))
        label_store = SemanticSegmentationLabelStoreConfig(
            rgb=True,
            vector_output=[
                PolygonVectorOutputConfig(class_id=0),
                BuildingVectorOutputConfig(class_id=1)
            ])

        return SceneConfig(id=scene_id,
                           raster_source=raster_source,
                           label_source=label_source,
                           label_store=label_store)

    chip_sz = 300
    img_sz = chip_sz
    data = SemanticSegmentationImageDataConfig(img_sz=img_sz, augmentors=[])

    if full_train:
        model = SemanticSegmentationModelConfig(backbone=Backbone.resnet50)
        solver = SolverConfig(lr=1e-4,
                              num_epochs=300,
                              batch_sz=8,
                              one_cycle=True,
                              sync_interval=300)
    else:
        pretrained_uri = (
            'https://github.com/azavea/raster-vision-data/releases/download/v0.12/'
            'semantic-segmentation.pth')
        model = SemanticSegmentationModelConfig(backbone=Backbone.resnet50,
                                                init_weights=pretrained_uri)
        solver = SolverConfig(lr=1e-9,
                              num_epochs=1,
                              batch_sz=2,
                              one_cycle=True,
                              sync_interval=200)
    backend = PyTorchSemanticSegmentationConfig(data=data,
                                                model=model,
                                                solver=solver,
                                                log_tensorboard=False,
                                                run_tensorboard=False)

    scenes = [
        make_scene('test-scene', get_path('scene/image.tif'),
                   get_path('scene/labels.tif')),
        make_scene('test-scene2', get_path('scene/image2.tif'),
                   get_path('scene/labels2.tif'))
    ]
    scene_dataset = DatasetConfig(class_config=class_config,
                                  train_scenes=scenes,
                                  validation_scenes=scenes)

    chip_options = SemanticSegmentationChipOptions(
        window_method=SemanticSegmentationWindowMethod.sliding, stride=chip_sz)

    return SemanticSegmentationConfig(root_uri=root_uri,
                                      dataset=scene_dataset,
                                      backend=backend,
                                      train_chip_sz=chip_sz,
                                      predict_chip_sz=chip_sz,
                                      chip_options=chip_options)
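
Both ends of this pipeline use RGB-encoded labels: rgb_class_config decodes
the training label rasters from colors into class ids, and rgb=True writes
predictions back out as colors. The decoding idea, illustrated outside of
Raster Vision (the exact RGB triples are assumptions based on the 'red' and
'green' color names):

import numpy as np

COLOR_TO_CLASS = {(255, 0, 0): 0,  # 'red' class
                  (0, 128, 0): 1}  # 'green' class

def rgb_to_class_ids(rgb: np.ndarray) -> np.ndarray:
    class_ids = np.zeros(rgb.shape[:2], dtype=np.uint8)
    for color, class_id in COLOR_TO_CLASS.items():
        class_ids[(rgb == color).all(axis=-1)] = class_id
    return class_ids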
Example #3: object detection
# Imports assumed for this example (Raster Vision ~0.13; paths may vary):
from os.path import join, dirname

from rastervision.core.rv_pipeline import (
    ObjectDetectionConfig, ObjectDetectionChipOptions,
    ObjectDetectionPredictOptions)
from rastervision.core.data import (
    ClassConfig, RasterioSourceConfig, GeoJSONVectorSourceConfig,
    ObjectDetectionLabelSourceConfig, SceneConfig, DatasetConfig)
from rastervision.pytorch_backend import PyTorchObjectDetectionConfig
from rastervision.pytorch_learner import (
    Backbone, SolverConfig, ObjectDetectionModelConfig,
    ObjectDetectionImageDataConfig)


def get_config(runner, root_uri, data_uri=None, full_train=False):
    def get_path(part):
        if full_train:
            return join(data_uri, part)
        else:
            return join(dirname(__file__), part)

    class_config = ClassConfig(names=['car', 'building'],
                               colors=['blue', 'red'])

    def make_scene(scene_id, img_path, label_path):
        raster_source = RasterioSourceConfig(channel_order=[0, 1, 2],
                                             uris=[img_path])
        label_source = ObjectDetectionLabelSourceConfig(
            vector_source=GeoJSONVectorSourceConfig(uri=label_path,
                                                    default_class_id=None))
        return SceneConfig(id=scene_id,
                           raster_source=raster_source,
                           label_source=label_source)

    chip_sz = 300
    img_sz = chip_sz
    data = ObjectDetectionImageDataConfig(img_sz=img_sz, augmentors=[])

    if full_train:
        model = ObjectDetectionModelConfig(backbone=Backbone.resnet18)
        solver = SolverConfig(lr=1e-4,
                              num_epochs=300,
                              batch_sz=8,
                              one_cycle=True,
                              sync_interval=300)
    else:
        pretrained_uri = (
            'https://github.com/azavea/raster-vision-data/releases/download/v0.12/'
            'object-detection.pth')
        model = ObjectDetectionModelConfig(backbone=Backbone.resnet18,
                                           init_weights=pretrained_uri)
        solver = SolverConfig(lr=1e-9,
                              num_epochs=1,
                              batch_sz=2,
                              one_cycle=True,
                              sync_interval=200)
    backend = PyTorchObjectDetectionConfig(data=data,
                                           model=model,
                                           solver=solver,
                                           log_tensorboard=False,
                                           run_tensorboard=False)

    scenes = [
        make_scene('od_test', get_path('scene/image.tif'),
                   get_path('scene/labels.json')),
        make_scene('od_test-2', get_path('scene/image2.tif'),
                   get_path('scene/labels2.json'))
    ]
    scene_dataset = DatasetConfig(class_config=class_config,
                                  train_scenes=scenes,
                                  validation_scenes=scenes)

    chip_options = ObjectDetectionChipOptions(neg_ratio=1.0, ioa_thresh=1.0)
    predict_options = ObjectDetectionPredictOptions(merge_thresh=0.1,
                                                    score_thresh=0.5)

    return ObjectDetectionConfig(root_uri=root_uri,
                                 dataset=scene_dataset,
                                 backend=backend,
                                 train_chip_sz=chip_sz,
                                 predict_chip_sz=chip_sz,
                                 chip_options=chip_options,
                                 predict_options=predict_options)
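
Because default_class_id=None, every feature in the GeoJSON label files must
carry its own class id. A minimal feature of the assumed shape (coordinates
and the property name are illustrative):

feature = {
    'type': 'Feature',
    'geometry': {
        'type': 'Polygon',
        'coordinates': [[[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]]],
    },
    'properties': {'class_id': 0},  # 0 = 'car' in this example's ClassConfig
}

At prediction time, boxes scoring below score_thresh are discarded and
merge_thresh controls how aggressively overlapping boxes from adjacent chips
are merged.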
Example #4: semantic segmentation with train/validation crops
# Imports assumed for this example (Raster Vision ~0.13; paths may vary).
# get_scenes is a project-local helper, not part of Raster Vision, and is
# not shown here.
from rastervision.core.rv_pipeline import (
    SemanticSegmentationConfig, SemanticSegmentationChipOptions,
    SemanticSegmentationWindowMethod)
from rastervision.core.data import ClassConfig, DatasetConfig
from rastervision.pytorch_backend import PyTorchSemanticSegmentationConfig
from rastervision.pytorch_learner import (
    SolverConfig, SemanticSegmentationModelConfig,
    SemanticSegmentationImageDataConfig)


def get_config(runner,
               root_uri,
               json,
               dataset,
               catalog_dir='/vsizip//workdir',
               imagery_dir='/opt/data',
               chip_sz=512,
               N=None):

    chip_sz = int(chip_sz)  # runtime args arrive as strings from the CLI

    if dataset == 'cloud':
        class_config = ClassConfig(
            names=[
                'algal_bloom', 'normal_water', 'cloud', 'cloud_shadow', 'other'
            ],
            colors=['green', 'blue', 'white', 'gray', 'brown'])
        class_id_filter_dict = {
            0: ['==', 'default', 'Algal bloom'],
            1: ['==', 'default', 'Non-algal-bloomed water'],
            2: ['==', 'default', 'Cloud'],
            3: ['==', 'default', 'Cloud shadow'],
            0xff: ['==', 'default', 'Other '],
        }
    elif dataset == 'tree':
        class_config = ClassConfig(names=['green_stage', 'red_stage', 'other'],
                                   colors=['green', 'brown', 'cyan'])
        class_id_filter_dict = {
            0: ['==', 'default', "Green stage conifer"],
            1: ['==', 'default', "Red stage conifer"],
            0xff: ['==', 'default', "Other"],
        }
    else:
        raise ValueError(f'unknown dataset: {dataset!r}')

    # Carve a 5x5 grid of crops out of each scene. Each crop appears to give
    # the fraction trimmed from [left, top, right, bottom], so every crop
    # keeps one 0.2 x 0.2 cell; diagonal cells are held out for validation.
    # (This interpretation depends on the get_scenes helper, which is not
    # shown here.)
    train_crops = []
    val_crops = []
    for x in range(0, 5):
        for y in range(0, 5):
            x_start = x / 5.0
            x_end = 0.80 - x_start
            y_start = y / 5.0
            y_end = 0.80 - y_start
            crop = [x_start, y_start, x_end, y_end]
            if x == y:
                val_crops.append(crop)
            else:
                train_crops.append(crop)

    scenes = get_scenes(json,
                        class_config,
                        class_id_filter_dict,
                        catalog_dir,
                        imagery_dir,
                        train_crops=train_crops,
                        val_crops=val_crops,
                        N=N)

    train_scenes, validation_scenes = scenes

    print(f'{len(train_scenes)} training scenes')
    print(f'{len(validation_scenes)} validation scenes')

    scene_dataset = DatasetConfig(  # renamed to avoid shadowing the dataset arg
        class_config=class_config,
        train_scenes=train_scenes,
        validation_scenes=validation_scenes,
    )

    solver = SolverConfig()
    data = SemanticSegmentationImageDataConfig(img_sz=chip_sz,
                                               num_workers=0,
                                               preview_batch_limit=8)
    model = SemanticSegmentationModelConfig()

    backend = PyTorchSemanticSegmentationConfig(model=model,
                                                data=data,
                                                solver=solver)

    chip_options = SemanticSegmentationChipOptions(
        window_method=SemanticSegmentationWindowMethod.sliding, stride=chip_sz)

    return SemanticSegmentationConfig(root_uri=root_uri,
                                      dataset=scene_dataset,
                                      backend=backend,
                                      train_chip_sz=chip_sz,
                                      predict_chip_sz=chip_sz,
                                      chip_options=chip_options,
                                      chip_nodata_threshold=.75,
                                      img_format='npy',
                                      label_format='png')
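
The 5x5 crop grid above is easy to sanity-check in isolation: the five
diagonal cells become validation crops and the remaining twenty become
training crops. A standalone sketch of the same logic:

train_crops, val_crops = [], []
for x in range(5):
    for y in range(5):
        crop = [x / 5.0, y / 5.0, 0.80 - x / 5.0, 0.80 - y / 5.0]
        (val_crops if x == y else train_crops).append(crop)
assert len(val_crops) == 5
assert len(train_crops) == 20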
Example #5: semantic segmentation with external model and loss definitions
# Imports assumed for this example (Raster Vision ~0.12; paths may vary).
# get_scenes is a project-local helper, not part of Raster Vision, and is
# not shown here.
from rastervision.core.rv_pipeline import (
    SemanticSegmentationConfig, SemanticSegmentationChipOptions,
    SemanticSegmentationWindowMethod)
from rastervision.core.data import ClassConfig, DatasetConfig
from rastervision.pytorch_backend import PyTorchSemanticSegmentationConfig
from rastervision.pytorch_learner import (
    SolverConfig, SemanticSegmentationModelConfig, ExternalModuleConfig)


def get_config(runner,
               root_uri,
               analyze_uri,
               chip_uri,
               json,
               chip_sz=512,
               batch_sz=32,
               epochs=33,
               preshrink=1,
               small_test=False,
               architecture='cheaplab',
               level='L1C'):

    chip_sz = int(chip_sz)
    epochs = int(epochs)
    batch_sz = int(batch_sz)
    preshrink = int(preshrink)
    assert architecture in ['cheaplab', 'fpn-resnet18']
    assert level in ['L1C', 'L2A']

    if level == 'L1C':
        channel_order = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
    elif level == 'L2A':
        channel_order = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

    num_channels = len(channel_order)

    class_config = ClassConfig(names=["background", "cloud"],
                               colors=["brown", "white"])

    class_id_filter_dict = {
        0: ['==', 'default', 'Background'],
        1: ['==', 'default', 'Cloud'],
    }

    # Same 5x5 crop grid as in Example #4: diagonal cells become validation
    # crops, the rest training crops.
    train_crops = []
    val_crops = []
    for x in range(0, 5):
        for y in range(0, 5):
            x_start = x / 5.0
            x_end = 0.80 - x_start
            y_start = y / 5.0
            y_end = 0.80 - y_start
            crop = [x_start, y_start, x_end, y_end]
            if x == y:
                val_crops.append(crop)
            else:
                train_crops.append(crop)

    scenes = get_scenes(json,
                        channel_order,
                        class_config,
                        class_id_filter_dict,
                        level,
                        train_crops=train_crops,
                        val_crops=val_crops)

    train_scenes, validation_scenes = scenes

    if small_test:
        train_scenes = train_scenes[0:2]
        validation_scenes = validation_scenes[0:2]

    print(f"{len(train_scenes)} training scenes")
    print(f"{len(validation_scenes)} validation scenes")

    dataset = DatasetConfig(
        class_config=class_config,
        train_scenes=train_scenes,
        validation_scenes=validation_scenes,
    )

    if architecture == 'fpn-resnet18':
        external_def = ExternalModuleConfig(
            github_repo='AdeelH/pytorch-fpn:0.1',
            name='pytorch-fpn',
            entrypoint='make_segm_fpn_resnet',
            entrypoint_kwargs={
                'name': 'resnet18',
                'fpn_type': 'fpn',
                'num_classes': 2,
                'fpn_channels': 256,
                'in_channels': len(channel_order),
                'out_size': (chip_sz, chip_sz)
            })
    else:
        external_def = ExternalModuleConfig(
            github_repo='jamesmcclain/CheapLab:08d260b',
            name='cheaplab',
            entrypoint='make_cheaplab_model',
            entrypoint_kwargs={
                'preshrink': preshrink,
                'num_channels': num_channels
            })

    model = SemanticSegmentationModelConfig(external_def=external_def)

    external_loss_def = ExternalModuleConfig(
        github_repo='jamesmcclain/CheapLab:08d260b',
        name='bce_loss',
        entrypoint='make_bce_loss',
        force_reload=False,
        entrypoint_kwargs={})

    backend = PyTorchSemanticSegmentationConfig(
        model=model,
        solver=SolverConfig(lr=1e-4,
                            num_epochs=epochs,
                            batch_sz=batch_sz,
                            external_loss_def=external_loss_def,
                            ignore_last_class='force'),
        log_tensorboard=False,
        run_tensorboard=False,
        num_workers=0,
        preview_batch_limit=8)

    chip_options = SemanticSegmentationChipOptions(
        window_method=SemanticSegmentationWindowMethod.sliding, stride=chip_sz)

    return SemanticSegmentationConfig(root_uri=root_uri,
                                      analyze_uri=analyze_uri,
                                      chip_uri=chip_uri,
                                      dataset=dataset,
                                      backend=backend,
                                      train_chip_sz=chip_sz,
                                      predict_chip_sz=chip_sz,
                                      chip_options=chip_options,
                                      chip_nodata_threshold=.75,
                                      img_format='npy',
                                      label_format='npy')
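
ExternalModuleConfig pulls the model and loss definitions from GitHub rather
than using Raster Vision's built-ins; its github_repo and entrypoint fields
mirror torch.hub's interface. Roughly equivalent, assuming the repository
exposes the entrypoint through a hubconf.py (a sketch, not what Raster Vision
executes verbatim):

import torch

# Load the CheapLab model with the same kwargs as entrypoint_kwargs above
# (13 channels corresponds to the L1C channel_order).
model = torch.hub.load('jamesmcclain/CheapLab:08d260b',
                       'make_cheaplab_model',
                       preshrink=1,
                       num_channels=13)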