Example #1
# inference() and print_utils below are defined in the surrounding project.
def multires_inference(runs_dirpath, ori_image, ori_metadata,
                       ori_disp_polygons, model_disp_max_abs_value, batch_size,
                       ds_fac_list, run_name_list):
    """
    Returns the last segmentation image that was computed (from the finest resolution)

    :param ori_image:
    :param ori_metadata:
    :param ori_disp_polygons:
    :param model_disp_max_abs_value:
    :param ds_fac_list:
    :param run_name_list:
    :return:
    """
    aligned_disp_polygons = ori_disp_polygons  # init
    segmentation_image = None
    # Launch the resolution chain pipeline:
    for ds_fac, run_name in zip(ds_fac_list, run_name_list):
        print("# --- downsampling_factor: {} --- #".format(ds_fac))
        try:
            aligned_disp_polygons, segmentation_image = inference(
                runs_dirpath, ori_image, ori_metadata, aligned_disp_polygons,
                model_disp_max_abs_value, batch_size, ds_fac, run_name)
        except ValueError as e:
            # Skip this resolution step but keep the previously aligned polygons
            print_utils.print_warning(str(e))

    return aligned_disp_polygons, segmentation_image
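Because each step feeds its aligned polygons to the next, ds_fac_list is meant to go from coarse to fine. A minimal usage sketch, with hypothetical paths, factors and run names (read_image is the helper from Example #2):

image, metadata = read_image("raw/bloomington22.tif")
# disp_polygons: list of (n, 2) polygon arrays, e.g. loaded from a shapefile
aligned_polygons, seg_image = multires_inference(
    "runs", image, metadata, disp_polygons,
    model_disp_max_abs_value=4, batch_size=12,
    ds_fac_list=[8, 4, 2, 1],  # coarse-to-fine chain
    run_name_list=["ds_fac_8", "ds_fac_4", "ds_fac_2", "ds_fac_1"])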
Example #2
import skimage.io

# geo_utils and print_utils are helper modules from the surrounding project.


def read_image(filepath, pixelsize=None):
    image_array = skimage.io.imread(filepath)
    if pixelsize is None:
        pixelsize = geo_utils.get_pixelsize(filepath)
    assert isinstance(pixelsize, float), \
        "pixelsize should be float, not {}".format(type(pixelsize))
    if pixelsize < 1e-3:
        print_utils.print_warning(
            "WARNING: the detected pixel size of the image is {}m, which seems too small to be correct. "
            "If problems occur, specify the pixel size explicitly with the pixelsize command-line argument."
            .format(pixelsize))
    image_metadata = {
        "filepath": filepath,
        "pixelsize": pixelsize,
    }
    return image_array, image_metadata
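For instance, with a hypothetical GeoTIFF (when pixelsize is omitted it is read from the file's georeferencing metadata via geo_utils.get_pixelsize):

image_array, image_metadata = read_image("raw/bloomington22.tif")
print(image_metadata["pixelsize"])  # e.g. 0.3 (meters per pixel)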
Example #3
import json
import os

from jsmin import jsmin  # strips comments so the JSON config files can be commented


def load_config(config_name="config", config_dirpath=""):
    config_filepath = os.path.join(config_dirpath, config_name + ".json")
    try:
        with open(config_filepath, 'r') as f:
            minified = jsmin(f.read())
            config = json.loads(minified)
        return config
    except FileNotFoundError:
        if config_name == "config" and config_dirpath == "":
            print_utils.print_warning(
                "WARNING: the default config file was not found.")
            return None
        else:
            print_utils.print_warning(
                "WARNING: config file {} was not found, opening default config file config.json instead."
                .format(config_filepath))
            return load_config()
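A usage sketch (names hypothetical; a missing non-default config falls back to the default config.json, while a missing default returns None):

config = load_config(config_name="my_experiment", config_dirpath="configs")
if config is None:
    raise RuntimeError("No config could be loaded")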
Example #4
import json

import numpy as np
from osgeo import ogr

# get_coor_in_space, compute_epsg_to_image_mat, apply_transform_mat,
# print_utils and polygon_utils are helpers from the surrounding project.


def get_polygons_from_shapefile(image_filepath, input_shapefile_filepath):
    coor, gt, coor_system = get_coor_in_space(image_filepath)
    transform_mat = compute_epsg_to_image_mat(coor, gt)

    datasource = ogr.Open(input_shapefile_filepath)
    assert datasource is not None, "File {} does not exist or could not be opened!".format(
        input_shapefile_filepath)
    shape = datasource.GetLayer(0)
    feature_count = shape.GetFeatureCount()
    polygons = []
    properties_list = []
    for feature_index in range(feature_count):
        feature = shape.GetFeature(feature_index)
        raw_json = feature.ExportToJson()
        parsed_json = json.loads(raw_json)

        # Extract polygon (exterior ring only; holes are ignored):
        polygon = np.array(parsed_json["geometry"]["coordinates"][0])
        assert len(polygon.shape) == 2, "polygon should have shape (n, d)"
        if polygon.shape[1] > 2:
            print_utils.print_warning(
                "WARNING: polygon from shapefile has shape {}. Discarding extra coordinates to get a polygon of shape ({}, 2)"
                .format(polygon.shape, polygon.shape[0]))
            polygon = polygon[:, :2]
        polygon_epsg_space = polygon
        polygon_image_space = apply_transform_mat(polygon_epsg_space,
                                                  transform_mat)
        polygon_image_space = polygon_utils.swap_coords(polygon_image_space)
        polygons.append(polygon_image_space)

        # Extract properties:
        if "properties" in parsed_json:
            properties = parsed_json["properties"]
            properties_list.append(properties)
    # The return shape depends on the shapefile: a (polygons, properties_list)
    # tuple when the features carry properties, otherwise just the polygons list.
    if properties_list:
        return polygons, properties_list
    else:
        return polygons
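Since the return shape depends on whether the shapefile's features carry properties, a caller sketch (paths hypothetical) has to handle both cases:

result = get_polygons_from_shapefile("raw/bloomington22.tif",
                                     "raw/bloomington22.shp")
if isinstance(result, tuple):
    polygons, properties_list = result
else:
    polygons, properties_list = result, None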
Example #5
import math
import os

# read, config, dataset_utils, print_utils and process_image are defined in
# the surrounding project.


def process_dataset(dataset_fold, dataset_raw_dirpath, image_info_list,
                    overwrite_polygon_dir_name, patch_stride, patch_res,
                    data_aug_rot, downsampling_factors, disp_max_abs_value):
    print("Processing images from {}".format(dataset_raw_dirpath))

    for image_index, image_info in enumerate(image_info_list):
        print("Processing city {}. Progression: {}/{}"
              .format(image_info["city"], image_index + 1, len(image_info_list)))
        if "number" in image_info:
            # This is one image
            tile_info_list = [image_info]
        elif "numbers" in image_info:
            # This is multiple images
            tile_info_list = [
                {
                    "city": image_info["city"],
                    "number": number,
                    "min_downsampling_factor": image_info["min_downsampling_factor"],
                }
                for number in image_info["numbers"]
            ]
        else:
            print_utils.print_warning(
                "WARNING: image_info dict should have one of these keys: \"number\" or \"numbers\"")
            tile_info_list = []

        for tile_info in tile_info_list:
            image_name = read.IMAGE_NAME_FORMAT.format(city=tile_info["city"], number=tile_info["number"])
            print("Processing city {}, number {}"
                  .format(tile_info["city"], tile_info["number"]))

            include_polygons = (dataset_fold == "val" or dataset_fold == "test")
            if data_aug_rot and dataset_fold == "train":
                # Account for data augmentation when rotating patches on the training set
                adjusted_patch_res = math.ceil(patch_res * math.sqrt(2))
                # Divide the stride by 2 so that no pixels are left out when rotating by 45 degrees
                adjusted_patch_stride = math.floor(patch_stride * math.sqrt(2) / 2)
            else:
                adjusted_patch_res = patch_res
                adjusted_patch_stride = patch_stride

            # Keep only downsampling factors at least as large as this tile's min_downsampling_factor
            image_downsampling_factors = [
                downsampling_factor for downsampling_factor in downsampling_factors
                if tile_info["min_downsampling_factor"] <= downsampling_factor
            ]

            # Create one shard writer per downsampling factor. Writers are
            # created (and closed below) for every factor, including those
            # filtered out above, so skipped factors yield empty shards.
            writers = {}
            for downsampling_factor in downsampling_factors:
                filename_format = os.path.join(
                    config.TFRECORDS_DIR,
                    config.TFRECORD_FILEPATH_FORMAT.format(dataset_fold, image_name, downsampling_factor))
                shard_writer = dataset_utils.TFRecordShardWriter(filename_format, config.RECORDS_PER_SHARD)
                writers[downsampling_factor] = shard_writer

            process_image(dataset_raw_dirpath, tile_info, overwrite_polygon_dir_name,
                          adjusted_patch_stride, adjusted_patch_res,
                          image_downsampling_factors,
                          disp_max_abs_value,
                          include_polygons,
                          writers)

            # Close writers
            for downsampling_factor in downsampling_factors:
                writers[downsampling_factor].close()
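A sketch of a call (all values hypothetical; the TFRecord locations come from the project's config module):

image_info_list = [
    {"city": "bloomington", "numbers": [22, 23], "min_downsampling_factor": 1},
    {"city": "chicago", "number": 5, "min_downsampling_factor": 2},
]
process_dataset("train", "data/AerialImageDataset/raw", image_info_list,
                overwrite_polygon_dir_name=None,
                patch_stride=32, patch_res=220, data_aug_rot=True,
                downsampling_factors=[1, 2, 4, 8], disp_max_abs_value=4)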