def test_georeference():
    """Contours extracted from a known tile image should georeference to the expected WKT polygon."""
    tile = Tile.from_tms(139423, 171197, 18)
    img_path = os.path.join(os.getcwd(), "test", "data", "18_139423_171197.tif")
    m = MarchingSquares.from_file(img_path)
    points = m.find_contour()
    geo_points = georeference(points, tile)
    p = geometry.Polygon(geo_points)
    print(p.wkt)
    assert p.wkt == "POLYGON ((11.46890044212341 48.1641425687818, 11.46890580654145 48.1641425687818, 11.46890580654145 48.16404238298088, 11.46879851818085 48.16404238298088, 11.46879851818085 48.16413899072083, 11.46890044212341 48.16413899072083, 11.46890044212341 48.1641425687818))"
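The `georeference` helper itself is not shown in these examples. As a rough orientation only, the sketch below (the name and behaviour are assumed, not taken from the project) maps pixel coordinates into a geographic extent by linear interpolation, flipping the y axis because image rows grow downward while latitude grows upward. The extent dictionary mirrors the one built in the prediction functions further down.

# Hypothetical sketch only -- the project's real georeference() may differ.
def georeference_sketch(points, extent):
    """Map pixel (x, y) points to (lon, lat) by linear interpolation over the
    extent, assuming pixel (0, 0) is the top-left corner of the image."""
    x_span = extent['x_max'] - extent['x_min']
    y_span = extent['y_max'] - extent['y_min']
    geo_points = []
    for px, py in points:
        lon = extent['x_min'] + (px / float(extent['img_width'])) * x_span
        lat = extent['y_max'] - (py / float(extent['img_height'])) * y_span  # y axis is flipped
        geo_points.append((lon, lat))
    return geo_points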
Example #2
def _predict(request: InferenceRequest):
    """Decode the request image, split it into img_size crops, predict on each crop and return WKT polygons."""
    print("Decoding image")
    b64 = base64.b64decode(request.image_data)
    print("Image decoded")
    barr = io.BytesIO(b64)
    img = Image.open(barr)
    img = img.convert("RGB")
    width, height = img.size
    extent = {
        'x_min': request.x_min,
        'y_min': request.y_min,
        'x_max': request.x_max,
        'y_max': request.y_max,
        'img_width': width,
        'img_height': height
    }

    img_size = 1024

    all_polygons = []
    cols = int(math.ceil(width / float(img_size)))
    rows = int(math.ceil(height / float(img_size)))
    images_to_predict = []
    tiles_by_img_id = {}
    for col in range(0, cols):
        for row in range(0, rows):
            print("Processing tile (x={},y={})".format(col, row))
            start_width = col * img_size
            start_height = row * img_size
            img_copy = img.crop(
                (start_width, start_height, start_width + img_size,
                 start_height + img_size))
            arr = np.asarray(img_copy)
            img_id = "img_id_{}_{}".format(col, row)
            tiles_by_img_id[img_id] = (col, row)
            images_to_predict.append((arr, img_id))
    point_sets = _predictor.predict_arrays(images=images_to_predict)
    for rle, score, img_id in point_sets:
        col, row = tiles_by_img_id[img_id]
        mask = cocomask.decode(rle)
        mask = mask.reshape((img_size, img_size))
        points = get_contour(mask)
        # Shift the crop-local contour points back into full-image pixel
        # coordinates; crops are placed on an img_size grid, not a 256 px grid.
        points = list(
            map(lambda p: (p[0] + col * img_size, p[1] + row * img_size),
                points))
        if request.rectangularize:
            points = rectangularize(points)
        georeffed = georeference(points, extent)
        if georeffed:
            points = georeffed
        polygon = geometry.Polygon(points)
        all_polygons.append(polygon)

    return list(map(lambda p: p.wkt, all_polygons))
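A caller might invoke this endpoint along the following lines. This is a hypothetical usage sketch that assumes `InferenceRequest` accepts the fields read above (`image_data`, the extent bounds and `rectangularize`) as keyword arguments, and that `area.png` is the image to analyse; the coordinate values are placeholders.

import base64

# Hypothetical usage; field names taken from the attributes read in _predict above.
with open("area.png", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("ascii")

request = InferenceRequest(
    image_data=encoded,
    x_min=11.4687, y_min=48.1640,   # geographic bounds of the image (example values)
    x_max=11.4690, y_max=48.1642,
    rectangularize=True,
)
wkt_polygons = _predict(request)    # one WKT string per detected object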
Example #3
    def predict_array(self, img_data: np.ndarray, extent=None, do_rectangularization=True, tile=None) \
            -> List[List[Tuple[int, int]]]:
        """Run detection on a single image array and return one point set per detected object,
        optionally rectangularized and georeferenced to the given extent."""
        if not tile:
            tile = (0, 0)

        if not self._model:
            print("Loading model")
            inference_config = self.InferenceConfig()
            # Create model in inference mode
            model = modellib.MaskRCNN(mode="inference",
                                      config=inference_config,
                                      model_dir="log")
            model.load_weights(self.weights_path, by_name=True)
            self._model = model

        model = self._model
        print("Predicting...")
        res = model.detect([img_data], verbose=1)
        print("Prediction done")
        print("Extracting contours...")
        point_sets = get_contours(masks=res[0]['masks'])
        point_sets = list(map(lambda point_set: list(point_set), point_sets))
        print("Contours extracted")

        rectangularized_outlines = []
        if do_rectangularization:
            point_sets = list(
                map(lambda point_set: rectangularize(point_set), point_sets))

        point_sets_mapped = []
        col, row = tile
        for points in point_sets:
            pp = list(
                map(lambda p: (p[0] + col * 256, p[1] + row * 256), points))
            if pp:
                point_sets_mapped.append(pp)
        point_sets = point_sets_mapped

        if not extent:
            rectangularized_outlines = point_sets
        else:
            for o in point_sets:
                georeffed = georeference(o, extent)
                if georeffed:
                    rectangularized_outlines.append(georeffed)
        return rectangularized_outlines
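The class that owns `predict_array` is not shown here. Assuming an instance `predictor` of that class has been constructed with a valid `weights_path`, a single-image prediction could look roughly like this (hypothetical usage, example coordinate values):

import numpy as np
from PIL import Image

# Hypothetical usage; `predictor` stands in for an instance of the class above.
img = np.asarray(Image.open("tile.png").convert("RGB"))
extent = {'x_min': 11.4687, 'y_min': 48.1640,
          'x_max': 11.4690, 'y_max': 48.1642,
          'img_width': img.shape[1], 'img_height': img.shape[0]}

outlines = predictor.predict_array(img, extent=extent, do_rectangularization=True)
for outline in outlines:
    print("outline with {} points".format(len(outline)))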
def _predict(request: InferenceRequest):
    """Decode and upscale the request image, predict on 512 px crops and return (Polygon, class name) tuples."""
    print("Decoding image")
    b64 = base64.b64decode(request.image_data)
    print("Image decoded")
    barr = io.BytesIO(b64)
    img = Image.open(barr)
    img = img.convert("RGB")
    width, height = img.size
    print("Received image size: ", img.size)
    extent = {
        'x_min': request.x_min,
        'y_min': request.y_min,
        'x_max': request.x_max,
        'y_max': request.y_max,
        'img_width': width,
        'img_height': height
    }

    img_size = 512  # the image will be cropped for prediction
    scale_by_factor = 3  # and scaled by this factor for improved accuracy
    new_width = width * scale_by_factor
    new_height = height * scale_by_factor

    img = img.resize((new_width, new_height), Image.LANCZOS)  # ANTIALIAS alias was removed in Pillow 10

    all_polygons = []
    cols = int(math.ceil(new_width / float(img_size)))
    rows = int(math.ceil(new_height / float(img_size)))
    images_to_predict = []
    tiles_by_img_id = {}
    count = 0
    for col in range(0, cols):
        for row in range(0, rows):
            count += 1
            print("Processing tile (x={},y={})".format(col, row))
            start_width = col * img_size
            start_height = row * img_size
            img_copy = img.crop((start_width, start_height, start_width+img_size, start_height+img_size))
            print("Cropped image size: ", img_copy.size)
            arr = np.asarray(img_copy)
            img_id = "img_id_{}_{}".format(col, row)
            tiles_by_img_id[img_id] = (col, row)
            images_to_predict.append((arr, img_id))
    point_sets = _predictor.predict_arrays(images=images_to_predict)

    count = 0
    for points, img_id, class_name in point_sets:
        count += 1
        col, row = tiles_by_img_id[img_id]
        points = list(map(lambda p: ((p[0]+col*img_size)/scale_by_factor, (p[1]+row*img_size)/scale_by_factor), points))
        if request.rectangularize:
            print("Rectangularizing point set {}/{}...".format(count, len(point_sets)))
            points = rectangularize(points)
            print("Rectangularizing complete")
        print("Georeferencing point set {}/{}...".format(count, len(point_sets)))
        georeffed = georeference(points, extent)
        print("Georeferencing complete")
        if georeffed:
            points = georeffed
        polygon = geometry.Polygon(points)
        all_polygons.append((polygon, class_name))

    return all_polygons
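This variant returns (shapely Polygon, class name) tuples rather than WKT strings. A caller could serialize them, for example to GeoJSON features, along these lines (hypothetical usage; `request` built as in the earlier sketch):

import json
from shapely.geometry import mapping

results = _predict(request)
features = [
    {"type": "Feature",
     "geometry": mapping(polygon),
     "properties": {"class": class_name}}
    for polygon, class_name in results
]
print(json.dumps({"type": "FeatureCollection", "features": features}, indent=2))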