Example 1
def get_input():
    bottle.response.content_type = 'application/json'
    data = bottle.request.json
    current_session = SESSION_HANDLER.get_session(bottle.request.session.id)

    current_session.add_entry(data)  # record this interaction

    # Inputs
    extent = data["extent"]
    dataset = data["dataset"]

    if dataset not in DATALOADERS:
        raise ValueError(
            "Dataset doesn't seem to be valid, please check Datasets.py")
    else:
        current_data_loader = DATALOADERS[dataset]

    input_raster = current_data_loader.get_data_from_extent(extent)
    warped_output_raster = warp_data_to_3857(
        input_raster)  # warp image to 3857
    cropped_warped_output_raster = crop_data_by_extent(
        warped_output_raster, extent)  # crop to the desired extent

    img = cropped_warped_output_raster.data[:, :, :3].copy().astype(
        np.uint8)  # keep the RGB channels to save as a color image
    img = cv2.imencode(".png", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))[1].tobytes()
    img = base64.b64encode(img).decode("utf-8")
    data["input_img"] = img

    bottle.response.status = 200
    return json.dumps(data)
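
Examples 1, 3, and 4 pass a single raster object between the data loader, warp_data_to_3857, and the crop helpers, reading its .data, .shape, .crs, and .transform attributes. Purely as an illustration inferred from those attribute accesses (not the project's actual class definition), a minimal InMemoryRaster-style container could look like the sketch below.

from dataclasses import dataclass
from typing import Any

import numpy as np

@dataclass
class InMemoryRasterSketch:
    """Hypothetical minimal container matching the attribute accesses seen in
    Examples 1, 3, and 4: pixel data plus the georeferencing needed to warp
    and crop it."""
    data: np.ndarray     # (height, width, channels)
    crs: Any             # e.g. a rasterio CRS object or an "epsg:xxxx" string
    transform: Any       # affine transform mapping pixel coordinates to CRS coordinates
    bounds: Any          # (left, bottom, right, top) in CRS coordinates
    shape: tuple = None  # kept as a separate, assignable attribute (Example 4 overwrites it)

    def __post_init__(self):
        if self.shape is None:
            self.shape = self.data.shape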
Example 2
def get_input():
    bottle.response.content_type = 'application/json'
    data = bottle.request.json
    
    SESSION_HANDLER.get_session(bottle.request.session.id).add_entry(data) # record this interaction

    # Inputs
    extent = data["extent"]
    dataset = data["dataset"]

    if dataset not in DATASETS:
        raise ValueError("Dataset doesn't seem to be valid, please check Datasets.py")

    patch, crs, transform, bounds = DATASETS[dataset]["data_loader"].get_data_from_extent(extent)
    print("get_input, after get_data_from_extent:", patch.shape)

    warped_patch, warped_patch_crs, warped_patch_transform, warped_patch_bounds = warp_data_to_3857(patch, crs, transform, bounds)
    print("get_input, after warp_data_to_3857:", warped_patch.shape)

    cropped_warped_patch, cropped_warped_patch_transform = crop_data_by_extent(warped_patch, warped_patch_crs, warped_patch_transform, extent)
    print("get_input, after crop_data_by_extent:", cropped_warped_patch.shape)

    img = cropped_warped_patch[:,:,:3].copy().astype(np.uint8) # keep the RGB channels to save as a color image

    img = cv2.imencode(".png", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))[1].tobytes()
    img = base64.b64encode(img).decode("utf-8")
    data["input_naip"] = img

    bottle.response.status = 200
    return json.dumps(data)
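
Both versions of get_input return the requested imagery as a base64-encoded PNG inside the JSON response (under "input_img" in Example 1, "input_naip" in Example 2). The helper below is a minimal sketch of how a client or test could decode that field back into an RGB array; the function name and the response_body/field parameters are illustrative, not part of the server code.

import base64
import json

import cv2
import numpy as np

def decode_returned_image(response_body, field="input_naip"):
    """Decode the base64 PNG embedded in get_input()'s JSON response
    (hypothetical client-side helper)."""
    payload = json.loads(response_body)
    png_bytes = base64.b64decode(payload[field])
    buf = np.frombuffer(png_bytes, dtype=np.uint8)   # 1-D uint8 buffer for imdecode
    img_bgr = cv2.imdecode(buf, cv2.IMREAD_COLOR)    # decoded as BGR by OpenCV
    return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)  # back to RGB channel order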
Example 3
def pred_patch():
    bottle.response.content_type = 'application/json'
    data = bottle.request.json
    current_session = SESSION_HANDLER.get_session(bottle.request.session.id)

    current_session.add_entry(data)  # record this interaction

    # Inputs
    extent = data["extent"]
    dataset = data["dataset"]
    class_list = data["classes"]
    name_list = [item["name"] for item in class_list]
    color_list = [item["color"] for item in class_list]

    if dataset not in DATALOADERS:
        raise ValueError(
            "Dataset doesn't seem to be valid, do the datasets in js/tile_layers.js correspond to those in TileLayers.py"
        )
    else:
        current_data_loader = DATALOADERS[dataset]

    input_raster = current_data_loader.get_data_from_extent(extent)
    current_session.latest_input_raster = input_raster

    output_raster = current_session.pred_patch(input_raster)  # run inference
    warped_output_raster = warp_data_to_3857(
        output_raster)  # warp output to 3857
    cropped_warped_output_raster = crop_data_by_extent(
        warped_output_raster, extent)  # crop to the desired extent

    if cropped_warped_output_raster.shape[2] > len(color_list):
        LOGGER.warning(
            "The number of output channels is larger than the given color list, cropping output to number of colors (you probably don't want this to happen)"
        )
        cropped_warped_output_raster.data = cropped_warped_output_raster.data[:, :, :len(
            color_list)]

    # Create color versions of predictions
    img_soft = class_prediction_to_img(cropped_warped_output_raster.data,
                                       False, color_list)
    img_soft = cv2.imencode(".png", cv2.cvtColor(img_soft, cv2.COLOR_RGB2BGR))[1].tobytes()
    img_soft = base64.b64encode(img_soft).decode("utf-8")
    data["output_soft"] = img_soft

    img_hard = class_prediction_to_img(cropped_warped_output_raster.data, True,
                                       color_list)
    img_hard = cv2.imencode(".png", cv2.cvtColor(img_hard, cv2.COLOR_RGB2BGR))[1].tobytes()
    img_hard = base64.b64encode(img_hard).decode("utf-8")
    data["output_hard"] = img_hard

    bottle.response.status = 200
    return json.dumps(data)
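
class_prediction_to_img belongs to the surrounding codebase and its implementation is not shown in these examples. As a rough, assumed sketch only, the function below shows one plausible way a (height, width, num_classes) prediction could be rendered with a per-class color list, with the hard flag selecting an argmax rendering as in the calls above; the hex-string color format and the soft blending are assumptions for illustration, not the repository's actual code.

import numpy as np

def class_prediction_to_img_sketch(output, hard, color_list):
    """Hypothetical stand-in for class_prediction_to_img.

    output:     (height, width, num_classes) array of class scores
    hard:       True  -> color each pixel by its argmax class
                False -> blend class colors weighted by the class scores
    color_list: assumed "#rrggbb" hex strings, one per class
    """
    colors = np.array(
        [[int(c[1:3], 16), int(c[3:5], 16), int(c[5:7], 16)] for c in color_list],
        dtype=np.float64,
    )  # (num_classes, 3)

    if hard:
        img = colors[output.argmax(axis=2)]              # (H, W, 3) lookup by class index
    else:
        img = np.einsum("hwc,cd->hwd", output, colors)   # score-weighted color blend
    return np.clip(img, 0, 255).astype(np.uint8)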
Example 4
def pred_tile():
    bottle.response.content_type = 'application/json'
    data = bottle.request.json
    current_session = SESSION_HANDLER.get_session(bottle.request.session.id)

    current_session.add_entry(data)  # record this interaction

    # Inputs
    geom = data["polygon"]
    class_list = data["classes"]
    name_list = [item["name"] for item in class_list]
    color_list = [item["color"] for item in class_list]
    dataset = data["dataset"]
    zone_layer_name = data["zoneLayerName"]
    model_idx = data["modelIdx"]

    if dataset not in DATALOADERS:
        raise ValueError(
            "Dataset doesn't seem to be valid, do the datasets in js/tile_layers.js correspond to those in TileLayers.py"
        )
    else:
        current_data_loader = DATALOADERS[dataset]

    try:
        input_raster = current_data_loader.get_data_from_geometry(
            geom["geometry"])
        shape_area = get_area_from_geometry(geom["geometry"])
    except NotImplementedError as e:  # Example of how to handle errors from the rest of the server
        bottle.response.status = 400
        return json.dumps({
            "error":
            "Cannot currently download imagery with 'Basemap' based datasets"
        })

    output_raster = current_session.pred_tile(input_raster)
    if output_raster.shape[2] > len(color_list):
        LOGGER.warning(
            "The number of output channels is larger than the given color list, cropping output to number of colors (you probably don't want this to happen)"
        )
        output_raster.data = output_raster.data[:, :, :len(color_list)]

    output_hard = output_raster.data.argmax(axis=2)
    nodata_mask = np.sum(input_raster.data == 0,
                         axis=2) == input_raster.shape[2]
    output_hard[nodata_mask] = 255
    class_vals, class_counts = np.unique(output_hard[~nodata_mask],
                                         return_counts=True)

    img_hard = class_prediction_to_img(output_raster.data, True, color_list)
    img_hard = cv2.cvtColor(img_hard, cv2.COLOR_RGB2BGRA)
    img_hard[nodata_mask] = [0, 0, 0, 0]

    # replace the output predictions with our image data because we are too lazy to make a new InMemoryRaster
    output_raster.data = img_hard
    output_raster.shape = img_hard.shape

    warped_output_raster = warp_data_to_3857(
        output_raster)  # warp output to 3857
    cropped_warped_output_raster = crop_data_by_geometry(
        warped_output_raster, geom["geometry"],
        "epsg:4326")  # crop to the desired shape
    img_hard = cropped_warped_output_raster.data

    tmp_id = get_random_string(8)
    cv2.imwrite("tmp/downloads/%s.png" % (tmp_id), img_hard)
    data["downloadPNG"] = "tmp/downloads/%s.png" % (tmp_id)

    new_profile = {}
    new_profile['driver'] = 'GTiff'
    new_profile['dtype'] = 'uint8'
    new_profile['compress'] = "lzw"
    new_profile['count'] = 1
    new_profile['transform'] = output_raster.transform
    new_profile['height'] = output_hard.shape[0]
    new_profile['width'] = output_hard.shape[1]
    new_profile['nodata'] = 255
    new_profile['crs'] = output_raster.crs
    with rasterio.open("tmp/downloads/%s.tif" % (tmp_id), 'w',
                       **new_profile) as f:
        f.write(output_hard.astype(np.uint8), 1)
    data["downloadTIFF"] = "tmp/downloads/%s.tif" % (tmp_id)

    data["classStatistics"] = []

    f = open("tmp/downloads/%s.txt" % (tmp_id), "w")
    f.write("Class id\tClass name\tPercent area\tArea (km^2)\n")
    for i in range(len(class_vals)):
        pct_area = (class_counts[i] / np.sum(class_counts))
        if shape_area is not None:
            real_area = shape_area * pct_area
        else:
            real_area = -1
        f.write("%d\t%s\t%0.4f%%\t%0.4f\n" %
                (class_vals[i], name_list[class_vals[i]], pct_area * 100,
                 real_area))
        data["classStatistics"].append({
            "Class ID": int(class_vals[i]),
            "Class Name": name_list[class_vals[i]],
            "Percent Area": float(pct_area),
            "Area (km2)": float(real_area)
        })
    f.close()
    data["downloadStatistics"] = "tmp/downloads/%s.txt" % (tmp_id)

    bottle.response.status = 200
    return json.dumps(data)
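
get_area_from_geometry is another helper from the surrounding codebase whose implementation is not included here. As a hedged sketch of how such a helper could produce the km^2 figures written to the statistics file above, the function below computes the geodesic area of a GeoJSON polygon with pyproj and shapely; the name and the EPSG:4326 assumption are illustrative only.

from pyproj import Geod
from shapely.geometry import shape

def area_km2_from_geometry(geojson_geometry):
    """Hypothetical area helper: geodesic area of a GeoJSON polygon
    (assumed to be lon/lat, EPSG:4326), returned in km^2."""
    polygon = shape(geojson_geometry)
    area_m2, _perimeter = Geod(ellps="WGS84").geometry_area_perimeter(polygon)
    return abs(area_m2) / 1e6  # signed m^2 -> absolute value in km^2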
Example 5
def pred_tile():
    bottle.response.content_type = 'application/json'
    data = bottle.request.json

    SESSION_HANDLER.get_session(bottle.request.session.id).add_entry(data) # record this interaction

    # Inputs
    geom = data["polygon"]
    class_list = data["classes"]
    name_list = [item["name"] for item in class_list]
    color_list = [item["color"] for item in class_list]
    dataset = data["dataset"]
    zone_layer_name = data["zoneLayerName"]
    model_idx = data["modelIdx"]

    if dataset not in DATASETS:
        raise ValueError("Dataset doesn't seem to be valid, do the datasets in js/tile_layers.js correspond to those in TileLayers.py")    
    
    try:
        tile, raster_profile, raster_transform, raster_bounds, raster_crs = DATASETS[dataset]["data_loader"].get_data_from_shape(geom["geometry"])
        print("pred_tile, get_data_from_shape:", tile.shape)

        shape_area = get_area_from_geometry(geom["geometry"])      
    except NotImplementedError as e:
        bottle.response.status = 400
        return json.dumps({"error": "Cannot currently download imagery with 'Basemap' based datasets"})

    output = SESSION_HANDLER.get_session(bottle.request.session.id).model.run(tile, True)
    print("pred_tile, after model.run:", output.shape)
    
    if output.shape[2] > len(color_list):
       LOGGER.warning("The number of output channels is larger than the given color list, cropping output to number of colors (you probably don't want this to happen")
       output = output[:,:,:len(color_list)]
    
    output_hard = output.argmax(axis=2)

    # apply nodata mask from naip_data
    nodata_mask = np.sum(tile == 0, axis=2) == tile.shape[2]
    output_hard[nodata_mask] = 255
    vals, counts = np.unique(output_hard[~nodata_mask], return_counts=True)

    # ------------------------------------------------------
    # Step 4
    #   Convert images to base64 and return  
    # ------------------------------------------------------
    tmp_id = get_random_string(8)
    img_hard = class_prediction_to_img(output, True, color_list)
    img_hard = cv2.cvtColor(img_hard, cv2.COLOR_RGB2BGRA)
    img_hard[nodata_mask] = [0,0,0,0]

    img_hard, img_hard_crs, img_hard_transform, img_hard_bounds = warp_data_to_3857(img_hard, raster_crs, raster_transform, raster_bounds)
    print("pred_tile, after warp_data_to_3857:", img_hard.shape)

    img_hard, cropped_warped_patch_transform = crop_data_by_geometry(img_hard, img_hard_crs, img_hard_transform, geom["geometry"], "epsg:4326")
    print("pred_tile, after crop_data_by_geometry:", img_hard.shape)


    cv2.imwrite("tmp/downloads/%s.png" % (tmp_id), img_hard)
    data["downloadPNG"] = "tmp/downloads/%s.png" % (tmp_id)

    new_profile = raster_profile.copy()
    new_profile['driver'] = 'GTiff'
    new_profile['dtype'] = 'uint8'
    new_profile['compress'] = "lzw"
    new_profile['count'] = 1
    new_profile['transform'] = raster_transform
    new_profile['height'] = tile.shape[0] 
    new_profile['width'] = tile.shape[1]
    new_profile['nodata'] = 255
    f = rasterio.open("tmp/downloads/%s.tif" % (tmp_id), 'w', **new_profile)
    f.write(output_hard.astype(np.uint8), 1)
    f.close()
    data["downloadTIFF"] = "tmp/downloads/%s.tif" % (tmp_id)

    f = open("tmp/downloads/%s.txt" % (tmp_id), "w")
    f.write("Class id\tClass name\tPercent area\tArea (km^2)\n")
    for i in range(len(vals)):
        pct_area = (counts[i] / np.sum(counts))
        if shape_area is not None:
            real_area = shape_area * pct_area
        else:
            real_area = -1
        f.write("%d\t%s\t%0.4f%%\t%0.4f\n" % (vals[i], name_list[vals[i]], pct_area*100, real_area))
    f.close()
    data["downloadStatistics"] = "tmp/downloads/%s.txt" % (tmp_id)

    bottle.response.status = 200
    return json.dumps(data)
Example 6
def pred_patch():
    bottle.response.content_type = 'application/json'
    data = bottle.request.json

    SESSION_HANDLER.get_session(bottle.request.session.id).add_entry(data) # record this interaction

    # Inputs
    extent = data["extent"]
    dataset = data["dataset"]
    class_list = data["classes"]
    name_list = [item["name"] for item in class_list]
    color_list = [item["color"] for item in class_list]

    tic = float(time.time())

    # ------------------------------------------------------
    # Step 1
    #   Transform the input extent into a shapely geometry
    #   Find the tile associated with the geometry
    # ------------------------------------------------------
    
    # ------------------------------------------------------
    # Step 2
    #   Load the input data sources for the given tile  
    # ------------------------------------------------------

    if dataset not in DATASETS:
        raise ValueError("Dataset doesn't seem to be valid, do the datasets in js/tile_layers.js correspond to those in TileLayers.py")


    patch, crs, transform, bounds = DATASETS[dataset]["data_loader"].get_data_from_extent(extent)
    print("pred_patch, after get_data_from_extent:", patch.shape)

    SESSION_HANDLER.get_session(bottle.request.session.id).current_transform = (crs, transform)

    # ------------------------------------------------------
    # Step 3
    #   Run a model on the input data
    #   Apply reweighting
    #   Fix padding
    # ------------------------------------------------------
    output = SESSION_HANDLER.get_session(bottle.request.session.id).model.run(patch, False)
    assert len(output.shape) == 3, "The model function should return an image shaped as (height, width, num_classes)"
    assert (output.shape[2] < output.shape[0] and output.shape[2] < output.shape[1]), "The model function should return an image shaped as (height, width, num_classes)" # assume that num channels is less than img dimensions

    # ------------------------------------------------------
    # Step 4
    #   Warp output to EPSG:3857 and crop off the padded area
    # ------------------------------------------------------
    warped_output, warped_patch_crs, warped_patch_transform, warped_patch_bounds = warp_data_to_3857(output, crs, transform, bounds)
    print("pred_patch, after warp_data_to_3857:", warped_output.shape)

    cropped_warped_output, cropped_warped_patch_transform = crop_data_by_extent(warped_output, warped_patch_crs, warped_patch_transform, extent)
    print("pred_patch, after crop_data_by_extent:", cropped_warped_output.shape)

    if cropped_warped_output.shape[2] > len(color_list):
       LOGGER.warning("The number of output channels is larger than the given color list, cropping output to number of colors (you probably don't want this to happen")
       cropped_warped_output = cropped_warped_output[:,:,:len(color_list)]

    # ------------------------------------------------------
    # Step 5
    #   Convert images to base64 and return  
    # ------------------------------------------------------
    img_soft = class_prediction_to_img(cropped_warped_output, False, color_list)
    img_soft = cv2.imencode(".png", cv2.cvtColor(img_soft, cv2.COLOR_RGB2BGR))[1].tobytes()
    img_soft = base64.b64encode(img_soft).decode("utf-8")
    data["output_soft"] = img_soft

    img_hard = class_prediction_to_img(cropped_warped_output, True, color_list)
    img_hard = cv2.imencode(".png", cv2.cvtColor(img_hard, cv2.COLOR_RGB2BGR))[1].tobytes()
    img_hard = base64.b64encode(img_hard).decode("utf-8")
    data["output_hard"] = img_hard

    print("pred_patch took %0.2f seconds, of which:" % (time.time()-tic))
    # print("-- loading data: %0.2f seconds" % (toc_data_load))
    # print("-- running model: %0.2f seconds" % (toc_model_run))
    # print("-- warping/cropping: %0.2f seconds" % (time_for_crops_and_warps))
    # print("-- coloring: %0.2f seconds" % (time_for_coloring))
    bottle.response.status = 200
    return json.dumps(data)
Example 7
import sys
sys.path.append("../")

import rasterio
import rasterio.warp
import numpy as np

from web_tool.DataLoader import DataLoaderCustom, warp_data_to_3857

dataloader = DataLoaderCustom("../data/imagery/hcmc_sentinel.tif", [], padding=1100)

extent = {
    "crs": "epsg:3857",
    "xmax": 11880604,
    "xmin": 11880304,
    "ymax": 1206861.0000000007,
    "ymin": 1206561
}

patch, crs, transform, bounds = dataloader.get_data_from_extent(extent)
patch = np.rollaxis(patch, 0, 3)  # move bands from (bands, height, width) to (height, width, bands)

print(patch.shape)

patch, new_bounds = warp_data_to_3857(patch, crs, transform, bounds)
Example 8
import sys
sys.path.append("../")

import rasterio
import rasterio.warp
import numpy as np

from web_tool.DataLoader import DataLoaderCustom, warp_data_to_3857, InMemoryRaster

dataloader = DataLoaderCustom("../data/imagery/hcmc_sentinel.tif", [],
                              padding=1100)

extent = {
    "crs": "epsg:3857",
    "xmax": 11880604,
    "xmin": 11880304,
    "ymax": 1206861.0000000007,
    "ymin": 1206561
}

raster = dataloader.get_data_from_extent(extent)

print(raster.shape)

warped_raster = warp_data_to_3857(raster)
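
In the InMemoryRaster version of the API (Example 8), the warped result carries its pixels in a .data attribute, as the server endpoints above show. A minimal continuation of this script, assuming the bands are stored last as in Examples 1 and 3, could save the first three bands as a color preview; the output filename is arbitrary.

import cv2

# Keep the first three bands and write them out as an RGB preview image,
# mirroring what get_input() does before base64-encoding its PNG.
img = warped_raster.data[:, :, :3].copy().astype(np.uint8)
cv2.imwrite("warped_preview.png", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))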