Example #1
0
def get_input():
    """Serve the raw input imagery for a requested map extent as a base64 PNG.

    Expects a JSON request body with at least:
        "extent":  the geographic extent to fetch, and
        "dataset": a key into the module-level DATALOADERS registry.

    The matching raster is loaded, warped to EPSG:3857, cropped to the
    requested extent, and its RGB channels are PNG-encoded.  The request
    payload is echoed back (JSON string) with an added "input_img" key
    containing the base64-encoded PNG.

    Raises:
        ValueError: if "dataset" is not registered in DATALOADERS.
    """
    bottle.response.content_type = 'application/json'
    data = bottle.request.json
    current_session = SESSION_HANDLER.get_session(bottle.request.session.id)

    current_session.add_entry(data)  # record this interaction

    # Inputs
    extent = data["extent"]
    dataset = data["dataset"]

    if dataset not in DATALOADERS:
        raise ValueError(
            "Dataset doesn't seem to be valid, please check Datasets.py")
    current_data_loader = DATALOADERS[dataset]

    input_raster = current_data_loader.get_data_from_extent(extent)
    warped_output_raster = warp_data_to_3857(
        input_raster)  # warp image to 3857
    cropped_warped_output_raster = crop_data_by_extent(
        warped_output_raster, extent)  # crop to the desired extent

    # Keep the RGB channels to save as a color image.
    img = cropped_warped_output_raster.data[:, :, :3].copy().astype(np.uint8)
    # cv2.imencode returns (success, ndarray); ndarray.tostring() was removed
    # in NumPy 1.23 -- tobytes() is the byte-identical replacement.
    img = cv2.imencode(".png", cv2.cvtColor(img,
                                            cv2.COLOR_RGB2BGR))[1].tobytes()
    img = base64.b64encode(img).decode("utf-8")
    data["input_img"] = img

    bottle.response.status = 200
    return json.dumps(data)
Example #2
0
def get_input():
    """Serve the raw NAIP-style input imagery for a requested extent.

    Expects a JSON body with "extent" and "dataset" (a key into the
    module-level DATASETS registry).  Loads the patch, warps it to
    EPSG:3857, crops it to the extent, and returns the payload (JSON
    string) with an added "input_naip" key holding a base64 PNG of the
    RGB channels.

    Raises:
        ValueError: if "dataset" is not registered in DATASETS.
    """
    bottle.response.content_type = 'application/json'
    data = bottle.request.json

    SESSION_HANDLER.get_session(bottle.request.session.id).add_entry(data)  # record this interaction

    # Inputs
    extent = data["extent"]
    dataset = data["dataset"]

    if dataset not in DATASETS:
        raise ValueError("Dataset doesn't seem to be valid, please check Datasets.py")

    patch, crs, transform, bounds = DATASETS[dataset]["data_loader"].get_data_from_extent(extent)
    print("get_input, after get_data_from_extent:", patch.shape)

    warped_patch, warped_patch_crs, warped_patch_transform, warped_patch_bounds = warp_data_to_3857(patch, crs, transform, bounds)
    print("get_input, after warp_data_to_3857:", warped_patch.shape)

    cropped_warped_patch, cropped_warped_patch_transform = crop_data_by_extent(warped_patch, warped_patch_crs, warped_patch_transform, extent)
    print("get_input, after crop_data_by_extent:", cropped_warped_patch.shape)

    img = cropped_warped_patch[:, :, :3].copy().astype(np.uint8)  # keep the RGB channels to save as a color image

    # ndarray.tostring() was removed in NumPy 1.23; tobytes() is the
    # byte-identical replacement.
    img = cv2.imencode(".png", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))[1].tobytes()
    img = base64.b64encode(img).decode("utf-8")
    data["input_naip"] = img

    bottle.response.status = 200
    return json.dumps(data)
Example #3
0
def pred_patch():
    """Run model inference over a requested extent and return colorized output.

    Expects a JSON body with "extent", "dataset" (a key into DATALOADERS),
    and "classes" (a list of {"name", "color"} dicts).  Loads the input
    raster, runs the current session's model on it, warps the prediction to
    EPSG:3857, crops it to the extent, and returns the payload (JSON string)
    augmented with base64 PNGs under "output_soft" (class probabilities as
    colors) and "output_hard" (argmax class colors).

    Raises:
        ValueError: if "dataset" is not registered in DATALOADERS.
    """
    bottle.response.content_type = 'application/json'
    data = bottle.request.json
    current_session = SESSION_HANDLER.get_session(bottle.request.session.id)

    current_session.add_entry(data)  # record this interaction

    # Inputs
    extent = data["extent"]
    dataset = data["dataset"]
    class_list = data["classes"]
    color_list = [item["color"] for item in class_list]

    if dataset not in DATALOADERS:
        raise ValueError(
            "Dataset doesn't seem to be valid, do the datasets in js/tile_layers.js correspond to those in TileLayers.py"
        )
    current_data_loader = DATALOADERS[dataset]

    input_raster = current_data_loader.get_data_from_extent(extent)
    # Stash the input so later requests (e.g. corrections) can reuse it.
    current_session.latest_input_raster = input_raster

    output_raster = current_session.pred_patch(input_raster)  # run inference
    warped_output_raster = warp_data_to_3857(
        output_raster)  # warp output to 3857
    cropped_warped_output_raster = crop_data_by_extent(
        warped_output_raster, extent)  # crop to the desired result

    if cropped_warped_output_raster.shape[2] > len(color_list):
        LOGGER.warning(
            "The number of output channels is larger than the given color list, cropping output to number of colors (you probably don't want this to happen)"
        )
        cropped_warped_output_raster.data = \
            cropped_warped_output_raster.data[:, :, :len(color_list)]

    # Create color versions of predictions.
    # NOTE: ndarray.tostring() was removed in NumPy 1.23; tobytes() is the
    # byte-identical replacement.
    img_soft = class_prediction_to_img(cropped_warped_output_raster.data,
                                       False, color_list)
    img_soft = cv2.imencode(".png",
                            cv2.cvtColor(img_soft,
                                         cv2.COLOR_RGB2BGR))[1].tobytes()
    img_soft = base64.b64encode(img_soft).decode("utf-8")
    data["output_soft"] = img_soft

    img_hard = class_prediction_to_img(cropped_warped_output_raster.data, True,
                                       color_list)
    img_hard = cv2.imencode(".png",
                            cv2.cvtColor(img_hard,
                                         cv2.COLOR_RGB2BGR))[1].tobytes()
    img_hard = base64.b64encode(img_hard).decode("utf-8")
    data["output_hard"] = img_hard

    bottle.response.status = 200
    return json.dumps(data)
Example #4
0
def pred_patch():
    """Run model inference over a requested extent and return colorized output.

    Expects a JSON body with "extent", "dataset" (a key into DATASETS), and
    "classes" (a list of {"name", "color"} dicts).  Loads the input patch,
    runs the current session's model on it, warps the prediction to
    EPSG:3857, crops it to the extent, and returns the payload (JSON string)
    augmented with base64 PNGs under "output_soft" and "output_hard".

    Raises:
        ValueError: if "dataset" is not registered in DATASETS.
        AssertionError: if the model returns output that is not shaped
            (height, width, num_classes).
    """
    bottle.response.content_type = 'application/json'
    data = bottle.request.json

    SESSION_HANDLER.get_session(bottle.request.session.id).add_entry(data)  # record this interaction

    # Inputs
    extent = data["extent"]
    dataset = data["dataset"]
    class_list = data["classes"]
    color_list = [item["color"] for item in class_list]

    tic = float(time.time())

    # ------------------------------------------------------
    # Step 1
    #   Transform the input extent into a shapely geometry
    #   Find the tile assosciated with the geometry
    # ------------------------------------------------------

    # ------------------------------------------------------
    # Step 2
    #   Load the input data sources for the given tile
    # ------------------------------------------------------
    if dataset not in DATASETS:
        raise ValueError("Dataset doesn't seem to be valid, do the datasets in js/tile_layers.js correspond to those in TileLayers.py")

    patch, crs, transform, bounds = DATASETS[dataset]["data_loader"].get_data_from_extent(extent)
    print("pred_patch, after get_data_from_extent:", patch.shape)

    # Remember the georeferencing so later requests can map pixels back.
    SESSION_HANDLER.get_session(bottle.request.session.id).current_transform = (crs, transform)

    # ------------------------------------------------------
    # Step 3
    #   Run a model on the input data
    #   Apply reweighting
    #   Fix padding
    # ------------------------------------------------------
    output = SESSION_HANDLER.get_session(bottle.request.session.id).model.run(patch, False)
    assert len(output.shape) == 3, "The model function should return an image shaped as (height, width, num_classes)"
    # Assume that num_classes is smaller than both spatial dimensions.
    assert (output.shape[2] < output.shape[0] and output.shape[2] < output.shape[1]), "The model function should return an image shaped as (height, width, num_classes)"

    # ------------------------------------------------------
    # Step 4
    #   Warp output to EPSG:3857 and crop off the padded area
    # ------------------------------------------------------
    warped_output, warped_patch_crs, warped_patch_transform, warped_patch_bounds = warp_data_to_3857(output, crs, transform, bounds)
    print("pred_patch, after warp_data_to_3857:", warped_output.shape)

    cropped_warped_output, cropped_warped_patch_transform = crop_data_by_extent(warped_output, warped_patch_crs, warped_patch_transform, extent)
    print("pred_patch, after crop_data_by_extent:", cropped_warped_output.shape)

    if cropped_warped_output.shape[2] > len(color_list):
        LOGGER.warning("The number of output channels is larger than the given color list, cropping output to number of colors (you probably don't want this to happen)")
        cropped_warped_output = cropped_warped_output[:, :, :len(color_list)]

    # ------------------------------------------------------
    # Step 5
    #   Convert images to base64 and return
    # ------------------------------------------------------
    # NOTE: ndarray.tostring() was removed in NumPy 1.23; tobytes() is the
    # byte-identical replacement.
    img_soft = class_prediction_to_img(cropped_warped_output, False, color_list)
    img_soft = cv2.imencode(".png", cv2.cvtColor(img_soft, cv2.COLOR_RGB2BGR))[1].tobytes()
    img_soft = base64.b64encode(img_soft).decode("utf-8")
    data["output_soft"] = img_soft

    img_hard = class_prediction_to_img(cropped_warped_output, True, color_list)
    img_hard = cv2.imencode(".png", cv2.cvtColor(img_hard, cv2.COLOR_RGB2BGR))[1].tobytes()
    img_hard = base64.b64encode(img_hard).decode("utf-8")
    data["output_hard"] = img_hard

    print("pred_patch took %0.2f seconds, of which:" % (time.time() - tic))
    bottle.response.status = 200
    return json.dumps(data)