Example 1
0
def save_plot_image_polygons(filepath, ori_image, ori_gt_polygons,
                             disp_polygons, aligned_disp_polygons):
    """Overlay three polygon sets on an image and save the composite.

    Ground-truth polygons are drawn in green, displaced polygons in red and
    aligned displaced polygons in blue, painted over the first 3 channels of
    ori_image. The resulting uint8 image is written to filepath.
    """
    spatial_shape = ori_image.shape[:2]

    def rasterize_edges(polygons):
        # Edge-only rasterization used purely for display purposes.
        return polygon_utils.draw_polygon_map(polygons,
                                              spatial_shape,
                                              fill=False,
                                              edges=True,
                                              vertices=False,
                                              line_width=1)

    # (polygon map, RGB color) pairs, painted in this order so later
    # overlays take precedence where they overlap earlier ones.
    overlays = [
        (rasterize_edges(ori_gt_polygons), np.array([0, 255, 0])),        # green
        (rasterize_edges(disp_polygons), np.array([255, 0, 0])),          # red
        (rasterize_edges(aligned_disp_polygons), np.array([0, 0, 255])),  # blue
    ]

    output_image = ori_image[:, :, :3].astype(np.float64)  # Keep first 3 channels
    for polygon_map, color in overlays:
        output_image[np.where(0 < polygon_map[:, :, 0])] = color
    output_image = output_image.astype(np.uint8)

    skimage.io.imsave(filepath, output_image)
Example 2
0
def measure_ious(gt_polygons, pred_seg, thresholds, filepath):
    """Compute IoU between thresholded predictions and ground-truth polygons.

    Rasterizes gt_polygons onto the prediction's spatial shape, crops the
    fixed margin off both images, collapses channels with a max, then for
    each threshold binarizes the prediction and computes intersection over
    union against the ground-truth mask. Results are saved to filepath (as a
    {"thresholds", "ious"} dict via np.save) and returned.

    :param gt_polygons: polygons rasterized as the ground-truth mask
    :param pred_seg: predicted segmentation image (H, W, C) of float scores
    :param thresholds: iterable of binarization thresholds
    :param filepath: output path for the saved thresholds/ious dict
    :return: list of IoU values, one per threshold (np.nan when union empty)
    """
    padding = (220 - 100) // 2  # TODO: retrieve this programmatically
    gt_seg = polygon_utils.draw_polygon_map(gt_polygons, pred_seg.shape[:2], fill=True, edges=True, vertices=True)
    # Crop both images to remove margin
    pred_seg = pred_seg[padding:-padding, padding:-padding, :]
    gt_seg = gt_seg[padding:-padding, padding:-padding, :]
    # Reduce channels to single max value
    pred_seg = np.max(pred_seg, axis=-1)
    gt_seg = np.max(gt_seg, axis=-1)
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the correct dtype argument here.
    gt_mask = gt_seg.astype(bool)
    # Create thresholded masks
    ious = []
    for threshold in thresholds:
        pred_mask = threshold < pred_seg

        intersection = pred_mask & gt_mask
        union = pred_mask | gt_mask
        intersection_count = np.sum(intersection)
        union_count = np.sum(union)
        if 0 < union_count:
            iou = intersection_count / float(union_count)
        else:
            iou = np.nan  # IoU is undefined when both masks are empty
        ious.append(iou)

    thresholds_ious = {
        "thresholds": thresholds,
        "ious": ious,
    }
    np.save(filepath, thresholds_ious)
    return ious
Example 3
0
def save_plot_image_polygon(filepath, image, polygons):
    """Draw polygon edges in blue over an image and save it to filepath."""
    edge_map = polygon_utils.draw_polygon_map(polygons, image.shape[:2],
                                              fill=False, edges=True,
                                              vertices=False, line_width=1)

    result = image[:, :, :3].astype(np.float64)  # Keep first 3 channels
    # Paint every pixel covered by a polygon edge in blue.
    result[np.where(0 < edge_map[:, :, 0])] = np.array([0, 0, 255])
    result = result.astype(np.uint8)

    skimage.io.imsave(filepath, result)
Example 4
0
def compute_grads(raw_dirpath, runs_dirpath, run_name, ds_fac,
                  overwrite_config, tile_info_list, polygon_dirname,
                  output_dirname, output_filepath_format):
    """Compute and save per-patch gradients with a trained MapAlignModel.

    Restores the run's checkpoint, then for every tile in tile_info_list:
    loads its ground-truth data, downsamples it by ds_fac, rasterizes the
    polygons, and for every bbox patch runs the model's gradient ops and
    saves the flattened (x, y) gradients as one .npy file per patch.

    :param raw_dirpath: root directory of the raw dataset
    :param runs_dirpath: directory containing run sub-directories
    :param run_name: name of the run whose checkpoint is restored
    :param ds_fac: downsampling factor applied to image and polygons
    :param overwrite_config: values overriding the run's saved config
        (must provide "input_res" and "batch_size")
    :param tile_info_list: tile descriptors with "city", "number", "fold"
        and "bbox_list" entries
    :param polygon_dirname: polygon directory name passed to the reader
    :param output_dirname: output sub-directory name used in the path format
    :param output_filepath_format: format string producing output filepaths
    """
    # Setup run dir and load config file
    run_dir = run_utils.setup_run_dir(runs_dirpath, run_name)
    _, checkpoints_dir = run_utils.setup_run_subdirs(run_dir)

    config = run_utils.load_config(config_dirpath=run_dir)

    # --- Instantiate model
    output_res = model.MapAlignModel.get_output_res(
        overwrite_config["input_res"], config["pool_count"])
    map_align_model = model.MapAlignModel(
        config["model_name"], overwrite_config["input_res"],
        config["add_image_input"], config["image_channel_count"],
        config["image_feature_base_count"], config["add_poly_map_input"],
        config["poly_map_channel_count"],
        config["poly_map_feature_base_count"],
        config["common_feature_base_count"], config["pool_count"],
        config["add_disp_output"], config["disp_channel_count"],
        config["add_seg_output"], config["seg_channel_count"], output_res,
        overwrite_config["batch_size"], config["loss_params"],
        config["level_loss_coefs_params"], config["learning_rate_params"],
        config["weight_decay"], config["image_dynamic_range"],
        config["disp_map_dynamic_range_fac"], config["disp_max_abs_value"])
    map_align_model.setup_compute_grads()  # Add ops to compute gradients

    saver = tf.train.Saver(save_relative_paths=True)
    with tf.Session() as sess:
        # Restore checkpoint
        restore_checkpoint_success = map_align_model.restore_checkpoint(
            sess, saver, checkpoints_dir)
        if not restore_checkpoint_success:
            sys.exit('No checkpoint found in {}'.format(checkpoints_dir))

        # Compute patch count for the progress bar
        patch_total_count = 0
        for tile_info in tile_info_list:
            patch_total_count += len(tile_info["bbox_list"])

        pbar = tqdm(total=patch_total_count,
                    desc="Computing patch gradients: ")
        for tile_info in tile_info_list:
            # --- Path setup: format with empty fields only to recover the
            # output directory so it can be created up-front.
            unused_filepath = output_filepath_format.format(
                dir=raw_dirpath,
                fold=tile_info["fold"],
                out_dir=output_dirname,
                tile="",
                b0=0,
                b1=0,
                b2=0,
                b3=0,
                out_name="",
                ext="")
            os.makedirs(os.path.dirname(unused_filepath), exist_ok=True)
            tile_name = read.IMAGE_NAME_FORMAT.format(
                city=tile_info["city"], number=tile_info["number"])

            # Compute grads for that image
            additional_args = {
                "overwrite_polygon_dir_name": polygon_dirname,
            }
            image, metadata, polygons = read.load_gt_data(
                raw_dirpath,
                tile_info["city"],
                tile_info["number"],
                additional_args=additional_args)
            # Downsample
            image, polygons = process_utils.downsample_data(
                image, metadata, polygons, ds_fac,
                config["reference_pixel_size"])
            spatial_shape = image.shape[:2]

            # Draw polygon map
            polygon_map = polygon_utils.draw_polygon_map(polygons,
                                                         spatial_shape,
                                                         fill=True,
                                                         edges=True,
                                                         vertices=True)

            t_grads = 0
            t_save = 0
            for bbox in tile_info["bbox_list"]:
                p_im = image[bbox[0]:bbox[2], bbox[1]:bbox[3], :]
                p_polygon_map = polygon_map[bbox[0]:bbox[2],
                                            bbox[1]:bbox[3], :]

                # Grad compute. time.clock() was removed in Python 3.8;
                # time.perf_counter() is the recommended replacement for
                # interval timing.
                t = time.perf_counter()
                grads = map_align_model.compute_grads(sess, p_im,
                                                      p_polygon_map)
                t_grads += time.perf_counter() - t

                # Saving
                t = time.perf_counter()
                flattened_grads_x = get_flattened_gradients(grads["x"])
                flattened_grads_y = get_flattened_gradients(grads["y"])
                flattened_grads = np.stack(
                    [flattened_grads_x, flattened_grads_y], axis=-1)

                # Save grads
                grads_filepath = output_filepath_format.format(
                    dir=raw_dirpath,
                    fold=tile_info["fold"],
                    out_dir=output_dirname,
                    tile=tile_name,
                    b0=bbox[0],
                    b1=bbox[1],
                    b2=bbox[2],
                    b3=bbox[3],
                    out_name="grads",
                    ext="npy")
                np.save(grads_filepath, flattened_grads)
                t_save += time.perf_counter() - t

            pbar.update(len(tile_info["bbox_list"]))
            pbar.set_postfix(t_grads=t_grads, t_save=t_save)
        pbar.close()
Example 5
0
def inference(runs_dirpath, ori_image, ori_metadata, ori_disp_polygons,
              model_disp_max_abs_value, batch_size, scale_factor, run_name):
    """Align displaced polygons to an image with a trained MapAlignModel.

    Downsamples the inputs by scale_factor, rasterizes the displaced
    polygons, runs the model to predict a displacement field and a
    segmentation image, applies the field to the polygons, then upsamples
    both outputs back to the original image space.

    :param runs_dirpath: directory containing run sub-directories
    :param ori_image: original-resolution input image
    :param ori_metadata: image metadata used for resampling
    :param ori_disp_polygons: displaced polygons to align
    :param model_disp_max_abs_value: max absolute displacement of the model
    :param batch_size: inference batch size
    :param scale_factor: downsampling factor applied before inference
    :param run_name: name of the run whose checkpoint is used
    :return: (aligned_disp_polygons, final_segmentation_image) in original
        image space
    """
    # Setup run dir and load config file
    run_dir = run_utils.setup_run_dir(runs_dirpath, run_name)
    _, checkpoints_dir = run_utils.setup_run_subdirs(run_dir)

    # NOTE(review): config is loaded from this script's own directory, not
    # run_dir — see the original author's comment below; verify intended.
    config = run_utils.load_config(
        config_dirpath=os.path.dirname(os.path.realpath(__file__)))
    #run_dir) why would there be a second config in run dir??

    # Downsample
    image, disp_polygons = downsample_data(ori_image, ori_metadata,
                                           ori_disp_polygons, scale_factor,
                                           config["reference_pixel_size"])
    spatial_shape = image.shape[:2]

    # Draw displaced polygon map
    # disp_polygons_to_rasterize = []
    disp_polygons_to_rasterize = disp_polygons
    disp_polygon_map = polygon_utils.draw_polygon_map(
        disp_polygons_to_rasterize,
        spatial_shape,
        fill=True,
        edges=True,
        vertices=True)

    # Compute output_res
    output_res = model.MapAlignModel.get_output_res(config["input_res"],
                                                    config["pool_count"])
    # print("output_res: {}".format(output_res))

    map_align_model = model.MapAlignModel(
        config["model_name"], config["input_res"], config["add_image_input"],
        config["image_channel_count"], config["image_feature_base_count"],
        config["add_poly_map_input"], config["poly_map_channel_count"],
        config["poly_map_feature_base_count"],
        config["common_feature_base_count"], config["pool_count"],
        config["add_disp_output"], config["disp_channel_count"],
        config["add_seg_output"], config["seg_channel_count"], output_res,
        batch_size, config["loss_params"], config["level_loss_coefs_params"],
        config["learning_rate_params"], config["weight_decay"],
        config["image_dynamic_range"], config["disp_map_dynamic_range_fac"],
        model_disp_max_abs_value)

    pred_field_map, segmentation_image = map_align_model.inference(
        image, disp_polygon_map, checkpoints_dir)

    # --- align disp_polygon according to pred_field_map --- #
    # print("# --- Align disp_polygon according to pred_field_map --- #")
    aligned_disp_polygons = disp_polygons
    # First remove polygons that are not fully inside the inner_image
    # padding: the margin lost by the model between input and output res.
    padding = (spatial_shape[0] - pred_field_map.shape[0]) // 2
    bounding_box = [
        padding, padding, spatial_shape[0] - padding,
        spatial_shape[1] - padding
    ]
    # aligned_disp_polygons = polygon_utils.filter_polygons_in_bounding_box(aligned_disp_polygons, bounding_box)  # TODO: reimplement? But also filter out ori_gt_polygons for comparaison
    aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(
        aligned_disp_polygons, bounding_box)
    # Then apply displacement field map to aligned_disp_polygons
    aligned_disp_polygons = polygon_utils.apply_disp_map_to_polygons(
        pred_field_map, aligned_disp_polygons)
    # Restore polygons to original image space
    bounding_box = [
        -padding, -padding, spatial_shape[0] + padding,
        spatial_shape[1] + padding
    ]
    aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(
        aligned_disp_polygons, bounding_box)

    # Add padding to segmentation_image
    final_segmentation_image = np.zeros(
        (spatial_shape[0], spatial_shape[1], segmentation_image.shape[2]))
    final_segmentation_image[padding:-padding,
                             padding:-padding, :] = segmentation_image

    # --- Upsample outputs --- #
    # print("# --- Upsample outputs --- #")
    final_segmentation_image, aligned_disp_polygons = upsample_data(
        final_segmentation_image, ori_metadata, aligned_disp_polygons,
        scale_factor, config["reference_pixel_size"])

    return aligned_disp_polygons, final_segmentation_image
def process_image(dataset_raw_dirpath, image_info, patch_stride, patch_res, downsampling_factors, disp_max_abs_value,
                  include_polygons,
                  downsampling_factor_writers):
    """
    Writes to all the writers (one for each resolution) all sample patches extracted from the image_info.

    :param dataset_raw_dirpath: root directory of the raw dataset
    :param image_info: dict with at least "city" and "number" keys
    :param patch_stride: stride between extracted patches
    :param patch_res: resolution of extracted patches
    :param downsampling_factors: list of downsampling factors to process
    :param disp_max_abs_value: maximum absolute displacement in pixels
    :param include_polygons: if True, store polygon coordinates in patches
    :param downsampling_factor_writers: dict mapping factor -> writer
    :return: False if the image has no ground-truth polygons, else True
    """
    ori_image, ori_metadata, ori_gt_polygons = read.load_gt_data(
        dataset_raw_dirpath, image_info["city"], image_info["number"])

    if ori_gt_polygons is None:
        return False

    # Clean up ground truth: drop holes and redundant vertices.
    ori_gt_polygons = polygon_utils.polygons_remove_holes(ori_gt_polygons)  # TODO: Remove
    ori_gt_polygons = polygon_utils.simplify_polygons(ori_gt_polygons, tolerance=1)

    # Create the normalized displacement field maps once at full resolution.
    ori_normed_disp_field_maps = math_utils.create_displacement_field_maps(
        ori_image.shape[:2], config.DISP_MAP_COUNT, config.DISP_MODES,
        config.DISP_GAUSS_MU_RANGE, config.DISP_GAUSS_SIG_SCALING)

    for downsampling_factor in downsampling_factors:
        print("downsampling_factor: {}".format(downsampling_factor))
        # Downsample ground-truth to this resolution.
        image, gt_polygons, normed_disp_field_maps = downsample_gt_data(
            ori_image, ori_metadata, ori_gt_polygons,
            ori_normed_disp_field_maps, downsampling_factor)
        spatial_shape = image.shape[:2]

        # Rasterize the ground-truth polygons.
        gt_polygon_map = polygon_utils.draw_polygon_map(
            gt_polygons, spatial_shape, fill=True, edges=True, vertices=True)

        # Generate the final displaced polygons and their rasterizations.
        disp_polygons_list, disp_polygon_maps = generate_disp_data(
            normed_disp_field_maps, gt_polygons, disp_max_abs_value,
            spatial_shape)

        # Compress data: half-precision coordinates, int16 field maps.
        gt_polygons = [polygon.astype(np.float16) for polygon in gt_polygons]
        disp_polygons_list = [
            [polygon.astype(np.float16) for polygon in polygons]
            for polygons in disp_polygons_list
        ]
        disp_field_maps = normed_disp_field_maps * 32767  # int16 max value = 32767
        disp_field_maps = np.round(disp_field_maps)
        disp_field_maps = disp_field_maps.astype(np.int16)

        # Cut sample into patches, with or without polygon coordinates.
        if include_polygons:
            patches = process_sample_into_patches(
                patch_stride, patch_res, image, gt_polygon_map,
                disp_field_maps, disp_polygon_maps, gt_polygons,
                disp_polygons_list)
        else:
            patches = process_sample_into_patches(
                patch_stride, patch_res, image, gt_polygon_map,
                disp_field_maps, disp_polygon_maps)

        for patch in patches:
            save_patch_to_tfrecord(
                patch, downsampling_factor_writers[downsampling_factor])

    return True
Example 7
0
def process_image(reader, image_id, downsampling_factors, disp_field_maps_patch_creator, disp_max_abs_value,
                  include_polygons,
                  downsampling_factor_writers):
    """
    Writes to all the writers (one for each resolution) all sample patches extracted from the image_info.

    :param reader: dataset reader exposing load_gt_data(image_id)
    :param image_id: identifier of the image to process
    :param downsampling_factors: list of downsampling factors to process
    :param disp_field_maps_patch_creator: provider of normalized displacement field map patches
    :param disp_max_abs_value: maximum absolute displacement in pixels
    :param include_polygons: if True, store polygon coordinates in the records
    :param downsampling_factor_writers: dict mapping factor -> TFRecord writer
    :return: True
    """
    ori_image, ori_metadata, ori_gt_polygons = reader.load_gt_data(image_id)

    ori_gt_polygons = polygon_utils.polygons_remove_holes(ori_gt_polygons)  # TODO: Remove

    # Remove redundant vertices
    ori_gt_polygons = polygon_utils.simplify_polygons(ori_gt_polygons, tolerance=1)

    # Get displacement maps
    ori_normed_disp_field_maps = disp_field_maps_patch_creator.get_patch()

    for downsampling_factor in downsampling_factors:
        # Downsample ground-truth
        image, gt_polygons, normed_disp_field_maps = downsample_gt_data(ori_image, ori_metadata, ori_gt_polygons,
                                                                        ori_normed_disp_field_maps, downsampling_factor)

        spatial_shape = image.shape[:2]

        # Draw gt polygon map
        gt_polygon_map = polygon_utils.draw_polygon_map(gt_polygons, spatial_shape, fill=True, edges=True,
                                                        vertices=True)

        # Generate final displacement
        disp_polygons_list, disp_polygon_maps = generate_disp_data(normed_disp_field_maps, gt_polygons,
                                                                   disp_max_abs_value, spatial_shape)

        # Debug checks for NaN vertices. Fixed: `x == np.nan` is always False
        # because NaN never compares equal, so the original checks could
        # never trigger; np.isnan is the correct test.
        if np.isnan(gt_polygons[0][0][0]) or np.isnan(gt_polygons[0][0][1]):
            print(gt_polygons[0][0])

        if np.isnan(disp_polygons_list[0][0][0][0]) or np.isnan(disp_polygons_list[0][0][0][1]):
            print("disp_polygons_list:")
            print(disp_polygons_list[0][0])

        # Compress data
        gt_polygons = [polygon.astype(np.float16) for polygon in gt_polygons]
        disp_polygons_list = [[polygon.astype(np.float16) for polygon in polygons] for polygons in disp_polygons_list]
        disp_field_maps = normed_disp_field_maps * 32767  # int16 max value = 32767
        disp_field_maps = np.round(disp_field_maps)
        disp_field_maps = disp_field_maps.astype(np.int16)

        if include_polygons:
            gt_polygons, \
            disp_polygons_array = polygon_utils.prepare_polygons_for_tfrecord(gt_polygons, disp_polygons_list)
        else:
            gt_polygons = disp_polygons_array = None

        assert image.shape[0] == image.shape[1], "image should be square otherwise tile_res cannot be defined"
        tile_res = image.shape[0]
        disp_map_count = disp_polygon_maps.shape[0]

        patch = {
            "tile_res": tile_res,
            "disp_map_count": disp_map_count,
            "image": image,
            "gt_polygons": gt_polygons,
            "disp_polygons": disp_polygons_array,
            "gt_polygon_map": gt_polygon_map,
            "disp_field_maps": disp_field_maps,
            "disp_polygon_maps": disp_polygon_maps,
        }

        save_patch_to_tfrecord(patch, downsampling_factor_writers[downsampling_factor])

    return True
Example 8
0
def inference(ori_image, ori_metadata, ori_disp_polygons,
              model_disp_max_abs_value, batch_size, scale_factor, run_name):
    """Align displaced polygons to an image with a trained MapAlignModel.

    Older variant configured via module-level config constants instead of a
    loaded config dict. Downsamples the inputs, rasterizes the displaced
    polygons, runs the model to predict a displacement field and a
    segmentation, applies the field to the polygons and upsamples the
    outputs back to the original image space.

    :param ori_image: original-resolution input image
    :param ori_metadata: image metadata used for resampling
    :param ori_disp_polygons: displaced polygons to align
    :param model_disp_max_abs_value: max absolute displacement of the model
    :param batch_size: inference batch size
    :param scale_factor: downsampling factor applied before inference
    :param run_name: name of the run whose checkpoint is used
    :return: (aligned_disp_polygons, final_segmentation_image) in original
        image space
    """
    # Downsample
    image, disp_polygons = downsample_data(ori_image, ori_metadata,
                                           ori_disp_polygons, scale_factor)
    spatial_shape = image.shape[:2]

    # Draw displaced polygon map
    # disp_polygons_to_rasterize = []
    disp_polygons_to_rasterize = disp_polygons
    disp_polygon_map = polygon_utils.draw_polygon_map(
        disp_polygons_to_rasterize,
        spatial_shape,
        fill=True,
        edges=True,
        vertices=True)

    # Compute output_res
    output_res = model.MapAlignModel.get_output_res(config.INPUT_RES,
                                                    config.POOL_COUNT)
    print("output_res: {}".format(output_res))

    map_align_model = model.MapAlignModel(
        config.MODEL_NAME, config.INPUT_RES, config.IMAGE_INPUT_CHANNELS,
        config.IMAGE_DYNAMIC_RANGE, config.DISP_MAP_DYNAMIC_RANGE_FAC,
        config.POLY_MAP_INPUT_CHANNELS, config.IMAGE_FEATURE_BASE_COUNT,
        config.POLY_MAP_FEATURE_BASE_COUNT, config.COMMON_FEATURE_BASE_COUNT,
        config.POOL_COUNT, output_res, config.DISP_OUTPUT_CHANNELS,
        model_disp_max_abs_value, config.ADD_SEG_OUTPUT,
        config.SEG_OUTPUT_CHANNELS, batch_size, config.LEARNING_RATE_PARAMS,
        config.LEVEL_LOSS_COEFS_PARAMS, config.DISP_LOSS_COEF,
        config.SEG_LOSS_COEF, config.LAPLACIAN_PENALTY_COEF,
        config.WEIGHT_DECAY)

    run_dir = run_utils.setup_run_dir(config.RUNS_DIR, run_name)
    _, checkpoints_dir = run_utils.setup_run_subdirs(
        run_dir, config.LOGS_DIRNAME, config.CHECKPOINTS_DIRNAME)
    pred_field_map, segmentation_image = map_align_model.inference(
        image, disp_polygon_map, checkpoints_dir)

    # --- Align disp_polygon according to pred_field_map --- #
    print("# --- Align disp_polygon according to pred_field_map --- #")
    aligned_disp_polygons = disp_polygons
    # First remove polygons that are not fully inside the inner_image
    # padding: the margin lost by the model between input and output res.
    padding = (spatial_shape[0] - pred_field_map.shape[0]) // 2
    bounding_box = [
        padding, padding, spatial_shape[0] - padding,
        spatial_shape[1] - padding
    ]
    # aligned_disp_polygons = polygon_utils.filter_polygons_in_bounding_box(aligned_disp_polygons, bounding_box)  # TODO: reimplement? But also filter out ori_gt_polygons for comparaison
    aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(
        aligned_disp_polygons, bounding_box)
    # Then apply displacement field map to aligned_disp_polygons
    aligned_disp_polygons = polygon_utils.apply_disp_map_to_polygons(
        pred_field_map, aligned_disp_polygons)
    # Restore polygons to original image space
    bounding_box = [
        -padding, -padding, spatial_shape[0] + padding,
        spatial_shape[1] + padding
    ]
    aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(
        aligned_disp_polygons, bounding_box)

    # Add padding to segmentation_image
    final_segmentation_image = np.zeros(
        (spatial_shape[0], spatial_shape[1], segmentation_image.shape[2]))
    final_segmentation_image[padding:-padding,
                             padding:-padding, :] = segmentation_image

    # --- Upsample outputs --- #
    print("# --- Upsample outputs --- #")
    final_segmentation_image, aligned_disp_polygons = upsample_data(
        final_segmentation_image, ori_metadata, aligned_disp_polygons,
        scale_factor)

    return aligned_disp_polygons, final_segmentation_image