Example #1
import os
import sys
import time

import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (tf.train.Saver, tf.Session)
from tqdm import tqdm

# Project-local modules assumed importable from this excerpt's package:
# run_utils, model, read, process_utils, polygon_utils.


def compute_grads(raw_dirpath, runs_dirpath, run_name, ds_fac,
                  overwrite_config, tile_info_list, polygon_dirname,
                  output_dirname, output_filepath_format):

    # Setup run dir and load config file
    run_dir = run_utils.setup_run_dir(runs_dirpath, run_name)
    _, checkpoints_dir = run_utils.setup_run_subdirs(run_dir)

    config = run_utils.load_config(config_dirpath=run_dir)

    # --- Instantiate model
    output_res = model.MapAlignModel.get_output_res(
        overwrite_config["input_res"], config["pool_count"])
    map_align_model = model.MapAlignModel(
        config["model_name"], overwrite_config["input_res"],
        config["add_image_input"], config["image_channel_count"],
        config["image_feature_base_count"], config["add_poly_map_input"],
        config["poly_map_channel_count"],
        config["poly_map_feature_base_count"],
        config["common_feature_base_count"], config["pool_count"],
        config["add_disp_output"], config["disp_channel_count"],
        config["add_seg_output"], config["seg_channel_count"], output_res,
        overwrite_config["batch_size"], config["loss_params"],
        config["level_loss_coefs_params"], config["learning_rate_params"],
        config["weight_decay"], config["image_dynamic_range"],
        config["disp_map_dynamic_range_fac"], config["disp_max_abs_value"])
    map_align_model.setup_compute_grads()  # Add ops to compute gradients

    saver = tf.train.Saver(save_relative_paths=True)
    with tf.Session() as sess:
        # Restore checkpoint
        restore_checkpoint_success = map_align_model.restore_checkpoint(
            sess, saver, checkpoints_dir)
        if not restore_checkpoint_success:
            sys.exit('No checkpoint found in {}'.format(checkpoints_dir))

        # Compute patch count
        patch_total_count = 0
        for tile_info in tile_info_list:
            patch_total_count += len(tile_info["bbox_list"])

        pbar = tqdm(total=patch_total_count,
                    desc="Computing patch gradients: ")
        for tile_info in tile_info_list:
            # --- Path setup:
            unused_filepath = output_filepath_format.format(
                dir=raw_dirpath,
                fold=tile_info["fold"],
                out_dir=output_dirname,
                tile="",
                b0=0,
                b1=0,
                b2=0,
                b3=0,
                out_name="",
                ext="")
            os.makedirs(os.path.dirname(unused_filepath), exist_ok=True)
            tile_name = read.IMAGE_NAME_FORMAT.format(
                city=tile_info["city"], number=tile_info["number"])

            # Compute grads for that image
            additional_args = {
                "overwrite_polygon_dir_name": polygon_dirname,
            }
            # t = time.perf_counter()
            image, metadata, polygons = read.load_gt_data(
                raw_dirpath,
                tile_info["city"],
                tile_info["number"],
                additional_args=additional_args)
            # t_read = time.perf_counter() - t
            # Downsample
            image, polygons = process_utils.downsample_data(
                image, metadata, polygons, ds_fac,
                config["reference_pixel_size"])
            spatial_shape = image.shape[:2]

            # Draw polygon map
            # t = time.perf_counter()
            polygon_map = polygon_utils.draw_polygon_map(polygons,
                                                         spatial_shape,
                                                         fill=True,
                                                         edges=True,
                                                         vertices=True)
            # t_draw = time.perf_counter() - t

            t_grads = 0
            t_save = 0
            for bbox in tile_info["bbox_list"]:
                p_im = image[bbox[0]:bbox[2], bbox[1]:bbox[3], :]
                p_polygon_map = polygon_map[bbox[0]:bbox[2],
                                            bbox[1]:bbox[3], :]
                # p_polygons = polygon_utils.crop_polygons_to_patch_if_touch(polygons, bbox)

                # Grad compute
                t = time.perf_counter()  # time.clock() was removed in Python 3.8
                grads = map_align_model.compute_grads(sess, p_im,
                                                      p_polygon_map)
                t_grads += time.perf_counter() - t

                # Saving
                t = time.perf_counter()
                flattened_grads_x = get_flattened_gradients(grads["x"])
                flattened_grads_y = get_flattened_gradients(grads["y"])
                flattened_grads = np.stack(
                    [flattened_grads_x, flattened_grads_y], axis=-1)

                # # Save patch for later visualization
                # im_filepath = output_filepath_format.format(dir=raw_dirpath, fold=tile_info["fold"],
                #                                             out_dir=output_dirname, tile=tile_name,
                #                                             b0=bbox[0], b1=bbox[1], b2=bbox[2], b3=bbox[3],
                #                                             out_name="image", ext="png")
                # skimage.io.imsave(im_filepath, p_im)
                # # Save polygons as well
                # polygons_filepath = output_filepath_format.format(dir=raw_dirpath, fold=tile_info["fold"],
                #                                                   out_dir=output_dirname, tile=tile_name,
                #                                                   b0=bbox[0], b1=bbox[1], b2=bbox[2], b3=bbox[3],
                #                                                   out_name="polygons", ext="npy")
                # np.save(polygons_filepath, p_polygons)
                # Save grads
                grads_filepath = output_filepath_format.format(
                    dir=raw_dirpath,
                    fold=tile_info["fold"],
                    out_dir=output_dirname,
                    tile=tile_name,
                    b0=bbox[0],
                    b1=bbox[1],
                    b2=bbox[2],
                    b3=bbox[3],
                    out_name="grads",
                    ext="npy")
                np.save(grads_filepath, flattened_grads)
                t_save += time.perf_counter() - t

            pbar.update(len(tile_info["bbox_list"]))
            pbar.set_postfix(t_grads=t_grads, t_save=t_save)
        pbar.close()
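
get_flattened_gradients is not defined in this excerpt. A minimal sketch, assuming grads["x"] and grads["y"] are lists of per-variable numpy gradient arrays (one entry per model variable), is:

import numpy as np


def get_flattened_gradients(gradient_list):
    # Hypothetical helper: ravel each per-variable gradient array and
    # concatenate them into one 1-D vector, so the x and y components can
    # be stacked along the last axis as done above.
    return np.concatenate([np.ravel(g) for g in gradient_list])
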
Example #2
def inference(runs_dirpath, ori_image, ori_metadata, ori_disp_polygons,
              model_disp_max_abs_value, batch_size, scale_factor, run_name):
    # Setup run dir and load config file
    run_dir = run_utils.setup_run_dir(runs_dirpath, run_name)
    _, checkpoints_dir = run_utils.setup_run_subdirs(run_dir)

    config = run_utils.load_config(
        config_dirpath=os.path.dirname(os.path.realpath(__file__)))
    # Note: the config is loaded from this script's directory rather than
    # from run_dir; it is unclear why run_dir would hold a second config.

    # Downsample
    image, disp_polygons = downsample_data(ori_image, ori_metadata,
                                           ori_disp_polygons, scale_factor,
                                           config["reference_pixel_size"])
    spatial_shape = image.shape[:2]

    # Draw displaced polygon map
    # disp_polygons_to_rasterize = []
    disp_polygons_to_rasterize = disp_polygons
    disp_polygon_map = polygon_utils.draw_polygon_map(
        disp_polygons_to_rasterize,
        spatial_shape,
        fill=True,
        edges=True,
        vertices=True)

    # Compute output_res
    output_res = model.MapAlignModel.get_output_res(config["input_res"],
                                                    config["pool_count"])
    # print("output_res: {}".format(output_res))

    map_align_model = model.MapAlignModel(
        config["model_name"], config["input_res"], config["add_image_input"],
        config["image_channel_count"], config["image_feature_base_count"],
        config["add_poly_map_input"], config["poly_map_channel_count"],
        config["poly_map_feature_base_count"],
        config["common_feature_base_count"], config["pool_count"],
        config["add_disp_output"], config["disp_channel_count"],
        config["add_seg_output"], config["seg_channel_count"], output_res,
        batch_size, config["loss_params"], config["level_loss_coefs_params"],
        config["learning_rate_params"], config["weight_decay"],
        config["image_dynamic_range"], config["disp_map_dynamic_range_fac"],
        model_disp_max_abs_value)

    pred_field_map, segmentation_image = map_align_model.inference(
        image, disp_polygon_map, checkpoints_dir)

    # --- align disp_polygon according to pred_field_map --- #
    # print("# --- Align disp_polygon according to pred_field_map --- #")
    aligned_disp_polygons = disp_polygons
    # First remove polygons that are not fully inside the inner_image
    padding = (spatial_shape[0] - pred_field_map.shape[0]) // 2
    bounding_box = [
        padding, padding, spatial_shape[0] - padding,
        spatial_shape[1] - padding
    ]
    # aligned_disp_polygons = polygon_utils.filter_polygons_in_bounding_box(aligned_disp_polygons, bounding_box)  # TODO: reimplement? But also filter out ori_gt_polygons for comparison
    aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(
        aligned_disp_polygons, bounding_box)
    # Then apply displacement field map to aligned_disp_polygons
    aligned_disp_polygons = polygon_utils.apply_disp_map_to_polygons(
        pred_field_map, aligned_disp_polygons)
    # Restore polygons to original image space
    bounding_box = [
        -padding, -padding, spatial_shape[0] + padding,
        spatial_shape[1] + padding
    ]
    aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(
        aligned_disp_polygons, bounding_box)

    # Add padding to segmentation_image
    final_segmentation_image = np.zeros(
        (spatial_shape[0], spatial_shape[1], segmentation_image.shape[2]))
    final_segmentation_image[padding:-padding,
                             padding:-padding, :] = segmentation_image

    # --- Upsample outputs --- #
    # print("# --- Upsample outputs --- #")
    final_segmentation_image, aligned_disp_polygons = upsample_data(
        final_segmentation_image, ori_metadata, aligned_disp_polygons,
        scale_factor, config["reference_pixel_size"])

    return aligned_disp_polygons, final_segmentation_image
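
The padding arithmetic assumes the network output is centered in its input: with spatial_shape = (220, 220) and pred_field_map.shape[:2] = (188, 188), padding = (220 - 188) // 2 = 16, the inner bounding box is [16, 16, 204, 204], and the second box [-16, -16, 236, 236] undoes the shift. The polygon_utils helpers are project-specific; a plausible sketch of the coordinate shift, assuming each polygon is an (N, 2) array of (row, col) vertices, is:

import numpy as np


def transform_polygons_to_bounding_box_space(polygons, bounding_box):
    # Hypothetical sketch of the polygon_utils helper used above: shift
    # vertices so the bounding box origin becomes (0, 0). Calling it again
    # with a negated origin restores the original image coordinates.
    origin = np.asarray(bounding_box[:2], dtype=np.float64)
    return [polygon - origin for polygon in polygons]
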
Example #3
def train(init_run_dirpath, run_dirpath, batch_size, ds_fac_list,
          ds_repeat_list):
    # Setup init checkpoints directory path if one is specified:
    if init_run_dirpath is not None:
        _, init_checkpoints_dirpath = run_utils.setup_run_subdirs(
            init_run_dirpath, config.LOGS_DIRNAME, config.CHECKPOINTS_DIRNAME)
    else:
        init_checkpoints_dirpath = None

    # Setup stage run dirs
    # Create run subdirectories if they do not exist
    logs_dirpath, checkpoints_dirpath = run_utils.setup_run_subdirs(
        run_dirpath, config.LOGS_DIRNAME, config.CHECKPOINTS_DIRNAME)

    # Compute output_res
    output_res = model.MapAlignModel.get_output_res(config.INPUT_RES,
                                                    config.POOL_COUNT)
    print("output_res: {}".format(output_res))

    # Instantiate model object (resets the default graph)
    map_align_model = model.MapAlignModel(
        config.MODEL_NAME, config.INPUT_RES, config.IMAGE_INPUT_CHANNELS,
        config.IMAGE_DYNAMIC_RANGE, config.DISP_MAP_DYNAMIC_RANGE_FAC,
        config.POLY_MAP_INPUT_CHANNELS, config.IMAGE_FEATURE_BASE_COUNT,
        config.POLY_MAP_FEATURE_BASE_COUNT, config.COMMON_FEATURE_BASE_COUNT,
        config.POOL_COUNT, output_res, config.DISP_OUTPUT_CHANNELS,
        config.DISP_MAX_ABS_VALUE, config.ADD_SEG_OUTPUT,
        config.SEG_OUTPUT_CHANNELS, batch_size, config.LEARNING_RATE_PARAMS,
        config.LEVEL_LOSS_COEFS_PARAMS, config.DISP_LOSS_COEF,
        config.SEG_LOSS_COEF, config.LAPLACIAN_PENALTY_COEF,
        config.WEIGHT_DECAY)

    # Train dataset
    train_dataset_filename_list = dataset_multires.create_dataset_filename_list(
        config.TFRECORDS_DIR_LIST,
        config.TFRECORD_FILENAME_FORMAT,
        ds_fac_list,
        dataset="train",
        resolution_file_repeats=ds_repeat_list)
    train_dataset_tensors = dataset_multires.read_and_decode(
        train_dataset_filename_list,
        output_res,
        config.INPUT_RES,
        batch_size,
        config.IMAGE_DYNAMIC_RANGE,
        disp_map_dynamic_range_fac=config.DISP_MAP_DYNAMIC_RANGE_FAC,
        keep_poly_prob=config.KEEP_POLY_PROB,
        data_aug=config.DATA_AUG,
        train=True)

    # Val dataset
    val_dataset_filename_list = dataset_multires.create_dataset_filename_list(
        config.TFRECORDS_DIR_LIST,
        config.TFRECORD_FILENAME_FORMAT,
        ds_fac_list,
        dataset="val",
        resolution_file_repeats=ds_repeat_list)
    val_dataset_tensors = dataset_multires.read_and_decode(
        val_dataset_filename_list,
        output_res,
        config.INPUT_RES,
        batch_size,
        config.IMAGE_DYNAMIC_RANGE,
        disp_map_dynamic_range_fac=config.DISP_MAP_DYNAMIC_RANGE_FAC,
        keep_poly_prob=config.KEEP_POLY_PROB,
        data_aug=False,
        train=False)

    # Launch training
    map_align_model.optimize(train_dataset_tensors,
                             val_dataset_tensors,
                             config.MAX_ITER,
                             config.DROPOUT_KEEP_PROB,
                             logs_dirpath,
                             config.TRAIN_SUMMARY_STEP,
                             config.VAL_SUMMARY_STEP,
                             checkpoints_dirpath,
                             config.CHECKPOINT_STEP,
                             init_checkpoints_dirpath=init_checkpoints_dirpath,
                             plot_results=config.PLOT_RESULTS)
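
A hedged usage sketch for this variant; the paths and values below are illustrative, not the project's defaults:

# Hypothetical invocation: train from scratch at several downsampling factors.
train(init_run_dirpath=None,          # no init checkpoint to restore from
      run_dirpath="runs/align_run",   # logs/ and checkpoints/ are created here
      batch_size=32,
      ds_fac_list=[1, 2, 4, 8],       # downsampling factors to mix in training
      ds_repeat_list=[1, 1, 1, 1])    # per-factor file repetition counts
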
Example #4
def train(config, tfrecords_dirpath_list, init_run_dirpath, run_dirpath,
          batch_size, ds_fac_list, ds_repeat_list):
    # setup init checkpoints directory path if one is specified:
    if init_run_dirpath is not None:
        _, init_checkpoints_dirpath = run_utils.setup_run_subdirs(
            init_run_dirpath, config["logs_dirname"],
            config["checkpoints_dirname"])
    else:
        init_checkpoints_dirpath = None

    # setup stage run dirs
    # create run subdirectories if they do not exist
    logs_dirpath, checkpoints_dirpath = run_utils.setup_run_subdirs(
        run_dirpath, config["logs_dirname"], config["checkpoints_dirname"])

    # compute output_res
    output_res = model.MapAlignModel.get_output_res(config["input_res"],
                                                    config["pool_count"])
    print("output_res: {}".format(output_res))

    # instantiate model object (resets the default graph)
    map_align_model = model.MapAlignModel(
        config["model_name"], config["input_res"], config["add_image_input"],
        config["image_channel_count"], config["image_feature_base_count"],
        config["add_poly_map_input"], config["poly_map_channel_count"],
        config["poly_map_feature_base_count"],
        config["common_feature_base_count"], config["pool_count"],
        config["add_disp_output"], config["disp_channel_count"],
        config["add_seg_output"], config["seg_channel_count"], output_res,
        batch_size, config["loss_params"], config["level_loss_coefs_params"],
        config["learning_rate_params"], config["weight_decay"],
        config["image_dynamic_range"], config["disp_map_dynamic_range_fac"],
        config["disp_max_abs_value"])

    # train dataset
    train_dataset_filename_list = dataset_multires.create_dataset_filename_list(
        tfrecords_dirpath_list,
        config["tfrecord_filename_format"],
        ds_fac_list,
        dataset="train",
        resolution_file_repeats=ds_repeat_list)
    train_dataset_tensors = dataset_multires.read_and_decode(
        train_dataset_filename_list,
        output_res,
        config["input_res"],
        batch_size,
        config["image_dynamic_range"],
        disp_map_dynamic_range_fac=config["disp_map_dynamic_range_fac"],
        keep_poly_prob=config["keep_poly_prob"],
        data_aug=config["data_aug"],
        train=True)

    if config["perform_validation_step"]:
        # val dataset
        val_dataset_filename_list = dataset_multires.create_dataset_filename_list(
            tfrecords_dirpath_list,
            config["tfrecord_filename_format"],
            ds_fac_list,
            dataset="val",
            resolution_file_repeats=ds_repeat_list)
        val_dataset_tensors = dataset_multires.read_and_decode(
            val_dataset_filename_list,
            output_res,
            config["input_res"],
            batch_size,
            config["image_dynamic_range"],
            disp_map_dynamic_range_fac=config["disp_map_dynamic_range_fac"],
            keep_poly_prob=config["keep_poly_prob"],
            data_aug=False,
            train=False)
    else:
        val_dataset_tensors = None

    # launch training
    map_align_model.optimize(train_dataset_tensors,
                             val_dataset_tensors,
                             config["max_iter"],
                             config["dropout_keep_prob"],
                             logs_dirpath,
                             config["train_summary_step"],
                             config["val_summary_step"],
                             checkpoints_dirpath,
                             config["checkpoint_step"],
                             init_checkpoints_dirpath=init_checkpoints_dirpath,
                             plot_results=config["plot_results"])
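
Unlike Example #3, this variant reads every hyperparameter from a config dict. A partial, illustrative sketch of that dict (the keys are the ones read above; the values are placeholders, not the project's defaults):

config = {
    "logs_dirname": "logs",
    "checkpoints_dirname": "checkpoints",
    "input_res": 220,
    "pool_count": 3,
    "perform_validation_step": True,
    "max_iter": 100000,
    "dropout_keep_prob": 0.9,
    "train_summary_step": 50,
    "val_summary_step": 250,
    "checkpoint_step": 1000,
    "plot_results": False,
    # ...plus the model, loss, and dataset keys passed to MapAlignModel
    # and dataset_multires.read_and_decode above.
}
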
Example #5
def inference(ori_image, ori_metadata, ori_disp_polygons,
              model_disp_max_abs_value, batch_size, scale_factor, run_name):
    # Downsample
    image, disp_polygons = downsample_data(ori_image, ori_metadata,
                                           ori_disp_polygons, scale_factor)
    spatial_shape = image.shape[:2]

    # Draw displaced polygon map
    # disp_polygons_to_rasterize = []
    disp_polygons_to_rasterize = disp_polygons
    disp_polygon_map = polygon_utils.draw_polygon_map(
        disp_polygons_to_rasterize,
        spatial_shape,
        fill=True,
        edges=True,
        vertices=True)

    # Compute output_res
    output_res = model.MapAlignModel.get_output_res(config.INPUT_RES,
                                                    config.POOL_COUNT)
    print("output_res: {}".format(output_res))

    map_align_model = model.MapAlignModel(
        config.MODEL_NAME, config.INPUT_RES, config.IMAGE_INPUT_CHANNELS,
        config.IMAGE_DYNAMIC_RANGE, config.DISP_MAP_DYNAMIC_RANGE_FAC,
        config.POLY_MAP_INPUT_CHANNELS, config.IMAGE_FEATURE_BASE_COUNT,
        config.POLY_MAP_FEATURE_BASE_COUNT, config.COMMON_FEATURE_BASE_COUNT,
        config.POOL_COUNT, output_res, config.DISP_OUTPUT_CHANNELS,
        model_disp_max_abs_value, config.ADD_SEG_OUTPUT,
        config.SEG_OUTPUT_CHANNELS, batch_size, config.LEARNING_RATE_PARAMS,
        config.LEVEL_LOSS_COEFS_PARAMS, config.DISP_LOSS_COEF,
        config.SEG_LOSS_COEF, config.LAPLACIAN_PENALTY_COEF,
        config.WEIGHT_DECAY)

    run_dir = run_utils.setup_run_dir(config.RUNS_DIR, run_name)
    _, checkpoints_dir = run_utils.setup_run_subdirs(
        run_dir, config.LOGS_DIRNAME, config.CHECKPOINTS_DIRNAME)
    pred_field_map, segmentation_image = map_align_model.inference(
        image, disp_polygon_map, checkpoints_dir)

    # --- Align disp_polygon according to pred_field_map --- #
    print("# --- Align disp_polygon according to pred_field_map --- #")
    aligned_disp_polygons = disp_polygons
    # First remove polygons that are not fully inside the inner_image
    padding = (spatial_shape[0] - pred_field_map.shape[0]) // 2
    bounding_box = [
        padding, padding, spatial_shape[0] - padding,
        spatial_shape[1] - padding
    ]
    # aligned_disp_polygons = polygon_utils.filter_polygons_in_bounding_box(aligned_disp_polygons, bounding_box)  # TODO: reimplement? But also filter out ori_gt_polygons for comparison
    aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(
        aligned_disp_polygons, bounding_box)
    # Then apply displacement field map to aligned_disp_polygons
    aligned_disp_polygons = polygon_utils.apply_disp_map_to_polygons(
        pred_field_map, aligned_disp_polygons)
    # Restore polygons to original image space
    bounding_box = [
        -padding, -padding, spatial_shape[0] + padding,
        spatial_shape[1] + padding
    ]
    aligned_disp_polygons = polygon_utils.transform_polygons_to_bounding_box_space(
        aligned_disp_polygons, bounding_box)

    # Add padding to segmentation_image
    final_segmentation_image = np.zeros(
        (spatial_shape[0], spatial_shape[1], segmentation_image.shape[2]))
    final_segmentation_image[padding:-padding,
                             padding:-padding, :] = segmentation_image

    # --- Upsample outputs --- #
    print("# --- Upsample outputs --- #")
    final_segmentation_image, aligned_disp_polygons = upsample_data(
        final_segmentation_image, ori_metadata, aligned_disp_polygons,
        scale_factor)

    return aligned_disp_polygons, final_segmentation_image
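
A hedged usage sketch, assuming ori_image is an (H, W, 3) numpy array and ori_disp_polygons a list of (N, 2) vertex arrays; every value below is illustrative:

# Hypothetical call: align misregistered polygons against the image.
aligned_polygons, seg_image = inference(
    ori_image,                      # (H, W, 3) numpy array
    ori_metadata,                   # per-image metadata (e.g. pixel size)
    ori_disp_polygons,              # list of (N, 2) polygon vertex arrays
    model_disp_max_abs_value=4.0,   # max displacement the model was trained for
    batch_size=12,
    scale_factor=2.0,               # downsampling factor used before inference
    run_name="align_run")           # run whose checkpoints are restored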