                # Scale the values before casting them to 16-bit integers
                band_data = band_data * 10000
                band_data = band_data.astype('int16')

            output_band_ds.WriteArray(band_data, xoff, 0)


if __name__ == "__main__":
    args = parse_args()

    images = args.images
    bands = args.bands
    chunk_x_size = args.chunk_size
    output_nodata = args.out_nodata
    in_nodata = args.in_nodata
    convert_int16 = args.convert_int16
    output_dir = args.output_dir

    start_time = time.time()
    dl_utils.mkdirp(output_dir)

    # For each band: build a frequency histogram across every input image,
    # derive the normalization statistics from it, then standardize the
    # images chunk by chunk.
    for band in bands:
        freq_histogram = calc_freq_histogram(images, band, in_nodata,
                                             output_dir, chunk_x_size)
        stats = calc_stats(freq_histogram, in_nodata)
        standardize(images, band, stats, output_dir, convert_int16, bands,
                    chunk_x_size)

    elapsed_time = time.time() - start_time
    print('Time elapsed:', elapsed_time)
Example #2
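The excerpt below starts at the function definition, so its imports are not shown. A plausible preamble, assuming the project helper module is named `dl_utils` and the model definition is imported as `md` (both inferred from the calls in the code), would be:

import gc

import numpy as np
import tensorflow as tf  # TF 1.x API (tf.logging, tf.estimator.inputs)
from osgeo import gdal

import dl_utils  # project helper functions (assumed module name)
import model as md  # provides md.description used as model_fn (assumed module name)
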
def exec(images, model_dir, output_dir, memory_percentage=40):
    tf.logging.set_verbosity(tf.logging.INFO)

    dl_utils.mkdirp(output_dir)

    param_path = dl_utils.new_filepath('train_params.dat', directory=model_dir)
    params = dl_utils.load_object(param_path)

    chips_info_path = dl_utils.new_filepath('chips_info.dat',
                                            directory=model_dir)
    chips_info = dl_utils.load_object(chips_info_path)
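    # Note: 'params' and 'chips_info' were written next to the trained model;
    # params feeds the Estimator's model_fn and chips_info defines the chip
    # shapes used to size the prediction windows below.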

    for in_image in images:

        in_image_ds = gdal.Open(in_image)
        out_image = dl_utils.new_filepath(in_image,
                                          suffix='pred',
                                          ext='tif',
                                          directory=output_dir)
        out_image_ds = dl_utils.create_output_file(in_image, out_image)
        out_band = out_image_ds.GetRasterBand(1)
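        # The output raster is created from the input image as a template;
        # predicted values are written into its first band.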

        estimator = tf.estimator.Estimator(model_fn=md.description,
                                           params=params,
                                           model_dir=model_dir)

        print(chips_info)
        _, dat_xsize, dat_ysize, dat_nbands = chips_info['dat_shape']
        _, exp_xsize, exp_ysize, _ = chips_info['exp_shape']
        pad_size = int((dat_xsize - exp_xsize) / 2)
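        # Input chips (dat_*) are larger than the predicted chips (exp_*);
        # pad_size is the extra border needed on each side of an output
        # window to build the corresponding input window.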

        input_positions = dl_utils.get_predict_positions(
            in_image_ds.RasterXSize, in_image_ds.RasterYSize, exp_xsize,
            pad_size)

        cache_chip_data = []
        cache_out_position = []

        count = 0
        for i in range(len(input_positions)):
            input_position = input_positions[i]

            try:
                chip_data, out_position = dl_utils.get_predict_data(
                    in_image_ds, input_position, pad_size)
            except IOError as error:
                print(error)
                print('Ignoring this data block')
                continue

            cache_chip_data.append(chip_data)
            cache_out_position.append(out_position)

            print("Reading image " + in_image + ": memory percentage " +
                  str(dl_utils.memory_percentage()) + "%")

            if (dl_utils.memory_percentage() >
                    memory_percentage) or i == (len(input_positions) - 1):
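                # Memory threshold reached (or last position read): stack the
                # cached chips into a single batch and classify them in bulk.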
                input_data = np.stack(cache_chip_data)

                del cache_chip_data
                cache_chip_data = []

                input_data = input_data[:, :, :, 0:dat_nbands]

                tensors_to_log = {}

                print("Classifying image " + in_image + ": progress " +
                      str(float(i) / len(input_positions) * 100) + "%")
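                # numpy_input_fn (TF 1.x) feeds the stacked chips to the
                # estimator in batches of params['batch_size']; shuffle=False
                # keeps the predictions aligned with cache_out_position.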
                predict_input_fn = tf.estimator.inputs.numpy_input_fn(
                    x={"data": input_data},
                    batch_size=params['batch_size'],
                    shuffle=False)
                predict_results = estimator.predict(input_fn=predict_input_fn)

                print("Writing classification result in " + out_image)
                for chip_predict, out_position in zip(predict_results,
                                                      cache_out_position):
                    out_predict = dl_utils.discretize_values(
                        chip_predict, 1, 0)
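                    # discretize_values presumably snaps the continuous network
                    # output onto the discrete class values 1 and 0.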

                    out_x0 = out_position[0]
                    out_y0 = out_position[1]
                    count = count + 1
                    out_band.WriteArray(out_predict[:, :, 0], out_x0, out_y0)

                out_band.FlushCache()

                del input_data
                del predict_results
                cache_out_position = []
                gc.collect()