def standardize(images, band, stats, output_dir, convert_int16, bands,
                chunk_x_size):
    """Standardize one band of each image as (value - median) / std.

    Processes each image in column chunks of width ``chunk_x_size`` and
    writes the result to a sibling file with suffix ``'stand'`` inside
    ``output_dir``.  Existing output files are opened for update instead of
    being recreated.  When ``convert_int16`` is True, standardized values
    are clamped to [-3.2760, 3.2760], scaled by 10000 and stored as Int16,
    with -32767 (i.e. -3.2767 * 10000) marking nodata pixels.

    Args:
        images: iterable of GDAL-readable input image paths.
        band: 1-based index of the band to standardize.
        stats: dict with keys 'nodata', 'median' and 'std' for this band.
        output_dir: directory where output images are written.
        convert_int16: store scaled Int16 output instead of Float32.
        bands: list of all bands; only its length is used, to size a newly
            created output file.
        chunk_x_size: column width (pixels) of each processing chunk.
    """
    for image_path in images:
        output_image_path = dl_utils.new_filepath(image_path, suffix='stand',
                                                  directory=output_dir)

        print("Standardizing band " + str(band) + ' ' + image_path + " => " +
              output_image_path)

        if not Path(output_image_path).is_file():
            # Create a fresh output file mirroring the input's geometry.
            data_type = gdal.GDT_Int16 if convert_int16 else gdal.GDT_Float32
            output_ds = dl_utils.create_output_file(image_path,
                                                    output_image_path,
                                                    len(bands), data_type)
        else:
            output_ds = gdal.Open(output_image_path, gdal.GA_Update)

        input_ds = gdal.Open(image_path, gdal.GA_ReadOnly)

        x_size = input_ds.RasterXSize
        y_size = input_ds.RasterYSize

        # Band handles are loop-invariant; fetch them once per image.
        output_band_ds = output_ds.GetRasterBand(band)
        input_band_ds = input_ds.GetRasterBand(band)

        for xoff in range(0, x_size, chunk_x_size):
            # BUG FIX: the original assigned the shrunken width of the final
            # partial chunk back into ``chunk_x_size``, permanently reducing
            # the chunk width for all remaining images.  Use a per-chunk
            # local width instead.
            width = min(chunk_x_size, x_size - xoff)

            band_data = input_band_ds.ReadAsArray(xoff, 0, width, y_size)
            # 'float32' (lowercase) is the canonical NumPy dtype name; the
            # original 'Float32' is rejected by modern NumPy.
            band_data = band_data.astype('float32')

            valid_pixels = (band_data != stats['nodata'])
            band_data[valid_pixels] = (band_data[valid_pixels] -
                                       stats['median']) / stats['std']
            # NOTE(review): ``output_nodata`` is not defined in this function
            # or its parameters; presumably a module-level constant — confirm.
            band_data[np.logical_not(valid_pixels)] = output_nodata

            if convert_int16:
                # Clamp so the x10000 scaling stays inside the Int16 range
                # (3.2760 * 10000 = 32760 < 32767).
                band_data[band_data >= 3.2760] = 3.2760
                band_data[band_data <= -3.2760] = -3.2760

                # Nodata sentinel becomes -32767 after scaling.
                band_data[np.logical_not(valid_pixels)] = -3.2767

                band_data = (band_data * 10000).astype('int16')

            output_band_ds.WriteArray(band_data, xoff, 0)
# Esempio n. 2
# 0
def exec(images, model_dir, output_dir, memory_percentage=40):
    """Classify each image with a trained TF Estimator and write predictions.

    Loads training params and chip geometry from ``model_dir``, then for
    each input image reads padded chips, accumulates them until process
    memory usage exceeds ``memory_percentage`` (or the image is exhausted),
    runs the batch through the estimator and writes the discretized result
    into band 1 of a '<name>_pred.tif' file in ``output_dir``.

    Args:
        images: iterable of GDAL-readable image paths.
        model_dir: directory holding 'train_params.dat', 'chips_info.dat'
            and the trained model checkpoints.
        output_dir: directory for the prediction rasters (created if absent).
        memory_percentage: RAM-usage threshold (%) that triggers flushing
            the accumulated chips through the model.
    """
    tf.logging.set_verbosity(tf.logging.INFO)

    dl_utils.mkdirp(output_dir)

    param_path = dl_utils.new_filepath('train_params.dat', directory=model_dir)
    params = dl_utils.load_object(param_path)

    chips_info_path = dl_utils.new_filepath('chips_info.dat',
                                            directory=model_dir)
    chips_info = dl_utils.load_object(chips_info_path)

    for in_image in images:
        in_image_ds = gdal.Open(in_image)
        out_image = dl_utils.new_filepath(in_image,
                                          suffix='pred',
                                          ext='tif',
                                          directory=output_dir)
        out_image_ds = dl_utils.create_output_file(in_image, out_image)
        out_band = out_image_ds.GetRasterBand(1)

        estimator = tf.estimator.Estimator(model_fn=md.description,
                                           params=params,
                                           model_dir=model_dir)

        print(chips_info)
        # Input chips are padded on each side relative to the expected
        # output chips; pad_size is half the size difference.
        _, dat_xsize, dat_ysize, dat_nbands = chips_info['dat_shape']
        _, exp_xsize, exp_ysize, _ = chips_info['exp_shape']
        pad_size = int((dat_xsize - exp_xsize) / 2)

        input_positions = dl_utils.get_predict_positions(
            in_image_ds.RasterXSize, in_image_ds.RasterYSize, exp_xsize,
            pad_size)

        cache_chip_data = []
        cache_out_position = []

        for i, input_position in enumerate(input_positions):
            try:
                chip_data, out_position = dl_utils.get_predict_data(
                    in_image_ds, input_position, pad_size)
            except IOError as error:
                # Best-effort: skip unreadable blocks rather than aborting
                # the whole image.
                print(error)
                print('Ignoring this data block')
                continue

            cache_chip_data.append(chip_data)
            cache_out_position.append(out_position)

            print("Reading image " + in_image + ": memory percentage " +
                  str(dl_utils.memory_percentage()) + "%")

            # Flush the cache when memory pressure is high or this is the
            # last position of the image.
            if (dl_utils.memory_percentage() > memory_percentage
                    or i == (len(input_positions) - 1)):
                input_data = np.stack(cache_chip_data)

                # Release the per-chip cache before the big batch is used.
                del cache_chip_data
                cache_chip_data = []

                input_data = input_data[:, :, :, 0:dat_nbands]

                print("Classifying image " + in_image + ": progress " +
                      str(float(i) / len(input_positions) * 100) + "%")
                predict_input_fn = tf.estimator.inputs.numpy_input_fn(
                    x={"data": input_data},
                    batch_size=params['batch_size'],
                    shuffle=False)
                predict_results = estimator.predict(input_fn=predict_input_fn)

                print("Writing classification result in " + out_image)
                for chip_predict, out_position in zip(predict_results,
                                                      cache_out_position):
                    # Map soft predictions to hard {0, 1} labels.
                    out_predict = dl_utils.discretize_values(
                        chip_predict, 1, 0)

                    out_x0 = out_position[0]
                    out_y0 = out_position[1]
                    out_band.WriteArray(out_predict[:, :, 0], out_x0, out_y0)

                out_band.FlushCache()

                # Drop the large batch and results before the next round.
                del input_data
                del predict_results
                cache_out_position = []
                gc.collect()