Example 1
        def tile_generator():
            tgs = []
            for i in range(len(self._images)):

                if self._resume_mode:
                    # TODO: Improve feature to work with multiple epochs
                    # Skip images which we have already read some number of tiles from
                    if self._get_image_read_count(self._images[i]) > config.io.resume_cutoff():
                        continue

                try:
                    img = loader.load_image(self._images, i)

                    if self._labels: # If we have labels make sure they are the same size as the input images
                        label = loader.load_image(self._labels, i)
                        if label.size() != img.size():
                            raise Exception('Label file ' + self._labels[i] + ' with size ' + str(label.size())
                                            + ' does not match input image size of ' + str(img.size()))
                    # w * h * bands * 4 * chunk * chunk = max_block_bytes
                    tile_width = int(math.sqrt(max_block_bytes / img.num_bands() / self._data_type.size /
                                               config.io.tile_ratio()))
                    tile_height = int(config.io.tile_ratio() * tile_width)
                    min_block_size = self._chunk_size ** 2 * config.io.tile_ratio() * img.num_bands() * 4
                    if max_block_bytes < min_block_size:
                        print('Warning: max_block_bytes=%g MB, but %g MB is recommended (minimum: %g MB)'
                              % (max_block_bytes / 1024 / 1024,
                                 min_block_size * 2 / 1024 / 1024, min_block_size / 1024 / 1024),
                              file=sys.stderr)
                    if tile_width < self._chunk_size or tile_height < self._chunk_size:
                        raise ValueError('max_block_bytes is too low.')
                    tiles = img.tiles(tile_width, tile_height, min_width=self._chunk_size, min_height=self._chunk_size,
                                      overlap=self._chunk_size - 1)
                except Exception as e: #pylint: disable=W0703
                    print('Caught exception tiling image: ' + self._images[i] + ' -> ' + str(e)
                          + '\nWill not load any tiles from this image')
                    if config.general.stop_on_input_error():
                        print('Aborting processing, set --bypass-input-errors to bypass this error.')
                        raise
                    tiles = [] # Else move past this image without loading any tiles

                random.Random(0).shuffle(tiles) # gives consistent random ordering so labels will match
                tgs.append((i, tiles))
            if not tgs:
                return
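            # Round-robin through groups of up to config.io.interleave_images()
            # images, yielding one tile from each in turn until every list is empty.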
            while tgs:
                cur = tgs[:config.io.interleave_images()]
                tgs = tgs[config.io.interleave_images():]
                done = False
                while not done:
                    done = True
                    for it in cur:
                        if not it[1]:
                            continue
                        t = it[1].pop(0)
                        if t:
                            done = False
                            yield (it[0], t.min_x, t.min_y, t.max_x, t.max_y)
                    if done:
                        break
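
A minimal, self-contained sketch of the round-robin interleave used above, with plain strings standing in for tile objects and a hard-coded group size standing in for config.io.interleave_images():

def interleave(tile_lists, group_size=2):
    # tile_lists is a list of (image_index, tiles) pairs, as built in tgs above.
    while tile_lists:
        cur = tile_lists[:group_size]
        tile_lists = tile_lists[group_size:]
        while any(tiles for _, tiles in cur):
            for image_index, tiles in cur:
                if tiles:
                    yield image_index, tiles.pop(0)

print(list(interleave([(0, ['a', 'b']), (1, ['c']), (2, ['d', 'e'])])))
# -> [(0, 'a'), (1, 'c'), (0, 'b'), (2, 'd'), (2, 'e')]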
Example 2
def main(options):
    model = tf.keras.models.load_model(
        options.model, custom_objects=delta.ml.layers.ALL_LAYERS)

    colors = np.array([[0x0, 0x0, 0x0], [0x67, 0xa9, 0xcf], [0xf6, 0xef, 0xf7],
                       [0xbd, 0xc9, 0xe1], [0x02, 0x81, 0x8a]],
                      dtype=np.uint8)
    error_colors = np.array([[0x0, 0x0, 0x0], [0xFF, 0x00, 0x00]],
                            dtype=np.uint8)

    images = config.dataset.images()
    labels = config.dataset.labels()

    if options.autoencoder:
        labels = None
    for (i, path) in enumerate(images):
        image = loader.load_image(images, i)
        base_name = os.path.splitext(os.path.basename(path))[0]
        output_image = tiff.DeltaTiffWriter('predicted_' + base_name + '.tiff')
        prob_image = None
        if options.prob:
            prob_image = tiff.DeltaTiffWriter('prob_' + base_name + '.tiff')
        error_image = None
        if labels:
            error_image = tiff.DeltaTiffWriter('errors_' + base_name + '.tiff')

        label = None
        if labels:
            label = loader.load_image(config.dataset.labels(), i)
        if options.autoencoder:
            label = image
            predictor = predict.ImagePredictor(model, output_image, True,
                                               (ae_convert, np.uint8, 3))
        else:
            predictor = predict.LabelPredictor(model,
                                               output_image,
                                               True,
                                               colormap=colors,
                                               prob_image=prob_image,
                                               error_image=error_image,
                                               error_colors=error_colors)

        try:
            predictor.predict(image, label)
        except KeyboardInterrupt:
            print('\nAborted.')
            return 0

        if labels:
            cm = predictor.confusion_matrix()
            print('%.2g%% Correct: %s' %
                  (np.sum(np.diag(cm)) / np.sum(cm) * 100, path))
            save_confusion(cm, 'confusion_' + base_name + '.pdf')

        if options.autoencoder:
            tiff.write_tiff('orig_' + base_name + '.tiff',
                            ae_convert(image.read()),
                            metadata=image.metadata())
    return 0
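
ae_convert is called above but not shown. A plausible sketch, assuming it rescales floating-point autoencoder output into the 3-band uint8 form implied by the (ae_convert, np.uint8, 3) argument:

import numpy as np

def ae_convert(data):
    # Hypothetical helper: clamp float imagery to [0, 1], scale to 8 bits,
    # and keep the first three bands so the result can be written as RGB.
    scaled = np.clip(data, 0.0, 1.0) * 255.0
    return scaled[:, :, :3].astype(np.uint8)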
Example 3
    def _load_tensor_imagery(self, is_labels, image_index, bbox):
        """Loads a single image as a tensor."""
        data = self._labels if is_labels else self._images

        if not is_labels: # Record each tile we read from this image (used by resume mode)
            file_path = data[image_index.numpy()]
            log_path  = self._get_image_read_log_path(file_path)
            if log_path:
                with portalocker.Lock(log_path, 'a', timeout=300) as f:
                    f.write(str(bbox) + '\n')
                    # TODO: What to write and when to clear it?

        try:
            image = loader.load_image(data, image_index.numpy())
            w = int(bbox[2])
            h = int(bbox[3])
            rect = rectangle.Rectangle(int(bbox[0]), int(bbox[1]), w, h)
            r = image.read(rect)
        except Exception as e: #pylint: disable=W0703
            print('Caught exception loading tile from image: ' + data[image_index.numpy()] + ' -> ' + str(e)
                  + '\nSkipping tile: ' + str(bbox))
            if config.general.stop_on_input_error():
                print('Aborting processing, set --bypass-input-errors to bypass this error.')
                raise
            # Else just skip this tile
            r = np.zeros(shape=(0,0,0), dtype=np.float32)
        return r
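
The read log written above pairs with the _get_image_read_count() check used in the resume branch of the first example. A minimal sketch of that counter, assuming the log holds one bounding box per line:

import os

def _get_image_read_count(self, file_path):
    # Hypothetical implementation: the number of tiles already read from an
    # image is the number of lines in its read log.
    log_path = self._get_image_read_log_path(file_path)
    if not log_path or not os.path.exists(log_path):
        return 0
    with open(log_path, 'r') as f:
        return sum(1 for _ in f)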
Example 4
    def __init__(self, images, labels, chunk_size, output_size, chunk_stride=1,
                 resume_mode=False, log_folder=None):
        """
        Initialize the dataset based on the specified image and label ImageSets
        """

        self._resume_mode = resume_mode
        self._log_folder  = log_folder
        if self._log_folder and not os.path.exists(self._log_folder):
            os.mkdir(self._log_folder)

        # Record some of the config values
        assert (chunk_size % 2) == (output_size % 2), 'Chunk size and output size must both be either even or odd.'
        self._chunk_size   = chunk_size
        self._output_size  = output_size
        self._output_dims  = 1
        self._chunk_stride = chunk_stride
        self._data_type    = tf.float32
        self._label_type   = tf.uint8

        if labels:
            assert len(images) == len(labels)
        self._images = images
        self._labels = labels

        # Load the first image to get the number of bands for the input files.
        self._num_bands = loader.load_image(images, 0).num_bands()
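
The parity assertion above guarantees that the output window can be centered inside the input chunk with an integer border on every side. A worked example with hypothetical sizes:

chunk_size, output_size = 17, 9           # both odd, as the assertion requires
border = (chunk_size - output_size) // 2  # 4 pixels of context on each side
assert chunk_size == output_size + 2 * border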
Example 5
    def _load_tensor_imagery(self, is_labels, image_index, bbox):
        """Loads a single image as a tensor."""
        image = loader.load_image(self._labels if is_labels else self._images,
                                  image_index.numpy())
        w = int(bbox[2])
        h = int(bbox[3])
        rect = rectangle.Rectangle(int(bbox[0]), int(bbox[1]), w, h)
        r = image.read(rect)
        return r
Example 6
        def tile_generator():
            tgs = []
            for i in range(len(self._images)):
                img = loader.load_image(self._images, i)
                # w * h * bands * 4 * chunk * chunk = max_block_bytes
                tile_width = int(math.sqrt(max_block_bytes / img.num_bands() / self._data_type.size /
                                           config.io.tile_ratio()))
                tile_height = int(config.io.tile_ratio() * tile_width)
                min_block_size = self._chunk_size ** 2 * config.io.tile_ratio() * img.num_bands() * 4
                if max_block_bytes < min_block_size:
                    print('Warning: max_block_bytes=%g MB, but %g MB is recommended (minimum: %g MB)'
                          % (max_block_bytes / 1024 / 1024,
                             min_block_size * 2 / 1024 / 1024, min_block_size / 1024 / 1024),
                          file=sys.stderr)
                if tile_width < self._chunk_size or tile_height < self._chunk_size:
                    raise ValueError('max_block_bytes is too low.')
                tiles = img.tiles(tile_width, tile_height, min_width=self._chunk_size, min_height=self._chunk_size,
                                  overlap=self._chunk_size - 1)
                random.Random(0).shuffle(tiles) # gives consistent random ordering so labels will match
                tgs.append((i, tiles))
            while tgs:
                cur = tgs[:config.io.interleave_images()]
                tgs = tgs[config.io.interleave_images():]
                done = False
                while not done:
                    done = True
                    for it in cur:
                        if not it[1]:
                            continue
                        t = it[1].pop(0)
                        if t:
                            done = False
                            yield (it[0], t.min_x, t.min_y, t.max_x, t.max_y)
                    if done:
                        break
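
A worked example of the tile sizing above, using hypothetical numbers (8 bands, 4-byte float32, a tile_ratio of 1.0, and a 1 GB block budget):

import math

max_block_bytes = 1024 ** 3  # hypothetical 1 GB budget
num_bands, dtype_size, tile_ratio = 8, 4, 1.0

tile_width = int(math.sqrt(max_block_bytes / num_bands / dtype_size / tile_ratio))
tile_height = int(tile_ratio * tile_width)
# tile_width * tile_height * num_bands * dtype_size is then roughly max_block_bytes
print(tile_width, tile_height, tile_width * tile_height * num_bands * dtype_size)
# -> 5792 5792 1073512448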
Example 7
def main(options):

    # TODO: Share the way this is done with ml/train.py
    cpuOnly = (config.general.gpus() == 0)

    if cpuOnly:
        with tf.device('/cpu:0'):
            model = tf.keras.models.load_model(
                options.model, custom_objects=delta.ml.layers.ALL_LAYERS)
    else:
        model = tf.keras.models.load_model(
            options.model, custom_objects=delta.ml.layers.ALL_LAYERS)

    colors = list(map(lambda x: x.color, config.dataset.classes))
    error_colors = np.array([[0x0, 0x0, 0x0], [0xFF, 0x00, 0x00]],
                            dtype=np.uint8)
    if options.noColormap:
        colors = None  # Forces raw one channel output

    start_time = time.time()
    images = config.dataset.images()
    labels = config.dataset.labels()

    if options.autoencoder:
        labels = None
    for (i, path) in enumerate(images):
        image = loader.load_image(images, i)
        base_name = os.path.splitext(os.path.basename(path))[0]
        output_image = tiff.DeltaTiffWriter('predicted_' + base_name + '.tiff')
        prob_image = None
        if options.prob:
            prob_image = tiff.DeltaTiffWriter('prob_' + base_name + '.tiff')
        error_image = None
        if labels:
            error_image = tiff.DeltaTiffWriter('errors_' + base_name + '.tiff')

        label = None
        if labels:
            label = loader.load_image(config.dataset.labels(), i)

        if options.autoencoder:
            label = image
            predictor = predict.ImagePredictor(model, output_image, True,
                                               (ae_convert, np.uint8, 3))
        else:
            predictor = predict.LabelPredictor(model,
                                               output_image,
                                               True,
                                               labels.nodata_value(),
                                               colormap=colors,
                                               prob_image=prob_image,
                                               error_image=error_image,
                                               error_colors=error_colors)

        try:
            if cpuOnly:
                with tf.device('/cpu:0'):
                    predictor.predict(image, label)
            else:
                predictor.predict(image, label)
        except KeyboardInterrupt:
            print('\nAborted.')
            return 0

        if labels:
            cm = predictor.confusion_matrix()
            print('%.2g%% Correct: %s' %
                  (np.sum(np.diag(cm)) / np.sum(cm) * 100, path))
            save_confusion(cm, map(lambda x: x.name, config.dataset.classes),
                           'confusion_' + base_name + '.pdf')

        if options.autoencoder:
            tiff.write_tiff('orig_' + base_name + '.tiff',
                            ae_convert(image.read()),
                            metadata=image.metadata())
    stop_time = time.time()
    print('Elapsed time = ', stop_time - start_time)
    return 0
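
One way to address the TODO above and drop the duplicated cpuOnly branches, sketched with contextlib.nullcontext (an assumption, not the project's actual shared helper):

import contextlib
import tensorflow as tf

def device_scope(cpu_only):
    # Pin work to the CPU when no GPUs are configured; otherwise let
    # TensorFlow place operations as it normally would.
    return tf.device('/cpu:0') if cpu_only else contextlib.nullcontext()

# Usage sketch:
#     with device_scope(cpuOnly):
#         model = tf.keras.models.load_model(options.model,
#                                            custom_objects=delta.ml.layers.ALL_LAYERS)
#     ...
#     with device_scope(cpuOnly):
#         predictor.predict(image, label)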