def _apply_tile_transform(tile, contours):
    # assuming there's a single transform for the tile
    assert len(tile.transforms) == 1

    transform = tile.transforms[0]
    if DEBUG_NO_TRANSFOM:
        from rh_renderer import models
        transform = models.AffineModel()

    out_contours = np.empty_like(contours)
    for cnt_idx, cnt in enumerate(contours):
        cnt_pts = cnt.reshape((len(cnt), 2))
        cnt_pts_transformed = transform.apply(cnt_pts)
        cnt_pts_transformed = cnt_pts_transformed.reshape(
            (len(cnt_pts_transformed), 1, 2))
        out_contours[cnt_idx] = cnt_pts_transformed
    return out_contours
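A minimal driving sketch for the helper above, assuming it sits in the same module and that contours use the OpenCV-style (N, 1, 2) layout; the DummyTile class and the square contour are hypothetical stand-ins:

import numpy as np
from rh_renderer import models

DEBUG_NO_TRANSFOM = False  # mirrors the module-level flag the helper checks


class DummyTile(object):
    # Hypothetical tile-like object exposing a single transform, as the
    # helper's assert expects.
    def __init__(self, transform):
        self.transforms = [transform]


# A square contour in the (N, 1, 2) layout produced by cv2.findContours
square = np.array([[[0., 0.]], [[10., 0.]], [[10., 10.]], [[0., 10.]]])

# With an identity affine model the output contours equal the input
tile = DummyTile(models.AffineModel())
out = _apply_tile_transform(tile, np.array([square]))
print(out.shape)  # (1, 4, 1, 2)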
Example #2
# Renders a given tilespec in 1/4 resolution
from __future__ import print_function
import pylab
from rh_renderer.tilespec_renderer import TilespecRenderer
import numpy as np
import time
import json
from rh_renderer import models

if __name__ == '__main__':
    ts_fname = 'tilespec_1tile.json'
    with open(ts_fname, 'r') as data:
        tilespec = json.load(data)

    # Create the tilespec renderer
    renderer1 = TilespecRenderer(tilespec)

    downsample = models.AffineModel(
        np.array([[0.25, 0., 0.], [0., 0.25, 0.], [0., 0., 1.]]))

    renderer1.add_transformation(downsample)

    start_time = time.time()
    img1, start_point1 = renderer1.render()
    print("Start point is at:", start_point1, "image shape:", img1.shape,
          "took: {} seconds".format(time.time() - start_time))
    pylab.figure()
    pylab.imshow(img1, cmap='gray', vmin=0., vmax=255.)

    pylab.show()
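The downsampling model in this script is just a homogeneous 2D scale matrix; as a quick standalone check (not part of the script, point coordinates are made up), applying it to a point multiplies both coordinates by 0.25:

import numpy as np

# The same scale matrix that is passed to models.AffineModel above
S = np.array([[0.25, 0., 0.],
              [0., 0.25, 0.],
              [0., 0., 1.]])
p = np.array([1000., 400., 1.])  # a point in homogeneous coordinates
print(S.dot(p))  # x and y scaled by 1/4 -> 250.0 and 100.0; the homogeneous 1 is unchanged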
Example #3
def render_tilespec(tile_fname,
                    output,
                    scale,
                    output_type,
                    in_bbox,
                    tile_size,
                    invert_image,
                    threads_num=1,
                    empty_placeholder=False,
                    hist_adjuster_alg_type=None,
                    from_to_cols_rows=None,
                    blend_type=BlendType.MULTI_BAND_SEAM):
    """Renders a given tilespec.
       If the in_bbox to_x/to_y values are -1, uses the tilespecs to determine the output size.
       If tile_size is 0, the output will be a single image, otherwise multiple tiles will be created.
       output is either a single filename to save the output in (using the output_type),
       or a prefix for the tiles output, which will be of the form: {prefix}_tr%d-tc%d.{output_type}
       and the row (tr) and column (tc) values will be one-based."""

    start_time = time.time()
    # Determine the output shape
    if in_bbox[1] == -1 or in_bbox[3] == -1:
        image_bbox = common.read_bboxes_grep(tile_fname)
        image_bbox[0] = max(image_bbox[0], in_bbox[0])
        image_bbox[2] = max(image_bbox[2], in_bbox[2])
        if in_bbox[1] > 0:
            image_bbox[1] = in_bbox[1]
        if in_bbox[3] > 0:
            image_bbox[3] = in_bbox[3]
    else:
        image_bbox = in_bbox

    scaled_bbox = [
        int(math.floor(image_bbox[0] * scale)),
        int(math.ceil(image_bbox[1] * scale)),
        int(math.floor(image_bbox[2] * scale)),
        int(math.ceil(image_bbox[3] * scale))
    ]
    # Set the post-scale out shape of the image
    out_shape = (scaled_bbox[1] - scaled_bbox[0],
                 scaled_bbox[3] - scaled_bbox[2])
    print("Final out_shape for the image: {}".format(out_shape))

    hist_adjuster = None
    #     if hist_adjuster_fname is not None:
    #         #reference_histogram = HistMatcher(histogram_fname=reference_histogram_fname, saturate_low_pct=0.001, saturate_high_pct=0.001)
    #         #reference_histogram = HistMatcher(histogram_fname=reference_histogram_fname)
    #         hist_adjuster = rh_renderer.normalization.hist_adjuster.load_adjuster(hist_adjuster_fname)
    #     elif hist_adjuster_alg_type is not None:
    #         if hist_adjuster_alg_type.upper() == 'CLAHE':
    #             hist_adjuster = HistogramCLAHE()
    if hist_adjuster_alg_type is not None:
        if hist_adjuster_alg_type.upper() == 'CLAHE':
            hist_adjuster = HistogramCLAHE()
        elif hist_adjuster_alg_type.upper() == 'GB11CLAHE':
            hist_adjuster = HistogramGB11CLAHE()

    with open(tile_fname, 'r') as data:
        tilespec = ujson.load(data)
    renderer = TilespecRenderer(tilespec,
                                hist_adjuster=hist_adjuster,
                                dynamic=(scale != 1.0),
                                blend_type=blend_type)

    # FOR THE IARPA latest dataset
    #     trans_model = models.AffineModel(np.array([
    #                                              [1., 0., 13679.],
    #                                              [0., 1., 2108.],
    #                                              [0., 0., 1.]
    #                                             ]))
    #     renderer.add_transformation(trans_model)

    # Add the downsampling transformation
    if scale != 1.0:
        downsample = models.AffineModel(
            np.array([[scale, 0., 0.], [0., scale, 0.], [0., 0., 1.]]))
        renderer.add_transformation(downsample)

    # FOR THE IARPA R2B1 first dataset


#     rot = 0.29441193975
#     rot_model = models.AffineModel(np.array([
#                                              [math.cos(rot), -math.sin(rot), 0.],
#                                              [math.sin(rot), math.cos(rot), 0.],
#                                              [0., 0., 1.]
#                                             ]))
#     renderer.add_transformation(rot_model)

#     # FOR THE IARPA R2B1 Layers_1_2 dataset
#     rot = 0.3490658504
#     rot_model = models.AffineModel(np.array([
#                                              [math.cos(rot), -math.sin(rot), 0.],
#                                              [math.sin(rot), math.cos(rot), 0.],
#                                              [0., 0., 1.]
#                                             ]))
#     renderer.add_transformation(rot_model)

    if tile_size == 0:
        # no tiles, just render a single file
        out_fname = "{}.{}".format(os.path.splitext(output)[0], output_type)
        out_fname_empty = "{}_empty".format(out_fname)

        # Render the image
        img, start_point = renderer.crop(scaled_bbox[0], scaled_bbox[2],
                                         scaled_bbox[1] - 1,
                                         scaled_bbox[3] - 1)
        print("Rendered cropped and downsampled version")

        if empty_placeholder:
            if img is None or np.all(img == 0):
                # create the empty file, and return
                print("saving empty image {}".format(out_fname_empty))
                open(out_fname_empty, 'a').close()
                print("Rendering and saving empty file {} took {} seconds.".
                      format(out_fname_empty,
                             time.time() - start_time))
                return

        if img is None:
            # No actual image, set a blank image of the wanted size
            img = np.zeros((out_shape[1], out_shape[0]), dtype=np.uint8)
            start_point = (0, 0)

        print("Padding image")
        img = pad_image(img, scaled_bbox[0], scaled_bbox[2], start_point)

        if invert_image:
            print("inverting image")
            img = 255 - img

        print("saving image {}".format(out_fname))
        cv2.imwrite(out_fname, img)
    else:
        # Tile the image
        rows = int(math.ceil(out_shape[1] / float(tile_size)))
        cols = int(math.ceil(out_shape[0] / float(tile_size)))

        from_row = 0
        from_col = 0
        to_row = rows
        to_col = cols
        if from_to_cols_rows is not None:
            from_col, from_row, to_col, to_row = from_to_cols_rows

        # Iterate over each row and column and save the tile
        for cur_row in range(from_row, to_row):
            from_y = scaled_bbox[2] + cur_row * tile_size
            to_y = min(scaled_bbox[2] + (cur_row + 1) * tile_size,
                       scaled_bbox[3])
            for cur_col in range(from_col, to_col):
                tile_start_time = time.time()
                out_fname = "{}_tr{}-tc{}.{}".format(output, str(cur_row + 1),
                                                     str(cur_col + 1),
                                                     output_type)
                out_fname_empty = "{}_empty".format(out_fname)
                from_x = scaled_bbox[0] + cur_col * tile_size
                to_x = min(scaled_bbox[0] + (cur_col + 1) * tile_size,
                           scaled_bbox[1])

                # Render the tile
                img, start_point = renderer.crop(from_x, from_y, to_x - 1,
                                                 to_y - 1)
                print("Rendered cropped and downsampled version")

                if empty_placeholder:
                    if img is None or np.all(img == 0):
                        # create the empty placeholder file and continue to the next tile
                        print("saving empty image {}".format(out_fname_empty))
                        open(out_fname_empty, 'a').close()
                        continue

                if img is None:
                    # No actual image, set a blank image of the wanted size
                    img = np.zeros((to_y - from_y, to_x - from_x),
                                   dtype=np.uint8)
                    start_point = (from_y, from_x)

                print("Padding image")
                img = pad_image(img, from_x, from_y, start_point)

                if invert_image:
                    print("inverting image")
                    img = 255 - img

                print("saving image {}".format(out_fname))
                cv2.imwrite(out_fname, img)

                print(
                    "single tile rendering and saving to {} took {} seconds.".
                    format(out_fname,
                           time.time() - tile_start_time))

    print("Rendering and saving {} took {} seconds.".format(
        tile_fname,
        time.time() - start_time))
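Based on the docstring, a hedged usage sketch for this version; the tilespec path and output prefix below are hypothetical, and in_bbox is [from_x, to_x, from_y, to_y] with -1 meaning "take the extent from the tilespec":

# Single full image at quarter resolution, written to output/Sec001.png
render_tilespec('Sec001.json', 'output/Sec001', 0.25, 'png',
                [0, -1, 0, -1], tile_size=0, invert_image=False)

# 1024x1024 tiles named output/Sec001_tr{row}-tc{col}.png (one-based indices)
render_tilespec('Sec001.json', 'output/Sec001', 0.25, 'png',
                [0, -1, 0, -1], tile_size=1024, invert_image=False,
                blend_type=BlendType.MULTI_BAND_SEAM)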
Example #4
def render_tilespec(tile_fname,
                    output,
                    scale,
                    output_type,
                    in_bbox,
                    tile_size,
                    invert_image,
                    threads_num=1,
                    empty_placeholder=False,
                    reference_histogram_fname=None,
                    from_to_cols_rows=None):
    """Renders a given tilespec.
       If the in_bbox to_x/to_y values are -1, uses the tilespecs to determine the output size.
       If tile_size is 0, the output will be a single image, otherwise multiple tiles will be created.
       output is either a single filename to save the output in (using the output_type),
       or a prefix for the tiles output, which will be of the form: {prefix}_tr%d-tc%d.{output_type}
       and the row (tr) and column (tc) values will be one-based."""

    
    start_time = time.time()
    # Determine the output shape
    if in_bbox[1] == -1 or in_bbox[3] == -1:
        image_bbox = BoundingBox.read_bbox_grep(tile_fname)
        image_bbox.from_x = max(image_bbox.from_x, in_bbox[0])
        image_bbox.from_y = max(image_bbox.from_y, in_bbox[2])
        if in_bbox[1] > 0:
            image_bbox.to_x = in_bbox[1]
        if in_bbox[3] > 0:
            image_bbox.to_y = in_bbox[3]
    else:
        image_bbox = BoundingBox.fromList(in_bbox)

    scaled_bbox = BoundingBox(
                                int(math.floor(image_bbox.from_x * scale)),
                                int(math.ceil(image_bbox.to_x * scale)),
                                int(math.floor(image_bbox.from_y * scale)),
                                int(math.ceil(image_bbox.to_y * scale))
                             )
    # Set the post-scale out shape of the image
    out_shape = (scaled_bbox.to_x - scaled_bbox.from_x, scaled_bbox.to_y - scaled_bbox.from_y)
    print "Final out_shape for the image: {}".format(out_shape)

    reference_histogram = None
    if reference_histogram_fname is not None:
        #reference_histogram = HistMatcher(histogram_fname=reference_histogram_fname, saturate_low_pct=0.001, saturate_high_pct=0.001)
        reference_histogram = HistMatcher(histogram_fname=reference_histogram_fname)

    with open(tile_fname, 'r') as data:
        tilespec = json.load(data)
    renderer = TilespecRenderer(tilespec, hist_adjuster=reference_histogram)

    # Add the downsampling transformation
    downsample = models.AffineModel(np.array([
                                              [scale, 0., 0.],
                                              [0., scale, 0.],
                                              [0., 0., 1.]
                                             ]))
    renderer.add_transformation(downsample)

    if tile_size == 0:
        # no tiles, just render a single file
        out_fname = "{}.{}".format(os.path.splitext(output)[0], output_type)
        out_fname_empty = "{}_empty".format(out_fname)

        # Render the image
        img, start_point = renderer.crop(scaled_bbox.from_x, scaled_bbox.from_y, scaled_bbox.to_x - 1, scaled_bbox.to_y - 1)
        print "Rendered cropped and downsampled version"

        if empty_placeholder:
            if img is None or np.all(img == 0):
                # create the empty file, and return
                print "saving empty image {}".format(out_fname_empty)
                open(out_fname_empty, 'a').close()
                print "Rendering and saving empty file {} took {} seconds.".format(out_fname_empty, time.time() - start_time)
                return
                
        if img is None:
            # No actual image, set a blank image of the wanted size
            img = np.zeros((out_shape[1], out_shape[0]), dtype=np.uint8)
            start_point = (0, 0)

        print "Padding image"
        img = pad_image(img, scaled_bbox.from_x, scaled_bbox.from_y, start_point)

        if invert_image:
            print "inverting image"
            img = 255 - img

        print "saving image {}".format(out_fname)
        cv2.imwrite(out_fname, img)
    else:
        # Tile the image
        rows = int(math.ceil(out_shape[1] / float(tile_size)))
        cols = int(math.ceil(out_shape[0] / float(tile_size)))

        from_row = 0
        from_col = 0
        to_row = rows
        to_col = cols
        if from_to_cols_rows is not None:
            from_col, from_row, to_col, to_row = from_to_cols_rows

        # Iterate over each row and column and save the tile
        for cur_row in range(from_row, to_row):
            from_y = scaled_bbox.from_y + cur_row * tile_size
            to_y = min(scaled_bbox.from_y + (cur_row + 1) * tile_size, scaled_bbox.to_y)
            for cur_col in range(from_col, to_col):
                tile_start_time = time.time()
                out_fname = "{}_tr{}-tc{}.{}".format(output, str(cur_row + 1), str(cur_col + 1), output_type)
                out_fname_empty = "{}_empty".format(out_fname)
                from_x = scaled_bbox.from_x + cur_col * tile_size
                to_x = min(scaled_bbox.from_x + (cur_col + 1) * tile_size, scaled_bbox.to_x)
        
                # Render the tile
                img, start_point = renderer.crop(from_x, from_y, to_x - 1, to_y - 1)
                print "Rendered cropped and downsampled version"

                if empty_placeholder:
                    if img is None or np.all(img == 0):
                        # create the empty placeholder file and continue to the next tile
                        print("saving empty image {}".format(out_fname_empty))
                        open(out_fname_empty, 'a').close()
                        continue
                
                if img is None:
                    # No actual image, set a blank image of the wanted size
                    img = np.zeros((to_y - from_y, to_x - from_x), dtype=np.uint8)
                    start_point = (from_y, from_x)

                print "Padding image"
                img = pad_image(img, from_x, from_y, start_point)

                if invert_image:
                    print "inverting image"
                    img = 255 - img

                print "saving image {}".format(out_fname)
                cv2.imwrite(out_fname, img)

                print "single tile rendering and saving to {} took {} seconds.".format(out_fname, time.time() - tile_start_time)

    print "Rendering and saving {} took {} seconds.".format(tile_fname, time.time() - start_time)
Example #5
def inverse_transform(model):
    mat = model.get_matrix()
    new_model = models.AffineModel(np.linalg.inv(mat))
    return new_model
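A quick check sketch of what the inversion gives you, assuming AffineModel.get_matrix()/apply() behave as in the snippets above: composing a model with its inverse returns the original points (the translation values below are made up).

import numpy as np
from rh_renderer import models

# A translation-by-(5, 7) affine model and its inverse
fwd = models.AffineModel(np.array([[1., 0., 5.],
                                   [0., 1., 7.],
                                   [0., 0., 1.]]))
inv = inverse_transform(fwd)

pts = np.array([[0., 0.], [10., 20.]])
round_trip = inv.apply(fwd.apply(pts))
print(np.allclose(round_trip, pts))  # expected: True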
Example #6
    def match_and_filter(self, features_kps1, features_descs1, features_kps2,
                         features_descs2):
        match_points = self.match(features_kps1, features_descs1,
                                  features_kps2, features_descs2)

        if match_points is None:
            return None, None

        model, filtered_matches, mask = ransac.filter_matches(
            match_points,
            match_points,
            self._params['model_index'],
            self._params['iterations'],
            self._params['max_epsilon'],
            self._params['min_inlier_ratio'],
            self._params['min_num_inlier'],
            self._params['max_trust'],
            self._params['det_delta'],
            self._params['max_stretch'],
            self._params['max_rot_deg'],
            robust_filter=not self._params['avoid_robust_filter'],
            max_distance=self._params['max_distance'])

        if model is None:
            return None, None

        if self._params["use_regularizer"]:
            regularizer_model, _, _ = ransac.filter_matches(
                match_points,
                match_points,
                self._params['regularizer_model_index'],
                self._params['iterations'],
                self._params['max_epsilon'],
                self._params['min_inlier_ratio'],
                self._params['min_num_inlier'],
                self._params['max_trust'],
                self._params['det_delta'],
                self._params['max_stretch'],
                self._params['max_rot_deg'],
                robust_filter=not self._params['avoid_robust_filter'],
                max_distance=self._params['max_distance'])

            if regularizer_model is None:
                return None, None

            result = (model.get_matrix() * (1 - self._params["regularizer_lambda"]) +
                      regularizer_model.get_matrix() * self._params["regularizer_lambda"])
            model = models.AffineModel(result)

        if 0 < self._params['best_k_matches'] < len(filtered_matches[0]):
            # Only keep the best K matches out of the filtered matches
            best_k_matches_idxs = np.argpartition(
                match_points[2][mask],
                -self._params['best_k_matches'])[-self._params['best_k_matches']:]
            filtered_matches = np.array([
                match_points[0][mask][best_k_matches_idxs],
                match_points[1][mask][best_k_matches_idxs]
            ])

        return model, filtered_matches
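Two details above that are easy to miss: the regularizer branch blends the two estimated matrices linearly, result = (1 - lambda) * M_model + lambda * M_regularizer, and the best_k_matches branch uses np.argpartition to pick the K highest-scoring matches without a full sort. A standalone illustration of the latter (scores are made up):

import numpy as np

# np.argpartition returns indices such that the last K positions hold the
# K largest values (in arbitrary order), which avoids sorting everything.
scores = np.array([0.2, 0.9, 0.1, 0.7, 0.5])  # made-up match scores
k = 2
best_idxs = np.argpartition(scores, -k)[-k:]
print(np.sort(scores[best_idxs]))  # [0.7 0.9]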