Example #1
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        self._blob_detector = BlobDetector2D.create_detector(
            **self._kwargs.get("blob_detector", {}))
        self._matcher = FeaturesMatcher(
            BlobDetector2D.create_matcher,
            **self._kwargs.get("matcher_params", {}))

    @staticmethod
    def match_mfovs_features(matcher_params, sec1_cache, sec2_cache, mfovs1,
                             mfovs2):
        """
        Matches the features in mfovs1 (of sec1) to the features in mfovs2 (of sec2).
        This method is run by a process that loads the matcher from its local thread storage.
        """

        thread_local_store = ThreadLocalStorageLRU()
        if 'matcher' in thread_local_store.keys():
            matcher = thread_local_store['matcher']
        else:
            # Initialize the matcher, and store it in the local thread storage
            matcher = FeaturesMatcher(BlobDetector2D.create_matcher,
                                      **matcher_params)
            thread_local_store['matcher'] = matcher

        def get_kps_descs(mfovs, sec_cache):
            mfovs = list(mfovs)
            if len(mfovs) == 1:
                kps_descs = sec_cache["pre_match_blobs/" + str(mfovs[0])]
                mfovs_kps = np.array(kps_descs[0])
                mfovs_descs = np.array(kps_descs[1])
            else:
                mfovs_kps_arrays = []
                mfovs_descs_arrays = []
                for mfov in mfovs:
                    kps_descs = sec_cache["pre_match_blobs/" + str(mfov)]
                    if len(kps_descs[0]) > 0:
                        mfovs_kps_arrays.append(kps_descs[0])
                        mfovs_descs_arrays.append(kps_descs[1])
                if len(mfovs_kps_arrays) == 0:
                    mfovs_kps = np.array([])
                    mfovs_descs = np.array([])
                elif len(mfovs_kps_arrays) == 1:
                    mfovs_kps = mfovs_kps_arrays[0]
                    mfovs_descs = mfovs_descs_arrays[0]
                else:
                    mfovs_kps = np.vstack(mfovs_kps_arrays)
                    mfovs_descs = np.vstack(mfovs_descs_arrays)
            return np.array(mfovs_kps), np.array(mfovs_descs)

        mfovs1_kps, mfovs1_descs = get_kps_descs(mfovs1, sec1_cache)
        mfovs2_kps, mfovs2_descs = get_kps_descs(mfovs2, sec2_cache)

        model, filtered_matches = matcher.match_and_filter(
            mfovs1_kps, mfovs1_descs, mfovs2_kps, mfovs2_descs)
        return mfovs1, model, filtered_matches
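
The snippets above and below cache heavyweight objects (matcher, blob detector) in per-worker thread-local storage so each pool worker builds them only once. A minimal sketch of the same pattern using only the standard library's threading.local; FeaturesMatcher and BlobDetector2D are assumed to come from the surrounding codebase:

import threading

_thread_local = threading.local()

def get_matcher(matcher_params):
    # Build the matcher once per worker thread and reuse it on later calls.
    matcher = getattr(_thread_local, 'matcher', None)
    if matcher is None:
        matcher = FeaturesMatcher(BlobDetector2D.create_matcher, **matcher_params)
        _thread_local.matcher = matcher
    return matcher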
Example #4
class PreMatch3DFullSectionThenMfovsBlobs(object):
    """
    Performs a section to section pre-matching by detecting blobs in each section,
    then performing a global section matching, and then a per-mfov (of sec1) local refinement of the matches.
    """

    # The locations of the points relative to the center of an mfov
    OVERLAP_DELTAS = np.array([[0, 0], [0, -10000], [-5000, 5000], [5000, 5000]])

    def __init__(self, **kwargs):
        self._kwargs = kwargs
        self._blob_detector = BlobDetector2D.create_detector(
            **self._kwargs.get("blob_detector", {}))
        self._matcher = FeaturesMatcher(
            BlobDetector2D.create_matcher,
            **self._kwargs.get("matcher_params", {}))

    @staticmethod
    def detect_mfov_blobs(blob_detector_args, mfov):
        """
        Receives a tilespec of an mfov (all the tiles in that mfov),
        detects the blobs on each of the thumbnails of the mfov tiles,
        and returns the locations of the blobs (in stitched global coordinates), and their
        descriptors.
        """
        thread_local_store = ThreadLocalStorageLRU()
        if 'blob_detector' not in thread_local_store.keys():
            # Initialize the blob_detector, and store it in the local thread storage
            blob_detector = BlobDetector2D.create_detector(
                **blob_detector_args)
            thread_local_store['blob_detector'] = blob_detector
        else:
            blob_detector = thread_local_store['blob_detector']

        ds_rate = blob_detector_args.get("ds_rate", None)

        all_kps_descs = [[], []]
        for tile in mfov.tiles():
            # Read the tile's image
            img = tile.image
            if ds_rate is not None:
                img = cv2.resize(img,
                                 None,
                                 fx=ds_rate,
                                 fy=ds_rate,
                                 interpolation=cv2.INTER_AREA)
            kps, descs = blob_detector.detectAndCompute(img)

            if len(kps) == 0:
                continue

            kps_pts = np.empty((len(kps), 2), dtype=np.float64)
            for kp_i, kp in enumerate(kps):
                kps_pts[kp_i][:] = kp.pt
            # upsample the thumbnail coordinates to original tile coordinates
            if ds_rate is not None:
                kps_pts[:, 0] /= ds_rate
                kps_pts[:, 1] /= ds_rate

            # Apply the transformation to the points
            assert (len(tile.transforms) == 1)
            model = tile.transforms[0]
            kps_pts = model.apply(kps_pts)

            all_kps_descs[0].extend(kps_pts)
            all_kps_descs[1].extend(descs)

        logger.report_event("Found {} blobs in section {}, mfov {}".format(
            len(all_kps_descs[0]), mfov.layer, mfov.mfov_index),
                            log_level=logging.INFO)
        return mfov.mfov_index, all_kps_descs
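
    # Illustrative call (hypothetical ds_rate value and mfov object), mirroring
    # the apply_async dispatch in compute_section_blobs below:
    #   mfov_index, (kps, descs) = PreMatch3DFullSectionThenMfovsBlobs.detect_mfov_blobs(
    #       {"ds_rate": 0.125}, mfov)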

    def compute_section_blobs(self, sec, sec_cache, pool):
        # Create nested caches if needed
        if "pre_match_blobs" not in sec_cache:
            sec_cache["pre_match_blobs"] = {}

        total_features_num = 0
        # create the mfovs blob computation jobs
        async_results = []
        for mfov in sec.mfovs():
            if mfov.mfov_index in sec_cache["pre_match_blobs"]:
                continue
            res = pool.apply_async(
                PreMatch3DFullSectionThenMfovsBlobs.detect_mfov_blobs,
                (self._kwargs.get("blob_detector", {}), mfov))
            async_results.append(res)

        for res in async_results:
            mfov_index, mfov_kps_descs = res.get()
            sec_cache["pre_match_blobs"][mfov_index] = mfov_kps_descs
            total_features_num += len(mfov_kps_descs[0])

        return total_features_num

    @staticmethod
    def collect_all_features(sec_cache):
        # TODO - need to see if pre-allocation can improve speed
        all_kps_arrays = [
            kps_descs[0]
            for kps_descs in sec_cache["pre_match_blobs"].values()
            if len(kps_descs[0]) > 0
        ]
        all_descs_arrays = [
            kps_descs[1]
            for kps_descs in sec_cache["pre_match_blobs"].values()
            if len(kps_descs[1]) > 0
        ]
        return np.vstack(all_kps_arrays), np.vstack(all_descs_arrays)

    @staticmethod
    def get_overlapping_mfovs(mfov1, sec2, sec1_to_sec2_model, sec2_rtree):
        # TODO - for single beam data, it might be better to take the boundaries of all tiles in mfov1,
        #        and return their overlapping mfovs on sec2
        # Take mfov1's center
        mfov1_center = np.array([(mfov1.bbox[0] + mfov1.bbox[1]) / 2,
                                 (mfov1.bbox[2] + mfov1.bbox[3]) / 2])

        # Add the triangle points
        sec1_points = PreMatch3DFullSectionThenMfovsBlobs.OVERLAP_DELTAS + mfov1_center
        sec1_on_sec2_points = sec1_to_sec2_model.apply(sec1_points)
        overlapping_mfovs = set()
        for sec1_on_sec2_point in sec1_on_sec2_points:
            # search a 1x1 box around the point (in (x_min, x_max, y_min, y_max) notation)
            rect_res = sec2_rtree.search([
                sec1_on_sec2_point[0], sec1_on_sec2_point[0] + 1,
                sec1_on_sec2_point[1], sec1_on_sec2_point[1] + 1
            ])
            for other_t in rect_res:
                overlapping_mfovs.add(other_t.mfov_index)
        return overlapping_mfovs

    @staticmethod
    def match_mfovs_features(matcher_params, sec1_cache, sec2_cache, mfovs1,
                             mfovs2):
        """
        Matches the features in mfovs1 (of sec1) to the features in mfovs2 (of sec2).
        This method is run by a process that loads the matcher from its local thread storage.
        """

        thread_local_store = ThreadLocalStorageLRU()
        if 'matcher' in thread_local_store.keys():
            matcher = thread_local_store['matcher']
        else:
            # Initialize the matcher, and store it in the local thread storage
            matcher = FeaturesMatcher(BlobDetector2D.create_matcher,
                                      **matcher_params)
            thread_local_store['matcher'] = matcher

        def get_kps_descs(mfovs, sec_cache):
            mfovs = list(mfovs)
            if len(mfovs) == 1:
                kps_descs = sec_cache["pre_match_blobs"][mfovs[0]]
                mfovs_kps = np.array(kps_descs[0])
                mfovs_descs = np.array(kps_descs[1])
            else:
                mfovs_kps_arrays = []
                mfovs_descs_arrays = []
                for mfov in mfovs:
                    kps_descs = sec_cache["pre_match_blobs"][mfov]
                    if len(kps_descs[0]) > 0:
                        mfovs_kps_arrays.append(kps_descs[0])
                        mfovs_descs_arrays.append(kps_descs[1])
                if len(mfovs_kps_arrays) == 0:
                    mfovs_kps = np.array([])
                    mfovs_descs = np.array([])
                elif len(mfovs_kps_arrays) == 1:
                    mfovs_kps = mfovs_kps_arrays[0]
                    mfovs_descs = mfovs_descs_arrays[0]
                else:
                    mfovs_kps = np.vstack(mfovs_kps_arrays)
                    mfovs_descs = np.vstack(mfovs_descs_arrays)
            return np.array(mfovs_kps), np.array(mfovs_descs)

        mfovs1_kps, mfovs1_descs = get_kps_descs(mfovs1, sec1_cache)
        mfovs2_kps, mfovs2_descs = get_kps_descs(mfovs2, sec2_cache)

        model, filtered_matches = matcher.match_and_filter(
            mfovs1_kps, mfovs1_descs, mfovs2_kps, mfovs2_descs)
        return mfovs1, model, filtered_matches

    def pre_match_sections(self, sec1, sec2, sec1_cache, sec2_cache, pool):
        """
        Performs a section to section pre-matching by detecting blobs in each section,
        then performing a global section matching, and then a per-mfov (of sec1) refinement of the matches.
        Returns a map from each mfov index of sec1 to a tuple of its transformation model to sec2 and the filtered matches.
        """
        pre_match_res = {}

        # dispatch blob computation
        sec1_features_num = self.compute_section_blobs(sec1, sec1_cache, pool)
        sec2_features_num = self.compute_section_blobs(sec2, sec2_cache, pool)

        # compute a section to section global affine transform
        # collect all features for each section
        sec1_kps, sec1_descs = PreMatch3DFullSectionThenMfovsBlobs.collect_all_features(
            sec1_cache)
        sec2_kps, sec2_descs = PreMatch3DFullSectionThenMfovsBlobs.collect_all_features(
            sec2_cache)

        global_model, global_filtered_matches = self._matcher.match_and_filter(
            sec1_kps, sec1_descs, sec2_kps, sec2_descs)
        if global_model is None:
            logger.report_event(
                "No global model found between section {} (all mfovs) and section {} (all mfovs)"
                .format(sec1.canonical_section_name,
                        sec2.canonical_section_name),
                log_level=logging.WARNING)
            return None
        logger.report_event(
            "Global model found between section {} (all mfovs) and section {} (all mfovs):\n{}"
            .format(sec1.canonical_section_name, sec2.canonical_section_name,
                    global_model.get_matrix()),
            log_level=logging.INFO)
        logger.report_event(
            "Decomposed affine matrix: {}".format(
                mb_aligner.common.ransac.decompose_affine_matrix(
                    global_model.get_matrix())),
            log_level=logging.DEBUG)

        if sec1.mfovs_num == 1:
            logger.report_event(
                "Section {} has a single mfov, using the global model between section {} and section {}:\n{}"
                .format(sec1.canonical_section_name,
                        sec1.canonical_section_name,
                        sec2.canonical_section_name,
                        global_model.get_matrix()),
                log_level=logging.INFO)

            mfov_index = next(sec1.mfovs()).mfov_index

            pre_match_res[mfov_index] = (global_model, global_filtered_matches)
            return pre_match_res

        # Create an rtree of section2's tile bounding boxes, for faster spatial search
        # TODO - maybe store it in cache, because it might be used by other comparisons of this section
        sec2_rtree = tinyr.RTree(interleaved=False, max_cap=5, min_cap=2)
        for t in sec2.tiles():
            sec2_rtree.insert(t, t.bbox)

        # refine the global transform to a local one
        async_results = []
        for mfov1 in sec1.mfovs():
            # find overlapping mfovs in sec2
            mfovs2 = PreMatch3DFullSectionThenMfovsBlobs.get_overlapping_mfovs(
                mfov1, sec2, global_model, sec2_rtree)
            logger.report_event(
                "Finding local model between section {} (mfov {}) and section {} (mfovs {})"
                .format(sec1.canonical_section_name, mfov1.mfov_index,
                        sec2.canonical_section_name, mfovs2),
                log_level=logging.INFO)
            # Note - the match_mfovs_features only reads from secX_cache, so we can send secX_cache._dict (the manager's part of that)
            res = pool.apply_async(
                PreMatch3DFullSectionThenMfovsBlobs.match_mfovs_features,
                (self._kwargs.get("matcher_params", {}), sec1_cache._dict,
                 sec2_cache._dict, [mfov1.mfov_index], mfovs2))
            async_results.append(res)

        for res in async_results:
            mfovs1, mfovs1_model, mfovs1_filtered_matches = res.get()
            assert (len(mfovs1) == 1)
            mfov_index = mfovs1[0]

            if mfovs1_model is None:
                logger.report_event(
                    "No local model found between section {} (mfov {}) and section {}"
                    .format(sec1.canonical_section_name, mfov_index,
                            sec2.canonical_section_name),
                    log_level=logging.INFO)
            else:
                logger.report_event(
                    "Found local model between section {} (mfov {}) and section {}:\n{}"
                    .format(sec1.canonical_section_name, mfov_index,
                            sec2.canonical_section_name,
                            mfovs1_model.get_matrix()),
                    log_level=logging.INFO)
                logger.report_event(
                    "Decomposed affine matrix: {}".format(
                        mb_aligner.common.ransac.decompose_affine_matrix(
                            mfovs1_model.get_matrix())),
                    log_level=logging.DEBUG)
            pre_match_res[mfov_index] = (mfovs1_model, mfovs1_filtered_matches)

        return pre_match_res
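
For context, a hedged driver sketch for the class above, assuming two loaded section objects (sec1, sec2), the codebase's shared cache objects (which expose the ._dict used by pre_match_sections), and a standard multiprocessing pool; the constructor keys mirror the kwargs read in __init__:

from multiprocessing import Pool

pre_matcher = PreMatch3DFullSectionThenMfovsBlobs(
    blob_detector={},    # kwargs forwarded to BlobDetector2D.create_detector
    matcher_params={})   # kwargs forwarded to FeaturesMatcher

with Pool(processes=8) as pool:
    # Maps each sec1 mfov index to (model_to_sec2, filtered_matches), or None on failure.
    pre_match_res = pre_matcher.pre_match_sections(sec1, sec2, sec1_cache, sec2_cache, pool)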
Example #5
    def create_2d_matcher(self):
        matcher_params = self._conf.get('matcher_params', {})
        # The matcher type is actually determined by the detector type (the detector's output)
        matcher_init_fn = FeaturesDetector.get_matcher_init_fn(
            self._conf['detector_type'])
        return FeaturesMatcher(matcher_init_fn, **matcher_params)
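
A hedged sketch of the configuration this factory reads; only the two keys used above matter, and the concrete values shown are assumptions:

conf = {
    'detector_type': FeaturesDetector.Type.ORB.name,  # determines the matcher type
    'matcher_params': {},  # forwarded verbatim to FeaturesMatcher
}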
Example #6
    class FeaturesBlockMultipleMatcher(object):
        def __init__(self, sec1, sec2, sec1_to_sec2_transform,
                     sec1_cache_features, sec2_cache_features, **kwargs):
            self._template_size = kwargs.get("template_size", 200)
            self._search_window_size = kwargs.get("search_window_size",
                                                  8 * self._template_size)

            self._debug_save_matches = None

            detector_type = kwargs.get("detector_type",
                                       FeaturesDetector.Type.ORB.name)
            self._matcher = FeaturesMatcher(
                FeaturesDetector.get_matcher_init_fn(detector_type),
                **kwargs.get("matcher_params", {}))

            self._template_side = self._template_size / 2
            self._search_window_side = self._search_window_size / 2

            self._sec1 = sec1
            self._sec2 = sec2
            self._sec1_to_sec2_transform = sec1_to_sec2_transform
            self._inverse_model = FeaturesBlockMultipleMatcherDispatcher.FeaturesBlockMultipleMatcher.inverse_transform(
                self._sec1_to_sec2_transform)

            self._sec1_cache_features = sec1_cache_features
            self._sec2_cache_features = sec2_cache_features

            # Create an rtree for each section's tiles, to quickly find the relevant tiles
            self._sec1_tiles_rtree = FeaturesBlockMultipleMatcherDispatcher.FeaturesBlockMultipleMatcher._create_tiles_bbox_rtree(
                sec1)
            self._sec2_tiles_rtree = FeaturesBlockMultipleMatcherDispatcher.FeaturesBlockMultipleMatcher._create_tiles_bbox_rtree(
                sec2)

        @staticmethod
        def _create_tiles_bbox_rtree(sec):
            sec_tiles_rtree = tinyr.RTree(interleaved=False,
                                          max_cap=5,
                                          min_cap=2)
            for t_idx, t in enumerate(sec.tiles()):
                bbox = t.bbox
                # using the (x_min, x_max, y_min, y_max) notation
                sec_tiles_rtree.insert(t_idx, bbox)
            return sec_tiles_rtree

        def set_debug_dir(self, debug_dir):
            self._debug_save_matches = True
            self._debug_dir = debug_dir

        @staticmethod
        def inverse_transform(model):
            mat = model.get_matrix()
            new_model = models.AffineModel(np.linalg.inv(mat))
            return new_model
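        # Illustrative property (assuming models.AffineModel wraps an invertible
        # matrix): inverse_transform(m).apply(m.apply(pts)) recovers pts up to
        # floating-point error.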

        @staticmethod
        def _fetch_sec_features(sec, sec_tiles_rtree, sec_cache_features,
                                bbox):
            relevant_features = [[], []]
            rect_res = sec_tiles_rtree.search(bbox)
            for t_idx in rect_res:
                k = "{}_t{}".format(sec.canonical_section_name, t_idx)
                assert (k in sec_cache_features)
                tile_features_kps, tile_features_descs = sec_cache_features[k]
                # find all the features that overlap with the bbox
                bbox_mask = (tile_features_kps[:, 0] >= bbox[0]) & (tile_features_kps[:, 0] <= bbox[1]) &\
                            (tile_features_kps[:, 1] >= bbox[2]) & (tile_features_kps[:, 1] <= bbox[3])
                if np.any(bbox_mask):
                    relevant_features[0].append(tile_features_kps[bbox_mask])
                    relevant_features[1].append(tile_features_descs[bbox_mask])
            if len(relevant_features[0]) == 0:
                return relevant_features  # [[], []]
            return np.vstack(relevant_features[0]), np.vstack(
                relevant_features[1])

        def match_sec1_to_sec2_mfov(self, sec1_pts):
            valid_matches = [[], [], []]
            invalid_matches = [[], []]
            if len(sec1_pts) == 0:
                return valid_matches, invalid_matches

            sec1_pts = np.atleast_2d(sec1_pts)

            # Apply the mfov transformation to compute estimated location on sec2
            sec1_mfov_pts_on_sec2 = self._sec1_to_sec2_transform.apply(
                sec1_pts)

            for sec1_pt, sec2_pt_estimated in zip(sec1_pts,
                                                  sec1_mfov_pts_on_sec2):

                # Fetch the features in a template-sized window around sec1_pt (before transformation)
                from_x1, from_y1 = sec1_pt - self._template_side
                to_x1, to_y1 = sec1_pt + self._template_side
                sec1_pt_features_kps, sec1_pt_features_descs = FeaturesBlockMultipleMatcherDispatcher.FeaturesBlockMultipleMatcher._fetch_sec_features(
                    self._sec1, self._sec1_tiles_rtree,
                    self._sec1_cache_features,
                    (from_x1, to_x1, from_y1, to_y1))

                if len(sec1_pt_features_kps) <= 1:
                    continue

                # Fetch the features in a large window around sec2_pt_estimated (using search_window_size)
                from_x2, from_y2 = sec2_pt_estimated - self._search_window_side
                to_x2, to_y2 = sec2_pt_estimated + self._search_window_side
                sec2_pt_est_features_kps, sec2_pt_est_features_descs = FeaturesBlockMultipleMatcherDispatcher.FeaturesBlockMultipleMatcher._fetch_sec_features(
                    self._sec2, self._sec2_tiles_rtree,
                    self._sec2_cache_features,
                    (from_x2, to_x2, from_y2, to_y2))

                if len(sec2_pt_est_features_kps) <= 1:
                    continue

                # apply the transformation on sec1 feature points locations
                sec1_pt_features_kps = self._sec1_to_sec2_transform.apply(
                    sec1_pt_features_kps)
                # Match the features
                transform_model, filtered_matches = self._matcher.match_and_filter(
                    sec1_pt_features_kps, sec1_pt_features_descs,
                    sec2_pt_est_features_kps, sec2_pt_est_features_descs)
                if transform_model is None:
                    invalid_matches[0].append(sec1_pt)
                    invalid_matches[1].append(1)
                else:
                    logger.report_event(
                        "{}: match found around area: {} (sec1) and {} (sec2) with {} matches"
                        .format(os.getpid(), sec1_pt, sec2_pt_estimated,
                                len(filtered_matches[0])),
                        log_level=logging.DEBUG)
                    # Take the matched points and apply the inverse transform on sec1_pts to get them back to sec1 locations
                    matches_pts_sec1 = self._inverse_model.apply(
                        filtered_matches[0])
                    # add all the matched points
                    valid_matches[0].extend(matches_pts_sec1)
                    valid_matches[1].extend(filtered_matches[1])
                    match_ratio = len(filtered_matches[0]) / len(sec1_pt_features_kps)
                    valid_matches[2].extend([match_ratio] * len(matches_pts_sec1))

            return valid_matches, invalid_matches

        def match_sec2_to_sec1_mfov(self, sec2_pts):
            valid_matches = [[], [], []]
            invalid_matches = [[], []]
            if len(sec2_pts) == 0:
                return valid_matches, invalid_matches

            # Assume the stored transform maps sec1 onto sec2; map the sec2 points back with the inverse model
            sec2_pts = np.atleast_2d(sec2_pts)

            sec2_pts_on_sec1 = self._inverse_model.apply(sec2_pts)

            for sec2_pt, sec1_pt_estimated in zip(sec2_pts, sec2_pts_on_sec1):

                # Fetch the features in a template-sized window around sec2_pt
                from_x2, from_y2 = sec2_pt - self._template_side
                to_x2, to_y2 = sec2_pt + self._template_side
                sec2_pt_features_kps, sec2_pt_features_descs = FeaturesBlockMultipleMatcherDispatcher.FeaturesBlockMultipleMatcher._fetch_sec_features(
                    self._sec2, self._sec2_tiles_rtree,
                    self._sec2_cache_features,
                    (from_x2, to_x2, from_y2, to_y2))

                if len(sec2_pt_features_kps) <= 1:
                    continue

                # Fetch the features in a large window around sec1_pt_estimated (using search_window_size)
                from_x1, from_y1 = sec1_pt_estimated - self._search_window_side
                to_x1, to_y1 = sec1_pt_estimated + self._search_window_side
                sec1_pt_est_features_kps, sec1_pt_est_features_descs = FeaturesBlockMultipleMatcherDispatcher.FeaturesBlockMultipleMatcher._fetch_sec_features(
                    self._sec1, self._sec1_tiles_rtree,
                    self._sec1_cache_features,
                    (from_x1, to_x1, from_y1, to_y1))

                if len(sec1_pt_est_features_kps) <= 1:
                    continue

                # apply the inverse transformation on sec2 feature points locations
                sec2_pt_features_kps = self._inverse_model.apply(
                    sec2_pt_features_kps)
                # Match the features
                transform_model, filtered_matches = self._matcher.match_and_filter(
                    sec2_pt_features_kps, sec2_pt_features_descs,
                    sec1_pt_est_features_kps, sec1_pt_est_features_descs)
                if transform_model is None:
                    invalid_matches[0].append(sec2_pt)
                    invalid_matches[1].append(1)
                else:
                    logger.report_event(
                        "{}: match found around area: {} (sec2) and {} (sec1) with {} matches"
                        .format(os.getpid(), sec2_pt, sec1_pt_estimated,
                                len(filtered_matches[0])),
                        log_level=logging.DEBUG)
                    # Take the matched points and apply the forward transform to map them back to sec2 locations
                    matches_pts_sec2 = self._sec1_to_sec2_transform.apply(
                        filtered_matches[0])
                    # add all the matched points
                    valid_matches[0].extend(matches_pts_sec2)
                    valid_matches[1].extend(filtered_matches[1])
                    match_ratio = len(filtered_matches[0]) / len(sec2_pt_features_kps)
                    valid_matches[2].extend([match_ratio] * len(matches_pts_sec2))

            return valid_matches, invalid_matches
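
Finally, a hedged end-to-end sketch of the block matcher above. The sections, the affine model, and the per-tile feature caches (keyed "<canonical_section_name>_t<tile_index>", the format read by _fetch_sec_features) are assumed to exist; the grid coordinates are made up for illustration:

import numpy as np

block_matcher = FeaturesBlockMultipleMatcherDispatcher.FeaturesBlockMultipleMatcher(
    sec1, sec2, sec1_to_sec2_affine,           # sections + a models.AffineModel
    sec1_cache_features, sec2_cache_features,  # {"<name>_t<idx>": (kps, descs)}
    template_size=200, search_window_size=1600)

# A coarse grid of sample points on sec1 (hypothetical coordinates).
xs, ys = np.meshgrid(np.arange(0, 40000, 4000), np.arange(0, 40000, 4000))
sec1_grid = np.column_stack([xs.ravel(), ys.ravel()]).astype(np.float64)

valid12, invalid12 = block_matcher.match_sec1_to_sec2_mfov(sec1_grid)
valid21, invalid21 = block_matcher.match_sec2_to_sec1_mfov(
    sec1_to_sec2_affine.apply(sec1_grid))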