Example #1 (score: 0)
    def align(self, img, shift_only=False, return_all=False, double_check=False, relative=False, orsa=False):
        """
        Align image on reference image

        :param img: numpy array containing the image to align to reference
        :param shift_only: if True (or when too few matches), only correct a translation (median shift), no affine fit
        :param return_all: return in addition to the image, keypoints, matching keypoints, and transformations as a dict
        :param double_check: discard matches deviating by more than 4 sigma and re-fit the transformation
        :param relative: update reference keypoints with those from current image to perform relative alignment
        :param orsa: filter the matches with ORSA (requires the optional "feature" module)
        :return: aligned image, or a dict with all informations when return_all is True, or None when nothing matches
        """
        logger.debug("ref_keypoints: %s" % self.ref_kp.size)
        if self.RGB:
            data = numpy.ascontiguousarray(img, numpy.uint8)
        else:
            data = numpy.ascontiguousarray(img, numpy.float32)
        with self.sem:
            # Upload the image to the device and extract its keypoints
            cpy = pyopencl.enqueue_copy(self.queue, self.buffers["input"].data, data)
            if self.profile:
                self.events.append(("Copy H->D", cpy))
            cpy.wait()
            kp = self.sift.keypoints(self.buffers["input"])
            logger.debug("mod image keypoints: %s" % kp.size)
            raw_matching = self.match.match(self.buffers["ref_kp_gpu"], kp, raw_results=True)

            matching = numpy.recarray(shape=raw_matching.shape, dtype=MatchPlan.dtype_kp)
            len_match = raw_matching.shape[0]
            if len_match == 0:
                logger.warning("No matching keypoints")
                return
            # column 0: reference keypoints; column 1: keypoints of the image being aligned
            matching[:, 0] = self.ref_kp[raw_matching[:, 0]]
            matching[:, 1] = kp[raw_matching[:, 1]]

            if orsa:
                if feature:
                    matching = feature.sift_orsa(matching, self.shape, 1)
                else:
                    logger.warning("feature is not available. No ORSA filtering")

            if (len_match < 3 * 6) or (shift_only):  # 3 points per DOF
                if shift_only:
                    logger.debug("Shift Only mode: Common keypoints: %s" % len_match)
                else:
                    logger.warning("Shift Only mode: Common keypoints: %s" % len_match)
                # Too few matches for an affine fit: identity matrix + median displacement
                dx = matching[:, 1].x - matching[:, 0].x
                dy = matching[:, 1].y - matching[:, 0].y
                matrix = numpy.identity(2, dtype=numpy.float32)
                offset = numpy.array([+numpy.median(dy), +numpy.median(dx)], numpy.float32)
            else:
                logger.debug("Common keypoints: %s" % len_match)
                # Least-squares affine fit; reorder the 6 coefficients into the
                # (y, x) matrix/offset convention used by the transform kernel
                transform_matrix = matching_correction(matching)
                offset = numpy.array([transform_matrix[5], transform_matrix[2]], dtype=numpy.float32)
                matrix = numpy.empty((2, 2), dtype=numpy.float32)
                matrix[0, 0], matrix[0, 1] = transform_matrix[4], transform_matrix[3]
                matrix[1, 0], matrix[1, 1] = transform_matrix[1], transform_matrix[0]
            if double_check and (len_match >= 3 * 6):  # and abs(matrix - numpy.identity(2)).max() > 0.1:
                logger.warning("Validating keypoints, %s,%s" % (matrix, offset))
                # Flag matches deviating by more than 4 sigma in displacement, angle or log-scale
                dx = matching[:, 1].x - matching[:, 0].x
                dy = matching[:, 1].y - matching[:, 0].y
                dangle = matching[:, 1].angle - matching[:, 0].angle
                dscale = numpy.log(matching[:, 1].scale / matching[:, 0].scale)
                distance = numpy.sqrt(dx * dx + dy * dy)
                outlayer = numpy.zeros(distance.shape, numpy.int8)
                outlayer += abs((distance - distance.mean()) / distance.std()) > 4
                outlayer += abs((dangle - dangle.mean()) / dangle.std()) > 4
                outlayer += abs((dscale - dscale.mean()) / dscale.std()) > 4
                # was a stray print() to stdout: keep the information at debug level
                logger.debug("outlayer: %s" % outlayer)
                outlayersum = outlayer.sum()
                if outlayersum > 0 and not numpy.isinf(outlayersum):
                    # Re-fit the transformation on the inliers only
                    matching2 = matching[outlayer == 0]
                    transform_matrix = matching_correction(matching2)
                    offset = numpy.array([transform_matrix[5], transform_matrix[2]], dtype=numpy.float32)
                    matrix = numpy.empty((2, 2), dtype=numpy.float32)
                    matrix[0, 0], matrix[0, 1] = transform_matrix[4], transform_matrix[3]
                    matrix[1, 0], matrix[1, 1] = transform_matrix[1], transform_matrix[0]
            if relative:  # update stable part to perform a relative alignment
                self.ref_kp = kp
                if self.ROI is not None:
                    # Keep only keypoints falling inside the region of interest
                    kpx = numpy.round(self.ref_kp.x).astype(numpy.int32)
                    kpy = numpy.round(self.ref_kp.y).astype(numpy.int32)
                    masked = self.ROI[(kpy, kpx)].astype(bool)
                    logger.warning("Reducing keypoint list from %i to %i because of the ROI" % (self.ref_kp.size, masked.sum()))
                    self.ref_kp = self.ref_kp[masked]
                self.buffers["ref_kp_gpu"] = pyopencl.array.to_device(self.match.queue, self.ref_kp)
                # Accumulate the new transformation into the running 3x3 homogeneous matrix
                transfo = numpy.zeros((3, 3), dtype=numpy.float64)
                transfo[:2, :2] = matrix
                transfo[0, 2] = offset[0]
                transfo[1, 2] = offset[1]
                transfo[2, 2] = 1
                if self.relative_transfo is None:
                    self.relative_transfo = transfo
                else:
                    self.relative_transfo = numpy.dot(transfo, self.relative_transfo)
                matrix = numpy.ascontiguousarray(self.relative_transfo[:2, :2], dtype=numpy.float32)
                offset = numpy.ascontiguousarray(self.relative_transfo[:2, 2], dtype=numpy.float32)
            cpy1 = pyopencl.enqueue_copy(self.queue, self.buffers["matrix"].data, matrix)
            cpy2 = pyopencl.enqueue_copy(self.queue, self.buffers["offset"].data, offset)
            if self.profile:
                self.events += [("Copy matrix", cpy1), ("Copy offset", cpy2)]

            if self.RGB:
                shape = (4, self.shape[1], self.shape[0])
                transform = self.program.transform_RGB
            else:
                shape = self.shape[1], self.shape[0]
                transform = self.program.transform
            # Apply the (matrix, offset) transformation on the device
            ev = transform(self.queue, calc_size(shape, self.wg), self.wg,
                           self.buffers["input"].data,
                           self.buffers["output"].data,
                           self.buffers["matrix"].data,
                           self.buffers["offset"].data,
                           numpy.int32(self.shape[1]),
                           numpy.int32(self.shape[0]),
                           numpy.int32(self.outshape[1]),
                           numpy.int32(self.outshape[0]),
                           self.sift.buffers["min"].get()[0],
                           numpy.int32(1))
            if self.profile:
                self.events += [("transform", ev)]
            result = self.buffers["output"].get()

        if return_all:
            # Residual of each match after applying the transformation, in (y, x) order
            corr = numpy.dot(matrix, numpy.vstack((matching[:, 0].y, matching[:, 0].x))).T + offset.T - numpy.vstack((matching[:, 1].y, matching[:, 1].x)).T
            rms = numpy.sqrt((corr * corr).sum(axis=-1).mean())
            return {"result": result, "keypoint": kp, "matching": matching, "offset": offset, "matrix": matrix, "rms": rms}
        return result
Example #2 (score: 0)
    def align(self,
              img,
              shift_only=False,
              return_all=False,
              double_check=False,
              relative=False,
              orsa=False):
        """
        Align image on reference image

        :param img: numpy array containing the image to align to reference
        :param shift_only: if True (or when too few matches), only correct a translation (median shift), no affine fit
        :param return_all: return in addition to the image, keypoints, matching keypoints, and transformations as a dict
        :param double_check: discard matches deviating by more than 4 sigma and re-fit the transformation
        :param relative: update reference keypoints with those from current image to perform relative alignment
        :param orsa: filter the matches with ORSA (requires the optional "feature" module)
        :return: aligned image, or a dict with all informations when return_all is True, or None when nothing matches
        """
        logger.debug("ref_keypoints: %s" % self.ref_kp.size)
        if self.RGB:
            data = numpy.ascontiguousarray(img, numpy.uint8)
        else:
            data = numpy.ascontiguousarray(img, numpy.float32)
        with self.sem:
            # Upload the image to the device and extract its keypoints
            cpy = pyopencl.enqueue_copy(self.queue, self.buffers["input"].data,
                                        data)
            if self.profile:
                self.events.append(("Copy H->D", cpy))
            cpy.wait()
            kp = self.sift.keypoints(self.buffers["input"])
            logger.debug("mod image keypoints: %s" % kp.size)
            raw_matching = self.match.match(self.buffers["ref_kp_gpu"],
                                            kp,
                                            raw_results=True)

            matching = numpy.recarray(shape=raw_matching.shape,
                                      dtype=MatchPlan.dtype_kp)
            len_match = raw_matching.shape[0]
            if len_match == 0:
                logger.warning("No matching keypoints")
                return
            # column 0: reference keypoints; column 1: keypoints of the image being aligned
            matching[:, 0] = self.ref_kp[raw_matching[:, 0]]
            matching[:, 1] = kp[raw_matching[:, 1]]

            if orsa:
                if feature:
                    matching = feature.sift_orsa(matching, self.shape, 1)
                else:
                    logger.warning(
                        "feature is not available. No ORSA filtering")

            if (len_match < 3 * 6) or (shift_only):  # 3 points per DOF
                if shift_only:
                    logger.debug("Shift Only mode: Common keypoints: %s" %
                                 len_match)
                else:
                    logger.warning("Shift Only mode: Common keypoints: %s" %
                                   len_match)
                # Too few matches for an affine fit: identity matrix + median displacement
                dx = matching[:, 1].x - matching[:, 0].x
                dy = matching[:, 1].y - matching[:, 0].y
                matrix = numpy.identity(2, dtype=numpy.float32)
                offset = numpy.array([+numpy.median(dy), +numpy.median(dx)],
                                     numpy.float32)
            else:
                logger.debug("Common keypoints: %s" % len_match)

                # Least-squares affine fit; reorder the 6 coefficients into the
                # (y, x) matrix/offset convention used by the transform kernel
                transform_matrix = matching_correction(matching)
                offset = numpy.array(
                    [transform_matrix[5], transform_matrix[2]],
                    dtype=numpy.float32)
                matrix = numpy.empty((2, 2), dtype=numpy.float32)
                matrix[0,
                       0], matrix[0,
                                  1] = transform_matrix[4], transform_matrix[3]
                matrix[1,
                       0], matrix[1,
                                  1] = transform_matrix[1], transform_matrix[0]
            if double_check and (
                    len_match >=
                    3 * 6):  # and abs(matrix - numpy.identity(2)).max() > 0.1:
                logger.warning("Validating keypoints, %s,%s" %
                               (matrix, offset))
                # Flag matches deviating by more than 4 sigma in displacement,
                # angle or log-scale, then re-fit on the inliers only
                dx = matching[:, 1].x - matching[:, 0].x
                dy = matching[:, 1].y - matching[:, 0].y
                dangle = matching[:, 1].angle - matching[:, 0].angle
                dscale = numpy.log(matching[:, 1].scale / matching[:, 0].scale)
                distance = numpy.sqrt(dx * dx + dy * dy)
                outlayer = numpy.zeros(distance.shape, numpy.int8)
                outlayer += abs(
                    (distance - distance.mean()) / distance.std()) > 4
                outlayer += abs((dangle - dangle.mean()) / dangle.std()) > 4
                outlayer += abs((dscale - dscale.mean()) / dscale.std()) > 4
                outlayersum = outlayer.sum()
                if outlayersum > 0 and not numpy.isinf(outlayersum):
                    matching2 = matching[outlayer == 0]
                    transform_matrix = matching_correction(matching2)
                    offset = numpy.array(
                        [transform_matrix[5], transform_matrix[2]],
                        dtype=numpy.float32)
                    matrix = numpy.empty((2, 2), dtype=numpy.float32)
                    matrix[0, 0], matrix[
                        0, 1] = transform_matrix[4], transform_matrix[3]
                    matrix[1, 0], matrix[
                        1, 1] = transform_matrix[1], transform_matrix[0]
            if relative:  # update stable part to perform a relative alignment
                self.ref_kp = kp
                if self.ROI is not None:
                    # Keep only keypoints falling inside the region of interest
                    kpx = numpy.round(self.ref_kp.x).astype(numpy.int32)
                    kpy = numpy.round(self.ref_kp.y).astype(numpy.int32)
                    masked = self.ROI[(kpy, kpx)].astype(bool)
                    logger.warning(
                        "Reducing keypoint list from %i to %i because of the ROI"
                        % (self.ref_kp.size, masked.sum()))
                    self.ref_kp = self.ref_kp[masked]
                self.buffers["ref_kp_gpu"] = pyopencl.array.to_device(
                    self.match.queue, self.ref_kp)
                # Accumulate the new transformation into the running 3x3 homogeneous matrix
                transfo = numpy.zeros((3, 3), dtype=numpy.float64)
                transfo[:2, :2] = matrix
                transfo[0, 2] = offset[0]
                transfo[1, 2] = offset[1]
                transfo[2, 2] = 1
                if self.relative_transfo is None:
                    self.relative_transfo = transfo
                else:
                    self.relative_transfo = numpy.dot(transfo,
                                                      self.relative_transfo)
                matrix = numpy.ascontiguousarray(self.relative_transfo[:2, :2],
                                                 dtype=numpy.float32)
                offset = numpy.ascontiguousarray(self.relative_transfo[:2, 2],
                                                 dtype=numpy.float32)
            cpy1 = pyopencl.enqueue_copy(self.queue,
                                         self.buffers["matrix"].data, matrix)
            cpy2 = pyopencl.enqueue_copy(self.queue,
                                         self.buffers["offset"].data, offset)
            if self.profile:
                self.events += [("Copy matrix", cpy1), ("Copy offset", cpy2)]

            if self.RGB:
                shape = (4, self.shape[1], self.shape[0])
                transform = self.program.transform_RGB
            else:
                shape = self.shape[1], self.shape[0]
                transform = self.program.transform
            # Apply the (matrix, offset) transformation on the device
            ev = transform(self.queue, calc_size(shape, self.wg), self.wg,
                           self.buffers["input"].data,
                           self.buffers["output"].data,
                           self.buffers["matrix"].data,
                           self.buffers["offset"].data,
                           numpy.int32(self.shape[1]),
                           numpy.int32(self.shape[0]),
                           numpy.int32(self.outshape[1]),
                           numpy.int32(self.outshape[0]),
                           self.sift.buffers["min"].get()[0], numpy.int32(1))
            if self.profile:
                self.events += [("transform", ev)]
            result = self.buffers["output"].get()

        if return_all:
            # Residual of each match after applying the transformation, in (y, x) order
            corr = numpy.dot(
                matrix, numpy.vstack(
                    (matching[:, 0].y,
                     matching[:, 0].x))).T + offset.T - numpy.vstack(
                         (matching[:, 1].y, matching[:, 1].x)).T
            rms = numpy.sqrt((corr * corr).sum(axis=-1).mean())

            return {
                "result": result,
                "keypoint": kp,
                "matching": matching,
                "offset": offset,
                "matrix": matrix,
                "rms": rms
            }
        return result
Example #3 (score: 0)
# Build the SIFT and matching plans on the GPU; the first image provides the
# reference keypoints that every following frame is matched against.
print(img.shape)
plan = sift.SiftPlan(template=img, devicetype="gpu")
match = sift.MatchPlan(devicetype='GPU')
kp = plan.keypoints(img)
print(len(kp))
# Per-frame displacement (dx, dy) and rotation estimates (dt from keypoint
# angles, dr from the centered point clouds).
# NOTE(review): pos is never appended to in this loop — confirm it is filled
# further down, otherwise it is dead code.
pos = {"dx":[], "dy":[], "dt":[], "dr":[]}
# NOTE(review): `last` is never re-assigned inside the loop, so every frame is
# matched against the FIRST image's keypoints (absolute, not frame-to-frame,
# alignment) — confirm this is intended given the variable name.
last = kp
# NOTE(review): no mode given — the h5py default mode changed over versions;
# consider h5py.File("keypoints.h5", "a") to be explicit.
h5 = h5py.File("keypoints.h5")
for fn in images:
    img = imread(fn)
    kp = plan.keypoints(img)
    # cache each frame's keypoints in the HDF5 file under the file name
    h5[fn] = kp
    m = match.match(kp,last)
    dx = dy = dt = dr = 0
    if len(m):
        # ORSA filtering keeps only the geometrically-consistent matches
        n = feature.sift_orsa(m)
        #cutof = m[:,:].scale.mean() + m[:,:].scale.std()
        #(m[:,0].scale - m[:,1].scale)/m[:,0].scale < 0.2
        #keep = numpy.logical_and((abs(m[:,0].scale - m[:,1].scale)/m[:,0].scale < 0.2), m[:,:].scale.max(axis=-1)<cutof)
        #n = m[keep]
        if len(n):
            # median displacement between matched keypoints
            dx = float(numpy.median(n[:,0].x-n[:,1].x))
            dy = float(numpy.median(n[:,0].y-n[:,1].y))
            # median orientation difference, wrapped into [-pi, pi)
            dt = float(numpy.median((n[:,0].angle-n[:,1].angle+numpy.pi)%(2*numpy.pi)-numpy.pi))
            # rotation angle estimated from the two centered point clouds
            x = n[:,0].x - n[:,0].x.mean()
            y = n[:,0].y - n[:,0].y.mean()
            w = n[:,1].x - n[:,1].x.mean()
            z = n[:,1].y - n[:,1].y.mean()
            dr = float(numpy.arctan2((w*y-z*x).sum(), (w*x+z*y).sum()))

    print("%s \t dx:%4.1f \t dy=%4.1f \t dt=%5.3f \t dr=%5.3f"%(fn, dx, dy, dt, dr))