Ejemplo n.º 1
0
class WrapDemo(ImageProcessDemo):
    """Interactive demo of affine and perspective image warps.

    A draggable polygon widget supplies the destination control points;
    the matching transform matrix is recomputed and the warped image is
    redrawn whenever the polygon, output size, or method changes.
    """
    TITLE = u"Affine & Perspective Demo"
    DEFAULT_IMAGE = "lena.jpg"

    # NOTE(review): trait name keeps the historical "afffine" spelling;
    # renaming it would break external references to the trait.
    # np.float was a plain alias of builtin float and was removed in
    # NumPy 1.24, so the builtin is used directly.
    m_afffine = Array(float, (2, 3))        # 2x3 affine transform matrix
    m_perspective = Array(float, (3, 3))    # 3x3 perspective transform matrix
    size = Tuple(Int, Int)                  # (width, height) of the warp output
    method = Enum(["Affine", "Perspective"])
    poly = Instance(PolygonWidget)          # draggable control-point widget
    points = Dict                           # "<method>_src"/"<method>_dst" arrays

    def __init__(self, **traits):
        super(WrapDemo, self).__init__(**traits)

        def on_method_changed(obj, name, old_value, new_value):
            # Save the polygon of the method being left, then restore the
            # stored destination points of the newly selected method.
            self.points["{}_dst".format(old_value)] = self.poly.points.copy()
            self.poly.set_points(self.points["{}_dst".format(new_value)])

        self.on_trait_change(on_method_changed, "method")
        self.connect_dirty("poly.changed,size,method")

    def control_panel(self):
        """Build the settings panel; only the matrix of the active method is visible."""
        return VGroup(
            Item("size", label=u"图像大小"),
            Item("method", label=u"变换类型", width=250),
            Item("m_afffine", label=u"变换矩阵",
                 editor=ArrayEditor(format_str="%g"), visible_when="method=='Affine'"),
            Item("m_perspective", label=u"变换矩阵",
                 editor=ArrayEditor(format_str="%g"), visible_when="method=='Perspective'"),
        )

    def init_draw(self):
        """Create the polygon widget and initialize its control points."""
        style = {"marker": "o"}
        self.poly = PolygonWidget(axe=self.axe, points=np.zeros((3, 2)), style=style)
        self.setup_widgets()

    def setup_widgets(self):
        """Reset output size and source/destination points for the current image."""
        h, w = self.img.shape[:2]
        self.size = w * 2, h * 2
        # Default destination polygons centered in the doubled canvas:
        # a triangle for the affine warp, a quadrilateral for perspective.
        self.points = {
            "Affine_dst": np.array([[w * 0.5, h * 0.5], [w * 1.5, h * 0.5], [w * 0.5, h * 1.5]]),
            "Perspective_dst": np.array([[w * 0.5, h * 0.5], [w * 1.5, h * 0.5],
                                         [w * 1.5, h * 1.5], [w * 0.5, h * 1.5]])
        }
        # Source points are the destinations shifted back to the original
        # image origin; cv2.get*Transform requires float32 input.
        offset = [w * 0.5, h * 0.5]
        self.points["Affine_src"] = (self.points["Affine_dst"] - offset).astype(np.float32)
        self.points["Perspective_src"] = (self.points["Perspective_dst"] - offset).astype(np.float32)
        self.poly.points = self.points["{}_dst".format(self.method)]
        self.poly.update()

    def _img_changed(self):
        # Re-derive widget geometry whenever a new image is loaded.
        if self.poly is not None:
            self.setup_widgets()

    def draw(self):
        """Warp the image with the transform defined by the polygon and redraw."""
        if self.poly is not None:
            dst = self.poly.points.copy()
            src = self.points["{}_src".format(self.method)]
            # Remember the current polygon so method switches restore it.
            self.points["{}_dst".format(self.method)] = dst

            dst = dst.astype(np.float32)
            if self.method == "Affine":
                self.m_afffine = cv2.getAffineTransform(src, dst)
                img2 = cv2.warpAffine(self.img, self.m_afffine,
                                      self.size, borderValue=[255]*4)
            elif self.method == "Perspective":
                self.m_perspective = cv2.getPerspectiveTransform(src, dst)
                img2 = cv2.warpPerspective(self.img, self.m_perspective,
                                           self.size, borderValue=[255]*4)
            self.draw_image(img2)
        else:
            # Widget not created yet: show the unmodified image.
            self.draw_image(self.img)
Ejemplo n.º 2
0
 def init_draw(self):
     """Create the polygon control widget and run the initial setup."""
     initial_points = np.zeros((3, 2))
     self.poly = PolygonWidget(axe=self.axe,
                               points=initial_points,
                               style={"marker": "o"})
     self.setup_widgets()
Ejemplo n.º 3
0
 def init_draw(self):
     """Create the polygon control widget and initialize its points."""
     initial_points = np.zeros((3, 2))
     self.poly = PolygonWidget(axe=self.axe,
                               points=initial_points,
                               style={"marker": "o"})
     self.init_poly()
Ejemplo n.º 4
0
class SURFDemo(ImageProcessDemo):
    """SURF feature-matching demo.

    The left half of the canvas shows the original image; the right half
    shows a perspective-warped copy controlled by a draggable polygon.
    SURF keypoints are matched between the two halves and drawn as line
    segments, colored by whether RANSAC judged each match an inlier.
    """
    TITLE = "SURF Demo"
    DEFAULT_IMAGE = "lena.jpg"
    SETTINGS = ["m_perspective", "hessian_threshold", "n_octaves"]
    # np.float was a plain alias of builtin float and was removed in
    # NumPy 1.24, so the builtin is used directly.
    m_perspective = Array(float, (3, 3))    # user-controlled warp matrix
    m_perspective2 = Array(float, (3, 3))   # homography recovered from matches

    hessian_threshold = Int(2000)
    n_octaves = Int(2)

    poly = Instance(PolygonWidget)          # draggable warp control widget

    def control_panel(self):
        """Build the settings panel (matrices and SURF detector parameters)."""
        return VGroup(
            Item("m_perspective",
                 label="变换矩阵",
                 editor=ArrayEditor(format_str="%g")),
            Item("m_perspective2",
                 label="变换矩阵",
                 editor=ArrayEditor(format_str="%g")),
            Item("hessian_threshold", label="hessianThreshold"),
            Item("n_octaves", label="nOctaves"))

    def __init__(self, **kwargs):
        super(SURFDemo, self).__init__(**kwargs)
        self.poly = None
        self.init_points = None
        # Collection holding the match line segments drawn over the image.
        self.lines = LineCollection([], linewidths=1, alpha=0.6, color="red")
        self.axe.add_collection(self.lines)
        self.connect_dirty("poly.changed,hessian_threshold,n_octaves")

    def init_poly(self):
        """Reset the polygon to the full right-half rectangle."""
        if self.poly is None:
            return
        h, w, _ = self.img_color.shape
        # Right half of the side-by-side canvas: x runs from w to 2*w.
        self.init_points = np.array([(w, 0), (2 * w, 0), (2 * w, h), (w, h)],
                                    np.float32)
        self.poly.set_points(self.init_points)
        self.poly.update()

    def init_draw(self):
        """Create the polygon widget and initialize its points."""
        style = {"marker": "o"}
        self.poly = PolygonWidget(axe=self.axe,
                                  points=np.zeros((3, 2)),
                                  style=style)
        self.init_poly()

    @on_trait_change("hessian_threshold, n_octaves")
    def calc_surf1(self):
        """Recompute SURF keypoints/descriptors of the left (original) image."""
        # NOTE(review): cv2.SURF is the OpenCV 2.x API; in OpenCV 3+ this
        # would be cv2.xfeatures2d.SURF_create — confirm the target version.
        self.surf = cv2.SURF(self.hessian_threshold, self.n_octaves)
        self.key_points1, self.features1 = self.surf.detectAndCompute(
            self.img_gray, None)
        self.key_positions1 = np.array([kp.pt for kp in self.key_points1])

    def _img_changed(self):
        """Prepare grayscale/side-by-side images and the FLANN matcher."""
        self.img_gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        self.img_color = cv2.cvtColor(self.img_gray, cv2.COLOR_GRAY2RGB)
        self.img_show = np.concatenate([self.img_color, self.img_color],
                                       axis=1)
        self.size = self.img_color.shape[1], self.img_color.shape[0]
        self.calc_surf1()

        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=100)

        self.matcher = cv2.FlannBasedMatcher(index_params, search_params)

        self.init_poly()

    def settings_loaded(self):
        """Restore the polygon from a persisted m_perspective matrix."""
        src = self.init_points.copy()
        w, h = self.size
        # Transform in left-half coordinates, then shift back to the right half.
        src[:, 0] -= w
        dst = cv2.perspectiveTransform(src[None, :, :], self.m_perspective)
        dst = dst.squeeze()
        dst[:, 0] += w
        self.poly.set_points(dst)
        self.poly.update()

    def draw(self):
        """Warp the right half, match SURF features, and draw the match lines."""
        if self.poly is None:
            return
        w, h = self.size
        src = self.init_points.copy()
        dst = self.poly.points.copy().astype(np.float32)
        # Work in left-half (single image) coordinates.
        src[:, 0] -= w
        dst[:, 0] -= w
        m = cv2.getPerspectiveTransform(src, dst)
        self.m_perspective = m
        img2 = cv2.warpPerspective(self.img_gray,
                                   m,
                                   self.size,
                                   borderValue=[255] * 4)
        self.img_show[:, w:, :] = img2[:, :, None]
        key_points2, features2 = self.surf.detectAndCompute(img2, None)

        key_positions2 = np.array([kp.pt for kp in key_points2])

        # k=1: keep only the single best match per left-image descriptor.
        # (Loop variable renamed from "m" to avoid shadowing the matrix above.)
        match_list = self.matcher.knnMatch(self.features1, features2, k=1)
        index1 = np.array([match[0].queryIdx for match in match_list])
        index2 = np.array([match[0].trainIdx for match in match_list])

        distances = np.array([match[0].distance for match in match_list])

        # Keep the 50 closest matches (or all, if fewer).
        n = min(50, len(distances))
        best_index = np.argsort(distances)[:n]
        matched_positions1 = self.key_positions1[index1[best_index]]
        matched_positions2 = key_positions2[index2[best_index]]

        self.m_perspective2, mask = cv2.findHomography(matched_positions1,
                                                       matched_positions2,
                                                       cv2.RANSAC)

        # Each row is (x1, y1, x2, y2); shift the second endpoint's x into
        # right-half canvas coordinates.
        lines = np.concatenate([matched_positions1, matched_positions2],
                               axis=1)
        lines[:, 2] += w
        line_colors = COLORS[mask.ravel()]
        self.lines.set_segments(lines.reshape(-1, 2, 2))
        self.lines.set_color(line_colors)
        self.draw_image(self.img_show)
Ejemplo n.º 5
0
class WrapDemo(ImageProcessDemo):
    """Interactive demo of affine and perspective image warps.

    A draggable polygon widget supplies the destination control points;
    the matching transform matrix is recomputed and the warped image is
    redrawn whenever the polygon, output size, or method changes.
    """
    TITLE = u"Affine & Perspective Demo"
    DEFAULT_IMAGE = "lena.jpg"

    # NOTE(review): trait name keeps the historical "afffine" spelling;
    # renaming it would break external references to the trait.
    # np.float was a plain alias of builtin float and was removed in
    # NumPy 1.24, so the builtin is used directly.
    m_afffine = Array(float, (2, 3))        # 2x3 affine transform matrix
    m_perspective = Array(float, (3, 3))    # 3x3 perspective transform matrix
    size = Tuple(Int, Int)                  # (width, height) of the warp output
    method = Enum(["Affine", "Perspective"])
    poly = Instance(PolygonWidget)          # draggable control-point widget
    points = Dict                           # "<method>_src"/"<method>_dst" arrays

    def __init__(self, **traits):
        super(WrapDemo, self).__init__(**traits)

        def on_method_changed(obj, name, old_value, new_value):
            # Save the polygon of the method being left, then restore the
            # stored destination points of the newly selected method.
            self.points["{}_dst".format(old_value)] = self.poly.points.copy()
            self.poly.set_points(self.points["{}_dst".format(new_value)])

        self.on_trait_change(on_method_changed, "method")
        self.connect_dirty("poly.changed,size,method")

    def control_panel(self):
        """Build the settings panel; only the matrix of the active method is visible."""
        return VGroup(
            Item("size", label=u"图像大小"),
            Item("method", label=u"变换类型", width=250),
            Item("m_afffine",
                 label=u"变换矩阵",
                 editor=ArrayEditor(format_str="%g"),
                 visible_when="method=='Affine'"),
            Item("m_perspective",
                 label=u"变换矩阵",
                 editor=ArrayEditor(format_str="%g"),
                 visible_when="method=='Perspective'"),
        )

    def init_draw(self):
        """Create the polygon widget and initialize its control points."""
        style = {"marker": "o"}
        self.poly = PolygonWidget(axe=self.axe,
                                  points=np.zeros((3, 2)),
                                  style=style)
        self.setup_widgets()

    def setup_widgets(self):
        """Reset output size and source/destination points for the current image."""
        h, w = self.img.shape[:2]
        self.size = w * 2, h * 2
        # Default destination polygons centered in the doubled canvas:
        # a triangle for the affine warp, a quadrilateral for perspective.
        self.points = {
            "Affine_dst":
            np.array([[w * 0.5, h * 0.5], [w * 1.5, h * 0.5],
                      [w * 0.5, h * 1.5]]),
            "Perspective_dst":
            np.array([[w * 0.5, h * 0.5], [w * 1.5, h * 0.5],
                      [w * 1.5, h * 1.5], [w * 0.5, h * 1.5]])
        }
        # Source points are the destinations shifted back to the original
        # image origin; cv2.get*Transform requires float32 input.
        offset = [w * 0.5, h * 0.5]
        self.points["Affine_src"] = (self.points["Affine_dst"] -
                                     offset).astype(np.float32)
        self.points["Perspective_src"] = (self.points["Perspective_dst"] -
                                          offset).astype(np.float32)
        self.poly.points = self.points["{}_dst".format(self.method)]
        self.poly.update()

    def _img_changed(self):
        # Re-derive widget geometry whenever a new image is loaded.
        if self.poly is not None:
            self.setup_widgets()

    def draw(self):
        """Warp the image with the transform defined by the polygon and redraw."""
        if self.poly is not None:
            dst = self.poly.points.copy()
            src = self.points["{}_src".format(self.method)]
            # Remember the current polygon so method switches restore it.
            self.points["{}_dst".format(self.method)] = dst

            dst = dst.astype(np.float32)
            if self.method == "Affine":
                self.m_afffine = cv2.getAffineTransform(src, dst)
                img2 = cv2.warpAffine(self.img,
                                      self.m_afffine,
                                      self.size,
                                      borderValue=[255] * 4)
            elif self.method == "Perspective":
                self.m_perspective = cv2.getPerspectiveTransform(src, dst)
                img2 = cv2.warpPerspective(self.img,
                                           self.m_perspective,
                                           self.size,
                                           borderValue=[255] * 4)
            self.draw_image(img2)
        else:
            # Widget not created yet: show the unmodified image.
            self.draw_image(self.img)
Ejemplo n.º 6
0
class SURFDemo(ImageProcessDemo):
    """SURF feature-matching demo.

    The left half of the canvas shows the original image; the right half
    shows a perspective-warped copy controlled by a draggable polygon.
    SURF keypoints are matched between the two halves and drawn as line
    segments, colored by whether RANSAC judged each match an inlier.
    """
    TITLE = "SURF Demo"
    DEFAULT_IMAGE = "lena.jpg"
    SETTINGS = ["m_perspective", "hessian_threshold", "n_octaves"]
    # np.float was a plain alias of builtin float and was removed in
    # NumPy 1.24, so the builtin is used directly.
    m_perspective = Array(float, (3, 3))    # user-controlled warp matrix
    m_perspective2 = Array(float, (3, 3))   # homography recovered from matches

    hessian_threshold = Int(2000)
    n_octaves = Int(2)

    poly = Instance(PolygonWidget)          # draggable warp control widget

    def control_panel(self):
        """Build the settings panel (matrices and SURF detector parameters)."""
        return VGroup(
            Item("m_perspective", label=u"变换矩阵", editor=ArrayEditor(format_str="%g")),
            Item("m_perspective2", label=u"变换矩阵", editor=ArrayEditor(format_str="%g")),
            Item("hessian_threshold", label=u"hessianThreshold"),
            Item("n_octaves", label=u"nOctaves")
        )

    def __init__(self, **kwargs):
        super(SURFDemo, self).__init__(**kwargs)
        self.poly = None
        self.init_points = None
        # Collection holding the match line segments drawn over the image.
        self.lines = LineCollection([], linewidths=1, alpha=0.6, color="red")
        self.axe.add_collection(self.lines)
        self.connect_dirty("poly.changed,hessian_threshold,n_octaves")

    def init_poly(self):
        """Reset the polygon to the full right-half rectangle."""
        if self.poly is None:
            return
        h, w, _ = self.img_color.shape
        # Right half of the side-by-side canvas: x runs from w to 2*w.
        self.init_points = np.array([(w, 0), (2*w, 0), (2*w, h), (w, h)], np.float32)
        self.poly.set_points(self.init_points)
        self.poly.update()

    def init_draw(self):
        """Create the polygon widget and initialize its points."""
        style = {"marker": "o"}
        self.poly = PolygonWidget(axe=self.axe, points=np.zeros((3, 2)), style=style)
        self.init_poly()

    @on_trait_change("hessian_threshold, n_octaves")
    def calc_surf1(self):
        """Recompute SURF keypoints/descriptors of the left (original) image."""
        # NOTE(review): cv2.SURF is the OpenCV 2.x API; in OpenCV 3+ this
        # would be cv2.xfeatures2d.SURF_create — confirm the target version.
        self.surf = cv2.SURF(self.hessian_threshold, self.n_octaves)
        self.key_points1, self.features1 = self.surf.detectAndCompute(self.img_gray, None)
        self.key_positions1 = np.array([kp.pt for kp in self.key_points1])

    def _img_changed(self):
        """Prepare grayscale/side-by-side images and the FLANN matcher."""
        self.img_gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        self.img_color = cv2.cvtColor(self.img_gray, cv2.COLOR_GRAY2RGB)
        self.img_show = np.concatenate([self.img_color, self.img_color], axis=1)
        self.size = self.img_color.shape[1], self.img_color.shape[0]
        self.calc_surf1()

        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=100)

        self.matcher = cv2.FlannBasedMatcher(index_params, search_params)

        self.init_poly()

    def settings_loaded(self):
        """Restore the polygon from a persisted m_perspective matrix."""
        src = self.init_points.copy()
        w, h = self.size
        # Transform in left-half coordinates, then shift back to the right half.
        src[:, 0] -= w
        dst = cv2.perspectiveTransform(src[None, :, :], self.m_perspective)
        dst = dst.squeeze()
        dst[:, 0] += w
        self.poly.set_points(dst)
        self.poly.update()

    def draw(self):
        """Warp the right half, match SURF features, and draw the match lines."""
        if self.poly is None:
            return
        w, h = self.size
        src = self.init_points.copy()
        dst = self.poly.points.copy().astype(np.float32)
        # Work in left-half (single image) coordinates.
        src[:, 0] -= w
        dst[:, 0] -= w
        m = cv2.getPerspectiveTransform(src, dst)
        self.m_perspective = m
        img2 = cv2.warpPerspective(self.img_gray, m, self.size, borderValue=[255]*4)
        self.img_show[:, w:, :] = img2[:, :, None]
        key_points2, features2 = self.surf.detectAndCompute(img2, None)

        key_positions2 = np.array([kp.pt for kp in key_points2])

        # k=1: keep only the single best match per left-image descriptor.
        # (Loop variable renamed from "m" to avoid shadowing the matrix above.)
        match_list = self.matcher.knnMatch(self.features1, features2, k=1)
        index1 = np.array([match[0].queryIdx for match in match_list])
        index2 = np.array([match[0].trainIdx for match in match_list])

        distances = np.array([match[0].distance for match in match_list])

        # Keep the 50 closest matches (or all, if fewer).
        n = min(50, len(distances))
        best_index = np.argsort(distances)[:n]
        matched_positions1 = self.key_positions1[index1[best_index]]
        matched_positions2 = key_positions2[index2[best_index]]

        self.m_perspective2, mask = cv2.findHomography(matched_positions1, matched_positions2, cv2.RANSAC)

        # Each row is (x1, y1, x2, y2); shift the second endpoint's x into
        # right-half canvas coordinates.
        lines = np.concatenate([matched_positions1, matched_positions2], axis=1)
        lines[:, 2] += w
        line_colors = COLORS[mask.ravel()]
        self.lines.set_segments(lines.reshape(-1, 2, 2))
        self.lines.set_color(line_colors)
        self.draw_image(self.img_show)