Example #1
    def __init__(self, **traits):
        super(HistDemo, self).__init__(**traits)
        img = cv.imread("lena.jpg")
        gray_img = cv.Mat()
        cv.cvtColor(img, gray_img, cv.CV_BGR2GRAY)
        self.img = gray_img
        self.img2 = self.img.clone()
        result = cv.MatND()

        r = cv.vector_float32([0, 256])
        ranges = cv.vector_vector_float32([r])

        cv.calcHist(cv.vector_Mat([self.img]),
                    channels=cv.vector_int([0]),
                    mask=cv.Mat(),
                    hist=result,
                    histSize=cv.vector_int([256]),
                    ranges=ranges)

        data = ArrayPlotData(x=np.arange(0, len(result[:])), y=result[:])
        self.plot = Plot(data, padding=10)
        line = self.plot.plot(("x", "y"))[0]
        self.select_tool = RangeSelection(line, left_button_selects=True)
        line.tools.append(self.select_tool)
        self.select_tool.on_trait_change(self._selection_changed, "selection")
        line.overlays.append(RangeSelectionOverlay(component=line))

        cv.imshow("Hist Demo", self.img)

        self.timer = Timer(50, self.on_timer)
Example #2
    def redraw(self):

        edge_img = cv.Mat()
        # Edge detection
        cv.Canny(self.img_gray, edge_img, self.th1, self.th2)
        # Prepare the image on which results are drawn
        if self.show_canny:
            show_img = cv.Mat()
            cv.cvtColor(edge_img, show_img, cv.CV_GRAY2BGR)
        else:
            show_img = self.img.clone()
        # Line segment detection
        theta = self.theta / 180.0 * np.pi
        lines = cv.HoughLinesP(edge_img, self.rho, theta, self.hough_th,
                               self.minlen, self.maxgap)
        for line in lines:
            cv.line(show_img, cv.asPoint(line[:2]), cv.asPoint(line[2:]),
                    cv.CV_RGB(255, 0, 0), 2)
        # Circle detection
        circles = cv.HoughCircles(self.img_smooth,
                                  3,
                                  self.dp,
                                  self.mindist,
                                  param1=self.param1,
                                  param2=self.param2)

        for circle in circles:
            cv.circle(show_img, cv.Point(int(circle[0]), int(circle[1])),
                      int(circle[2]), cv.CV_RGB(0, 255, 0), 2)

        cv.imshow("Hough Demo", show_img)
Example #3
def findPairs(surf, objectKeypoints, objectDescriptors, imageKeypoints,
              imageDescriptors):
    ptpairs = []

    N = len(objectKeypoints)
    K = 2  # K as in K nearest neighbors
    dim = surf.descriptorSize()

    m_object = cv.asMat(objectDescriptors).reshape(1, N)
    m_image = cv.asMat(imageDescriptors).reshape(1, len(imageKeypoints))

    flann = cv.Index(m_image, cv.KDTreeIndexParams(4))

    indices = cv.Mat(N, K, cv.CV_32S)
    dists = cv.Mat(N, K, cv.CV_32F)

    flann.knnSearch(m_object, indices, dists, K, cv.SearchParams(250))

    indices = indices[:, 0].ravel()
    dists = dists.ndarray

    for i in xrange(N):
        # ratio test: keep the match only if the best distance is clearly
        # smaller than the second-best distance
        if dists[i, 0] < 0.6 * dists[i, 1]:
            ptpairs.append(
                (objectKeypoints[i].pt, imageKeypoints[int(indices[i])].pt))

    return ptpairs
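A hypothetical calling sketch (not part of the original example), using the SURF invocation style shown in Example #20 below: compute keypoints and descriptors for both images, then hand everything to findPairs. The file names and SURF parameters are placeholders.

# Hypothetical usage sketch; image files and SURF parameters are assumptions.
import pyopencv as cv

object_img = cv.imread("object.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
scene_img = cv.imread("scene.png", cv.CV_LOAD_IMAGE_GRAYSCALE)

surf = cv.SURF(500, 4, 2, True)
objectKeypoints = cv.vector_KeyPoint()
imageKeypoints = cv.vector_KeyPoint()
objectDescriptors = surf(object_img, cv.Mat(), objectKeypoints)
imageDescriptors = surf(scene_img, cv.Mat(), imageKeypoints)

ptpairs = findPairs(surf, objectKeypoints, objectDescriptors,
                    imageKeypoints, imageDescriptors)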
Example #4
 def __init__(self, *args, **kwargs):
     super(RemapDemo, self).__init__(*args, **kwargs)
     self.img = cv.imread("lena.jpg")
     self.size = self.img.size()
     self.w, self.h = self.size.width, self.size.height
     self.dstimg = cv.Mat()
     self.map1 = cv.Mat(self.size, cv.CV_32FC1)
     self.map2 = cv.Mat(self.size, cv.CV_32FC1)
     self.gridimg = self.make_grid_img()
     self.on_trait_change(self.redraw, "surf_func,range,view_height,grid")
Example #5
def show_video(fileorid):
    cv.namedWindow(str(fileorid), cv.CV_WINDOW_AUTOSIZE)
    video = cv.VideoCapture(fileorid) 
    img = cv.Mat() 
    img2 = cv.Mat()    
    while video.grab():     
        video.retrieve(img, 0) 
        #cv.cvtColor(img, img2, cv.CV_GBR2RGB)
        cv.imshow(str(fileorid), img)
        cv.waitKey(5)
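A minimal usage sketch (not in the original): pass a camera index or a video file path; the file name below is only an illustration.

show_video(0)             # default camera
# show_video("test.avi")  # or a video file on disk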
Example #6
    def redraw(self):
        # Show both images side by side
        w = self.img1.size().width
        h = self.img1.size().height
        show_img = cv.Mat(cv.Size(w * 2, h), cv.CV_8UC3)
        for i in range(3):
            show_img[:, :w, i] = self.img1[:]
            show_img[:, w:, i] = self.img2[:]

        # Draw the keypoints
        if self.draw_circle:
            self.draw_keypoints(show_img, self.keypoints1, 0)
            self.draw_keypoints(show_img, self.keypoints2, w)

        # Connect keypoint pairs whose distance is below the threshold
        for idx1 in np.where(self.mindist < self.max_distance)[0]:
            idx2 = self.idx_mindist[idx1]
            pos1 = self.keypoints1[int(idx1)].pt
            pos2 = self.keypoints2[int(idx2)].pt

            p1 = cv.Point(int(pos1.x), int(pos1.y))
            p2 = cv.Point(int(pos2.x) + w, int(pos2.y))
            cv.line(show_img, p1, p2, cv.CV_RGB(0, 255, 255), lineType=16)

        cv.imshow("SURF Demo", show_img)
Example #7
 def affine(self):
     self.img2 = cv.Mat()
     M = cv.asMat(self.m, force_single_channel=True)
     cv.warpAffine(self.img1,
                   self.img2,
                   M,
                   self.img1.size(),
                   borderValue=cv.CV_RGB(255, 255, 255))
Example #8
 def redraw(self):
     src = cv.asvector_Point2f(self.src)
     dst = cv.asvector_Point2f(self.dst)
     m = cv.getPerspectiveTransform(src, dst)
     print m
     img2 = cv.Mat()
     cv.warpPerspective(self.img, img2, m, self.img.size())
     cv.imshow("Perspective Demo", img2)
Example #9
 def redraw(self):
     M = cv.asMat(self.m, force_single_channel=True)
     size = cv.Size(int(self.size[0, 0]), int(self.size[0, 1]))
     img2 = cv.Mat()
     if size.width > 0 and size.height > 0:
         cv.warpAffine(self.img,
                       img2,
                       M,
                       size,
                       borderValue=cv.CV_RGB(255, 255, 255))
         cv.imshow("Affine Demo", img2)
Example #10
 def redraw(self):
     img2 = cv.Mat()
     element = cv.asMat(self.structing_element, force_single_channel=True)
     if self.process_type.startswith("MORPH_"):
         type = getattr(cv, self.process_type)
         cv.morphologyEx(self.img, img2, type, element, iterations=self.iter)
     else:
         func = getattr(cv, self.process_type)
         func(self.img, img2, element, iterations=self.iter)
         
     cv.imshow("Morphology Demo", img2)
Example #11
 def __init__(self, *args, **kwargs):
     super(SURFDemo, self).__init__(*args, **kwargs)
     img = cv.imread("lena_small.jpg")
     self.m = np.array([[0.8, -0.6, 60], [0.6, 0.7, -20]])
     self.img1 = cv.Mat()
     cv.cvtColor(img, self.img1, cv.CV_BGR2GRAY)
     self.affine()
     self.on_trait_change(self.redraw, "max_distance,draw_circle")
     self.on_trait_change(self.recalculate, "m,hessian_th,octaves,layers")
     self.recalculate()
     self.redraw()
Example #12
    def __init__(self):
        # Load the image
        img = cv.imread("lena_full.jpg")
        img2 = cv.Mat()
        cv.cvtColor(img, img2, cv.CV_BGR2GRAY)
        img = cv.Mat()
        cv.resize(img2, img, cv.Size(N, N))
        self.fimg = fft.fft2(img[:])  # frequency-domain representation of the image
        mag_img = np.log10(np.abs(self.fimg))

        # Create working arrays
        filtered_img = np.zeros((N, N), dtype=np.float)
        self.mask = np.zeros((N, N), dtype=np.float)
        self.mask_img = cv.asMat(self.mask)  # Mat view used to draw polygons onto self.mask

        # Create the plot data source
        self.data = ArrayPlotData(mag_img=fft.fftshift(mag_img),
                                  filtered_img=filtered_img,
                                  mask_img=self.mask)

        # Create the three image plots and their container
        meg_plot, img = self.make_image_plot("mag_img")
        mask_plot, _ = self.make_image_plot("mask_img")
        filtered_plot, _ = self.make_image_plot("filtered_img")
        self.plot = HPlotContainer(meg_plot, mask_plot, filtered_plot)

        # Create the lasso tool
        lasso_selection = LassoSelection(component=img)
        lasso_overlay = LassoOverlay(lasso_selection=lasso_selection,
                                     component=img,
                                     selection_alpha=0.3)
        img.tools.append(lasso_selection)
        img.overlays.append(lasso_overlay)
        self.lasso_selection = lasso_selection

        # Listen for lasso-selection events and start the timer
        lasso_selection.on_trait_change(self.lasso_updated,
                                        "disjoint_selections")
        self.timer = Timer(50, self.on_timer)
Example #13
 def __init__(self, *args, **kwargs):
     super(HoughDemo, self).__init__(*args, **kwargs)
     
     self.img = cv.imread("stuff.jpg")
     self.img_gray = cv.Mat()
     cv.cvtColor(self.img, self.img_gray, cv.CV_BGR2GRAY)
     
     self.img_smooth = self.img_gray.clone()
     cv.smooth(self.img_gray, self.img_smooth, cv.CV_GAUSSIAN, 7, 7, 0, 0)
     
     self.redraw()
     
     self.on_trait_change(self.redraw,
         "th1,th2,show_canny,rho,theta,hough_th,minlen,maxgap,dp,mindist,param1,param2")
Example #14
 def __init__(self, *args, **kwargs):
     super(InPaintDemo, self).__init__(*args, **kwargs)
     self.img = cv.imread("stuff.jpg")  # original image
     self.img2 = self.img.clone()  # preview of the inpaint result
     self.mask = cv.Mat(self.img.size(), cv.CV_8UC1)  # mask image storing the selected region
     self.mask[:] = 0
     self.data = ArrayPlotData(img=self.img[:, :, ::-1])
     self.plot = Plot(self.data,
                      padding=10,
                      aspect_ratio=float(self.img.size().width) /
                      self.img.size().height)
     self.plot.x_axis.visible = False
     self.plot.y_axis.visible = False
     imgplot = self.plot.img_plot("img", origin="top left")[0]
     self.painter = CirclePainter(component=imgplot)
     imgplot.overlays.append(self.painter)
Example #15
 def redraw(self):
     img2 = cv.Mat()
     kernel = cv.asMat(self.kernel * self.scale, force_single_channel=True)
     cv.filter2D(self.img, img2, -1, kernel)
     cv.imshow("Filter Demo", img2)
Example #16
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np
import time

img = cv.asMat(np.random.rand(1000, 1000))

row = cv.getGaussianKernel(7, -1)
col = cv.getGaussianKernel(5, -1)

kernel = cv.asMat(np.dot(col[:], row[:].T), force_single_channel=True)

img2 = cv.Mat()
img3 = cv.Mat()

start = time.clock()
cv.filter2D(img, img2, -1, kernel)
print "filter2D:", time.clock() - start

start = time.clock()
cv.sepFilter2D(img, img3, -1, row, col)
print "sepFilter2D:", time.clock() - start

print "error =", np.max(np.abs(img2[:] - img3[:]))
Example #17
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np

img = cv.imread("lena.jpg")
result = cv.MatND()

r = cv.vector_float32([0, 256])
ranges = cv.vector_vector_float32([r, r])

cv.calcHist(cv.vector_Mat([img]),
            channels=cv.vector_int([0, 1]),
            mask=cv.Mat(),
            hist=result,
            histSize=cv.vector_int([30, 20]),
            ranges=ranges)

hist, _x, _y = np.histogram2d(img[:, :, 0].flatten(),
                              img[:, :, 1].flatten(),
                              bins=(30, 20),
                              range=[(0, 256), (0, 256)])

print np.all(hist == result[:])
Example #18
import pyopencv as cv
import numpy as np
import matplotlib.pyplot as plt

y, x = np.ogrid[-2:2:300j, -2:2:300j]
z = (x**2 + y**2 - 1)**3 - x**2 * y**3

fig = plt.figure(figsize=(4, 4))
w, h = fig.bbox.width, fig.bbox.height

video = None
for level in np.linspace(-0.2, 0.2, 101):
    fig.clear()
    axe = fig.add_subplot(111, aspect=1)
    plt.contour(x.ravel(), y.ravel(), z, levels=[level])
    plt.title("level=%5.3f" % level)
    axe.xaxis.set_ticks([])
    axe.yaxis.set_ticks([])
    fig.canvas.draw()
    buf = fig.canvas.buffer_rgba(0, 0)
    array = np.frombuffer(buf, np.uint8)
    array.shape = h, w, 4

    if video is None:
        video = cv.VideoWriter()
        size = cv.Size2i(int(w), int(h))
        #video.open("contour.avi", cv.CV_FOURCC(*"DIB "), 30, size)
        video.open("contour.avi", cv.CV_FOURCC(*"ffds"), 30, size)
        image = cv.Mat(size, cv.CV_8UC3)
    image[:] = array[:, :, 2::-1]
    video << image
del video
Example #19
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np

y, x = np.ogrid[-1:1:250j, -1:1:250j]
z = np.sin(10 * np.sqrt(x * x + y * y)) * 0.5 + 0.5
np.round(z, decimals=1, out=z)

img = cv.asMat(z)

cv.namedWindow("demo1")
cv.imshow("demo1", img)

img2 = cv.Mat()
cv.Laplacian(img, img2, img.depth(), ksize=3)

cv.namedWindow("demo2")
cv.imshow("demo2", img2)
cv.waitKey(0)
Example #20
 def get_features(self, img):
     surf = cv.SURF(self.hessian_th, self.octaves, self.layers, True)
     keypoints = cv.vector_KeyPoint()
     features = surf(img, cv.Mat(), keypoints)
     return keypoints, np.array(features)
Example #21
        cv.imshow("Watershed Demo", img3)


# Color list for the marked regions
marks_color = [
    cv.CV_RGB(0, 0, 0),
    cv.CV_RGB(255, 0, 0),
    cv.CV_RGB(0, 255, 0),
    cv.CV_RGB(0, 0, 255),
    cv.CV_RGB(255, 255, 0),
    cv.CV_RGB(0, 255, 255),
    cv.CV_RGB(255, 0, 255),
    cv.CV_RGB(255, 255, 255)
]

# Convert the color list into a palette array, keeping only the first three channels
palette = np.array([c.ndarray[:-1] for c in marks_color], dtype=np.uint8)

seed = 1  # region colors are assigned starting from index 1
mask_opacity = 0.5  # opacity of the region-color overlay

img = cv.imread("fruits.jpg")
img2 = img.clone()  # copy used for drawing the initial seed regions
markers = cv.Mat(img2.size(), cv.CV_32S)  # array storing the initial region markers
markers[:] = 0

cv.namedWindow("Watershed Demo")
cv.imshow("Watershed Demo", img2)
cv.setMouseCallback("Watershed Demo", mouse_call_back)
cv.waitKey(0)
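The code that turns the watershed markers into the colored image img3 shown above is not included in this snippet; purely as an illustration (not the book's code), the palette lookup and blending could be done roughly like this:

# Illustration only: color the regions with the palette and blend into the photo.
m = markers[:]                                     # marker labels; -1 marks boundaries
colors = palette[np.clip(m, 0, len(palette) - 1)]
img3 = cv.asMat((img[:] * (1 - mask_opacity) + colors * mask_opacity).astype(np.uint8))
cv.imshow("Watershed Demo", img3)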
Example #22
# -*- coding: utf-8 -*-

import pyopencv as cv
import numpy as np

img = cv.imread("fruits_section.jpg")
img_hsv = cv.Mat()
cv.cvtColor(img, img_hsv, cv.CV_BGR2HSV)

channels = cv.vector_int([0, 1])
result = cv.MatND()

r = cv.vector_float32([0, 256])
ranges = cv.vector_vector_float32([r, r])

cv.calcHist(cv.vector_Mat([img_hsv]), channels, cv.Mat(), result,
            cv.vector_int([40, 40]), ranges)

result[:] /= np.max(result[:]) / 255
img2 = cv.imread("fruits.jpg")
img_hsv2 = cv.Mat()
cv.cvtColor(img2, img_hsv2, cv.CV_BGR2HSV)

img_bp = cv.Mat()
cv.calcBackProject(cv.vector_Mat([img_hsv2]),
                   channels=channels,
                   hist=result,
                   backProject=img_bp,
                   ranges=ranges)
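A short sketch (not part of the original snippet) to view the back-projection result:

# Sketch: display the back-projection image.
cv.imshow("Back Projection", img_bp)
cv.waitKey(0)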
Example #23
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np
import matplotlib.pyplot as plt

# Load the image and shrink it to half size
img0 = cv.imread("lena.jpg")
size = img0.size()
w, h = size.width, size.height
img1 = cv.Mat()
cv.resize(img0, img1, cv.Size(w // 2, h // 2))

# Various convolution kernels
kernels = [("Low-pass filter", np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]) * 0.1),
           ("High-pass filter", np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])),
           ("Edge detection", np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]))]

index = 0
for name, kernel in kernels:
    plt.subplot(131 + index)
    # Convert the kernel to a Mat object
    kmat = cv.asMat(kernel.astype(np.float), force_single_channel=True)
    img2 = cv.Mat()
    cv.filter2D(img1, img2, -1, kmat)
    # matplotlib expects RGB while OpenCV stores BGR, so reverse the channel order
    plt.imshow(img2[:, :, ::-1])
    plt.title(name)
    index += 1
    plt.gca().set_axis_off()
plt.subplots_adjust(0.02, 0, 0.98, 1, 0.02, 0)
plt.show()
Example #24
        cv.Scalar(255, 128, 0),
        cv.Scalar(255, 255, 0),
        cv.Scalar(255, 0, 0),
        cv.Scalar(255, 0, 255),
        cv.Scalar(255, 255, 255),
    ]

    # read the two images
    object_color = cv.imread(object_filename, cv.CV_LOAD_IMAGE_COLOR)
    image = cv.imread(scene_filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
    if not object_color or not image:
        print("Can not load %s and/or %s\n" \
            "Usage: find_obj [<object_filename> <scene_filename>]\n" \
            % (object_filename, scene_filename))
        exit(-1)
    object = cv.Mat(object_color.size(), cv.CV_8UC1)
    cv.cvtColor(object_color, object, cv.CV_BGR2GRAY)

    # corners
    src_corners = [
        cv.Point(0, 0),
        cv.Point(object.cols, 0),
        cv.Point(object.cols, object.rows),
        cv.Point(0, object.rows)
    ]
    dst_corners = [cv.Point()] * 4

    # find keypoints on both images
    surf = cv.SURF(500, 4, 2, True)
    mask = cv.Mat()
    tt = float(cv.getTickCount())
Example #25
# -*- coding: utf-8 -*-
import pyopencv as cv

img = cv.imread("lena.jpg")
img2 = cv.Mat(img.size(), cv.CV_8UC4)

w, h = img.size().width, img.size().height

def blend(img, img2): 
    """
    Blend two images; img2 has four channels (BGRA).
    """
    # use the alpha channel to compute img2's blend weight
    b = img2[:,:,3:] / 255.0     
    a = 1 - b  # blend weight for img

    # blend the two images
    img[:,:,:3] *= a  
    img[:,:,:3] += b * img2[:,:,:3]

img2[:] = 0
for i in xrange(0, w, w/10): 
    cv.line(img2, cv.Point(i,0), cv.Point(i, h),  
        cv.Scalar(0, 0, 255, i*255/w), 5)

blend(img, img2) 

img2[:] = 0        
for i in xrange(0, h, h/10):
    cv.line(img2, cv.Point(0,i), cv.Point(w, i), 
        cv.Scalar(0, 255, 0, i*255/h), 5)
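The snippet stops before the second grid is blended in; as a sketch of how it presumably continues (the window name is hypothetical):

# Sketch, not part of the original: blend the second grid and show the result.
blend(img, img2)
cv.imshow("Alpha Blend Demo", img)  # hypothetical window name
cv.waitKey(0)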
Example #26
# -*- coding: utf-8 -*-
import numpy as np
from numpy import fft
import pyopencv as cv
import matplotlib.pyplot as plt

N = 256
img = cv.imread("lena_full.jpg")
img2 = cv.Mat()
cv.cvtColor(img, img2, cv.CV_BGR2GRAY)
img = cv.Mat()
cv.resize(img2, img, cv.Size(N, N))

fimg = fft.fft2(img[:])
mag_img = np.log10(np.abs(fimg))
shift_mag_img = fft.fftshift(mag_img)

rects = [(80, 125, 85, 130), (90, 90, 95, 95), (150, 10, 250, 250),
         (110, 110, 146, 146)]

filtered_results = []
for i, (x0, y0, x1, y1) in enumerate(rects):
    mask = np.zeros((N, N), dtype=np.bool)
    mask[x0:x1 + 1, y0:y1 + 1] = True
    # also keep the conjugate-symmetric rectangle so that ifft2 yields a real image
    mask[N - x1:N - x0 + 1, N - y1:N - y0 + 1] = True
    mask = fft.fftshift(mask)
    fimg2 = fimg * mask
    filtered_img = fft.ifft2(fimg2).real
    filtered_results.append(filtered_img)

### Plotting ###