Example no. 1
def findPairs(surf, objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors):
    ptpairs = []

    N = len(objectKeypoints)
    K = 2 # K as in K nearest neighbors
    dim = surf.descriptorSize()

    m_object = cv.asMat(objectDescriptors).reshape(1, N)
    m_image = cv.asMat(imageDescriptors).reshape(1, len(imageKeypoints))

    flann = cv.Index(m_image,cv.KDTreeIndexParams(4))

    indices = cv.Mat(N, K, cv.CV_32S)
    dists = cv.Mat(N, K, cv.CV_32F)

    flann.knnSearch(m_object, indices, dists, K, cv.SearchParams(250))
    
    indices = indices[:,0].ravel()
    dists = dists.ndarray
    
    for i in xrange(N):
        if dists[i,0] < 0.6*dists[i,1]:
            ptpairs.append((objectKeypoints[i].pt, imageKeypoints[int(indices[i])].pt))
    
    return ptpairs
Example no. 2
def findPairs(surf, objectKeypoints, objectDescriptors, imageKeypoints,
              imageDescriptors):
    ptpairs = []

    N = len(objectKeypoints)
    K = 2  # K as in K nearest neighbors
    dim = surf.descriptorSize()

    m_object = cv.asMat(objectDescriptors).reshape(1, N)
    m_image = cv.asMat(imageDescriptors).reshape(1, len(imageKeypoints))

    flann = cv.Index(m_image, cv.KDTreeIndexParams(4))

    indices = cv.Mat(N, K, cv.CV_32S)
    dists = cv.Mat(N, K, cv.CV_32F)

    flann.knnSearch(m_object, indices, dists, K, cv.SearchParams(250))

    indices = indices[:, 0].ravel()
    dists = dists.ndarray

    for i in xrange(N):
        if dists[i, 0] < 0.6 * dists[i, 1]:
            ptpairs.append(
                (objectKeypoints[i].pt, imageKeypoints[int(indices[i])].pt))

    return ptpairs
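The pairing above keeps a match only when the best neighbour is clearly closer than the second best (Lowe's ratio test with a factor of 0.6). A minimal numpy-only sketch of the same test, shown for clarity and independent of pyopencv and FLANN (array names are hypothetical):

import numpy as np

def ratio_test_pairs(obj_desc, img_desc, ratio=0.6):
    # obj_desc: (N, D) object descriptors, img_desc: (M, D) scene descriptors
    pairs = []
    for i, d in enumerate(obj_desc):
        dists = np.sum((img_desc - d) ** 2, axis=1)   # squared L2 distances
        j0, j1 = np.argsort(dists)[:2]                # two nearest neighbours
        if dists[j0] < ratio * dists[j1]:             # Lowe's ratio test
            pairs.append((i, j0))
    return pairs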
Example no. 3
def get_polygons(contours, hierarchy,
                 convextest=False,
#                 hole=False,
                 nsides=5,
                 min_area=100,
                 perimeter_smooth_factor=0.001,
                 **kw):
    '''
    '''

    polygons = []
    brs = []
    areas = []
    for cont, hi in zip(contours, hierarchy.tolist()):
        cont = cv.asMat(cont)
#        for i in [0.01]:
        m = cv.arcLength(cont, True)
        result = cv.approxPolyDP_int(cont, m * perimeter_smooth_factor, True)

        res_mat = cv.asMat(result)
        area = abs(cv.contourArea(res_mat))
#        print 'areas', area
#        if hole:
#            hole_flag = hi[3] != -1
#        else:
#            hole_flag = True

#        if area > min_area:
#            print 'area', area,
#            print 'hole', hole_flag
#            print 'hi', hi
#            print 'sides', len(result),
#            print 'convext', cv.isContourConvex(res_mat),
#            ch = cv.asMat(cv.convexHull_int(cont))
#            ch = cv.asMat(ch.ndarray.flatten())
#            seq = cv.convexityDefects(cont, ch, cv.createMemStorage(0))
#
#        if not hole_flag:
#            continue

        if not len(result) > nsides:
            continue

        if not area > min_area:
            continue

        if convextest and not cv.isContourConvex(res_mat):
            continue

        polygons.append(result)
        brs.append(cv.boundingRect(res_mat))
        areas.append(area)

    return polygons, brs, areas
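A possible call site for the function above (sketch only; contours and hierarchy are assumed to come from an earlier contour-extraction step that is not shown here):

polygons, brs, areas = get_polygons(contours, hierarchy,
                                    convextest=True,
                                    nsides=4,
                                    min_area=500)
if areas:
    # pick the largest accepted polygon
    biggest = max(range(len(areas)), key=lambda i: areas[i])
    print 'largest polygon area:', areas[biggest]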
Example no. 4
def mouse_call_back(event, x, y, flags, user_data):
    global seed

    # on right-button release, reset the marker (seed) image
    if event == cv.CV_EVENT_RBUTTONUP:
        img2[:] = img[:]
        markers[:] = 0
        seed = 1
        cv.imshow("Watershed Demo", img2)

    if seed == len(marks_color): return

    # while the left button is held down, paint a seed on the marker image
    if flags == cv.CV_EVENT_FLAG_LBUTTON:
        pt = cv.Point(x, y)
        cv.circle(markers, pt, 5, cv.Scalar(seed, seed, seed, seed),
                  cv.CV_FILLED)
        cv.circle(img2, pt, 5, marks_color[seed], cv.CV_FILLED)
        cv.imshow("Watershed Demo", img2)

    # on left-button release, run watershed to segment the image
    if event == cv.CV_EVENT_LBUTTONUP:
        seed += 1
        tmp_markers = markers.clone()
        cv.watershed(img, tmp_markers)
        color_map = tmp_markers[:].astype(np.int)

        img3 = img2.clone()
        img4 = cv.asMat(palette[color_map])
        cv.addWeighted(img3, 1.0, img4, mask_opacity, 0, img3)
        cv.imshow("Watershed Demo", img3)
Example no. 5
 def redraw(self):
     M = cv.asMat(self.m, force_single_channel=True)
     size = cv.Size(int(self.size[0,0]), int(self.size[0,1]))
     img2 = cv.Mat()
     if size.width > 0 and size.height > 0:
         cv.warpAffine(self.img, img2, M, size, borderValue=cv.CV_RGB(255,255,255))
         cv.imshow("Affine Demo", img2)
Example no. 6
def mouse_call_back(event, x, y, flags, user_data):
    global seed
    
    # on right-button release, reset the marker (seed) image
    if event == cv.CV_EVENT_RBUTTONUP: 
        img2[:] = img[:]    
        markers[:] = 0
        seed = 1
        cv.imshow("Watershed Demo", img2)
        
    if seed == len(marks_color): return
    
    # while the left button is held down, paint a seed on the marker image
    if flags == cv.CV_EVENT_FLAG_LBUTTON: 
        pt = cv.Point(x, y)
        cv.circle(markers, pt, 5, cv.Scalar(seed,seed,seed,seed), cv.CV_FILLED)
        cv.circle(img2, pt, 5, marks_color[seed], cv.CV_FILLED)
        cv.imshow("Watershed Demo", img2)
        
    # on left-button release, run watershed to segment the image
    if event == cv.CV_EVENT_LBUTTONUP:  
        seed += 1
        tmp_markers = markers.clone() 
        cv.watershed(img, tmp_markers)
        color_map = tmp_markers[:].astype(np.int) 
        
        img3 = img2.clone()
        img4 = cv.asMat( palette[color_map] ) 
        cv.addWeighted(img3, 1.0, img4, mask_opacity, 0, img3) 
        cv.imshow("Watershed Demo", img3)
Example no. 7
 def affine(self):
     self.img2 = cv.Mat()
     M = cv.asMat(self.m, force_single_channel=True)
     cv.warpAffine(self.img1,
                   self.img2,
                   M,
                   self.img1.size(),
                   borderValue=cv.CV_RGB(255, 255, 255))
Example no. 8
def locatePlanarObject(ptpairs, src_corners, dst_corners):
    import numpy as _n

    n = len(ptpairs)
    if n < 4:
        return 0
    pt1 = cv.asMat(_n.array([cv.asndarray(pair[0]) for pair in ptpairs]))
    pt2 = cv.asMat(_n.array([cv.asndarray(pair[1]) for pair in ptpairs]))
    h = cv.findHomography(pt1, pt2, method=cv.RANSAC, ransacReprojThreshold=5)[:]

    for i in range(4):
        x = src_corners[i].x
        y = src_corners[i].y
        Z = 1./(h[2,0]*x + h[2,1]*y + h[2,2])
        X = (h[0,0]*x + h[0,1]*y + h[0,2])*Z
        Y = (h[1,0]*x + h[1,1]*y + h[1,2])*Z
        dst_corners[i] = cv.Point(int(X), int(Y))

    return 1
Example no. 9
 def redraw(self):
     M = cv.asMat(self.m, force_single_channel=True)
     size = cv.Size(int(self.size[0, 0]), int(self.size[0, 1]))
     img2 = cv.Mat()
     if size.width > 0 and size.height > 0:
         cv.warpAffine(self.img,
                       img2,
                       M,
                       size,
                       borderValue=cv.CV_RGB(255, 255, 255))
         cv.imshow("Affine Demo", img2)
Example no. 10
 def redraw(self):
     img2 = cv.Mat()
     element = cv.asMat(self.structing_element, force_single_channel=True)
     if self.process_type.startswith("MORPH_"):
         type = getattr(cv, self.process_type)
         cv.morphologyEx(self.img, img2, type, element, iterations=self.iter)
     else:
         func = getattr(cv, self.process_type)
         func(self.img, img2, element, iterations=self.iter)
         
     cv.imshow("Morphology Demo", img2)
Example no. 11
 def redraw(self):
     img2 = cv.Mat()
     element = cv.asMat(self.structing_element, force_single_channel=True)
     if self.process_type.startswith("MORPH_"):
         type = getattr(cv, self.process_type)
         cv.morphologyEx(self.img, img2, type, element, iterations=self.iter)
     else:
         func = getattr(cv, self.process_type)
         func(self.img, img2, element, iterations=self.iter)
         
     cv.imshow("Morphology Demo", img2)
Example no. 12
def locatePlanarObject(ptpairs, src_corners, dst_corners):
    import numpy as _n

    n = len(ptpairs)
    if n < 4:
        return 0
    pt1 = cv.asMat(_n.array([cv.asndarray(pair[0]) for pair in ptpairs]))
    pt2 = cv.asMat(_n.array([cv.asndarray(pair[1]) for pair in ptpairs]))
    h = cv.findHomography(pt1, pt2, method=cv.RANSAC,
                          ransacReprojThreshold=5)[:]

    for i in range(4):
        x = src_corners[i].x
        y = src_corners[i].y
        Z = 1. / (h[2, 0] * x + h[2, 1] * y + h[2, 2])
        X = (h[0, 0] * x + h[0, 1] * y + h[0, 2]) * Z
        Y = (h[1, 0] * x + h[1, 1] * y + h[1, 2]) * Z
        dst_corners[i] = cv.Point(int(X), int(Y))

    return 1
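The per-corner loop above writes a projective transform out by hand. An equivalent vectorised numpy sketch (h is the homography computed above; the four source corners are hypothetical):

import numpy as np

H = np.asarray(h)                               # 3x3 homography matrix
corners = np.array([[0, 0], [640, 0], [640, 480], [0, 480]], dtype=float)
homog = np.hstack([corners, np.ones((4, 1))])   # homogeneous coordinates
proj = homog.dot(H.T)                           # apply the homography
proj = proj[:, :2] / proj[:, 2:3]               # divide by Z, exactly as in the loop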
Example no. 13
def get_focus_measure(src, kind):
#    from numpy import r_
#    from scipy import fft
#    from pylab import plot, show
#    w = 100
#    h = 100
#    x = (640 - w) / 2
#    y = (480 - h) / 2
#    src = crop(src, x, y, w, h)
#    src = grayspace(src)
#    d = src.ndarray
#
# #    print d[0]
# #    print d[-1]
#    fftsig = fft(d)
#    d = abs(fftsig)
#    print d.shape
#    dst = src.clone()
#    cv.convertScaleAbs(cv.asMat(d), dst, 1, 0)
#    return dst

#    xs = xrange(len(ys))
#    plot(xs, ys)
#    show()
#    N = len(d)
#    f = 50000 * r_[0:(N / 2)] / N
#    n = len(f)
# #    print f
#    d = d.transpose()
#    d = abs(fftsig[:n]) / N
#    print d
# #    plot(f, d[0], 'b', f, d[1], 'g', f, d[2], 'r')
#    plot(f, d)
#    show()

    planes = cv.vector_Mat()
    src = cv.asMat(src)
    laplace = cv.Mat(src.size(), cv.CV_16SC1)
    colorlaplace = cv.Mat(src.size(), cv.CV_8UC3)

    cv.split(src, planes)
    for plane in planes:
        cv.Laplacian(plane, laplace, 3)
        cv.convertScaleAbs(laplace, plane, 1, 0)

    cv.merge(planes, colorlaplace)
    f = colorlaplace.ndarray.flatten()
#    f.sort()
#    print f[-int(len(f) * 0.1):], int(len(f) * 0.1), len(f)
#    len(f)
    return f[-int(len(f) * 0.1):].mean()
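Note that with f.sort() left commented out, the value returned above is the mean of the last 10% of pixels in scan order, not of the 10% strongest Laplacian responses. A numpy-only sketch of the sorted variant, in case that was the intent:

import numpy as np

def tail_mean(responses, fraction=0.1):
    # responses: flattened absolute-Laplacian image
    f = np.sort(responses.ravel())        # strongest responses end up at the tail
    k = max(1, int(len(f) * fraction))
    return f[-k:].mean()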
Example no. 14
    def process(self, input_images, connected_outs):
        if len(input_images) == 0:
            return FAIL     
        src = input_images['Input']   
        dist_res = int( self.getParamContent('Distance resolution') )
        angle_res = int( self.getParamContent('Angle resolution (degrees)') )
        acc_thresh = int( self.getParamContent('Accumulator threshold') )
        min_length = int( self.getParamContent('Minimum length') )
        max_gap = int( self.getParamContent('Maximum gap') )
        choice = self.getParamContent("Type of Hough transform")
        if src.ndim > 2:
            print "In '%s': The hough transform takes a binary image (or 8-bit) as input." %self.name
            return FAIL
        color_dst = numpy.empty( (src.shape[0], src.shape[1], 3),dtype='uint8' )
        pycv.cvtColor( pycv.asMat(src), pycv.asMat(color_dst), pycv.CV_GRAY2BGR )

        if choice == "Standard":
            lines = pycv.HoughLines( pycv.asMat(src), dist_res, pycv.CV_PI/angle_res, acc_thresh )
            margin = 0.04
            n=8
            pi = math.pi
            h,w = src.shape[0:2]
            for i in range(min(len(lines), int(self.getParamContent("draw # lines")))):
                l = lines[i]
                rho = l[0]
                theta = l[1]
                if theta > 3*pi/4: theta-=pi
                if abs(rho)<w/n and abs(theta)<margin: pass
                elif abs(rho)>w-w/n and abs(theta)<margin: pass
                elif abs(rho)<h/n and abs(theta-pi/2)<margin: pass
                elif abs(rho)>h-h/n and abs(theta-pi/2)<margin: pass
                else:
                    continue         
                a = math.cos(theta)
                b = math.sin(theta)
                x0 = a*rho 
                y0 = b*rho
                pt1 = pycv.Point( int(round(x0 + 2000*(-b))), int(round(y0 + 2000*(a))) )
                pt2 = pycv.Point( int(round(x0 - 2000*(-b))), int(round(y0 - 2000*(a))) ) 
                pycv.line( pycv.asMat(color_dst), pt1, pt2, pycv.CV_RGB(random.randint(0,255),
                                                            random.randint(0,255),
                                                            random.randint(0,255)), 2, 8 )
    
        else:
            lines = pycv.HoughLinesP( pycv.asMat(src), dist_res, 
                                    pycv.CV_PI/angle_res, acc_thresh, min_length, max_gap )
            for l in lines:
                pycv.line( pycv.asMat(color_dst), pycv.Point(int(l[0]), int(l[1])), 
                           pycv.Point(int(l[2]), int(l[3])), 
                           pycv.CV_RGB(*getRandColor()), 2, 8 )    
        self.lines = [(item[0],item[1]) for item in lines]        
        return {self.output_names[0] : color_dst, self.output_names[1]:self.lines}
Example no. 15
    def __init__(self):
        # read the image
        img = cv.imread("lena_full.jpg")
        img2 = cv.Mat()
        cv.cvtColor(img, img2, cv.CV_BGR2GRAY)
        img = cv.Mat()
        cv.resize(img2, img, cv.Size(N, N))
        self.fimg = fft.fft2(img[:])  # frequency-domain representation of the image
        mag_img = np.log10(np.abs(self.fimg))

        # create the working images
        filtered_img = np.zeros((N, N), dtype=np.float)
        self.mask = np.zeros((N, N), dtype=np.float)
        self.mask_img = cv.asMat(self.mask)  # Mat view used to draw polygons into self.mask

        # create the data source
        self.data = ArrayPlotData(mag_img=fft.fftshift(mag_img),
                                  filtered_img=filtered_img,
                                  mask_img=self.mask)

        # create the three image plots and their container
        meg_plot, img = self.make_image_plot("mag_img")
        mask_plot, _ = self.make_image_plot("mask_img")
        filtered_plot, _ = self.make_image_plot("filtered_img")
        self.plot = HPlotContainer(meg_plot, mask_plot, filtered_plot)

        # create the lasso tool
        lasso_selection = LassoSelection(component=img)
        lasso_overlay = LassoOverlay(lasso_selection=lasso_selection,
                                     component=img,
                                     selection_alpha=0.3)
        img.tools.append(lasso_selection)
        img.overlays.append(lasso_overlay)
        self.lasso_selection = lasso_selection

        # listen for lasso selection events and start the timer
        lasso_selection.on_trait_change(self.lasso_updated,
                                        "disjoint_selections")
        self.timer = Timer(50, self.on_timer)
Example no. 16
 def __init__(self):
     # read the image
     img = cv.imread("lena_full.jpg")
     img2 = cv.Mat()
     cv.cvtColor(img, img2, cv.CV_BGR2GRAY)
     img = cv.Mat()
     cv.resize(img2, img, cv.Size(N, N))
     self.fimg = fft.fft2(img[:]) # frequency-domain representation of the image
     mag_img = np.log10(np.abs(self.fimg))
 
     # create the working images
     filtered_img = np.zeros((N, N), dtype=np.float)
     self.mask = np.zeros((N, N), dtype=np.float)
     self.mask_img = cv.asMat(self.mask) # Mat view used to draw polygons into self.mask
     
     # create the data source
     self.data = ArrayPlotData(
         mag_img = fft.fftshift(mag_img),
         filtered_img = filtered_img,
         mask_img = self.mask
     )
     
     # create the three image plots and their container
     meg_plot, img = self.make_image_plot("mag_img")
     mask_plot, _ = self.make_image_plot("mask_img")       
     filtered_plot, _ = self.make_image_plot("filtered_img")
     self.plot = HPlotContainer(meg_plot, mask_plot, filtered_plot)     
     
     # create the lasso tool
     lasso_selection = LassoSelection(component=img)
     lasso_overlay = LassoOverlay(lasso_selection = lasso_selection, component=img, selection_alpha=0.3)
     img.tools.append(lasso_selection)
     img.overlays.append(lasso_overlay)
     self.lasso_selection = lasso_selection                 
     
     # listen for lasso selection events and start the timer
     lasso_selection.on_trait_change(self.lasso_updated, "disjoint_selections")
     self.timer = Timer(50, self.on_timer)
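Both constructors above only build the plots; the actual filtering step (the on_timer handler) is not on this page. Presumably it multiplies the spectrum self.fimg by the polygon mask and transforms back to the spatial domain. A minimal numpy sketch of that idea, with the fftshift handling stated as an assumption:

import numpy as np
from numpy import fft

def apply_mask(fimg, mask):
    # fimg: 2-D FFT of the image; mask: 0/1 array painted via the lasso tool.
    # Assumption: the mask is drawn in the fftshift-ed display coordinates,
    # so shift it back before multiplying.
    return np.abs(fft.ifft2(fimg * fft.ifftshift(mask)))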
Example no. 17
img2 = cv.imread("fruits.jpg")
img_hsv2 = cv.Mat()
cv.cvtColor(img2, img_hsv2, cv.CV_BGR2HSV)

img_bp = cv.Mat()
cv.calcBackProject(cv.vector_Mat([img_hsv2]),
                   channels=channels,
                   hist=result,
                   backProject=img_bp,
                   ranges=ranges)
img_th = cv.Mat()
cv.threshold(img_bp, img_th, 180, 255, cv.THRESH_BINARY)
struct = np.ones((3, 3), np.uint8)
struct_mat = cv.asMat(struct, force_single_channel=True)
img_mp = cv.Mat()

cv.morphologyEx(img_th, img_mp, cv.MORPH_CLOSE, struct_mat, iterations=5)

import pylab as pl
import matplotlib.cm as cm

pl.subplot(231)
pl.imshow(img[:, :, ::-1])
pl.subplot(232)
pl.imshow(img2[:, :, ::-1])
pl.subplot(233)
pl.imshow(result[:], cmap=cm.gray)
pl.subplot(234)
pl.imshow(img_bp[:], cmap=cm.gray)
Example no. 18
img2 = cv.imread("fruits.jpg")
img_hsv2 = cv.Mat()
cv.cvtColor(img2, img_hsv2, cv.CV_BGR2HSV)

img_bp = cv.Mat()
cv.calcBackProject(cv.vector_Mat([img_hsv2]), 
                   channels=channels, 
                   hist=result, 
                   backProject=img_bp, 
                   ranges = ranges) 
img_th = cv.Mat()
cv.threshold(img_bp, img_th, 180, 255, cv.THRESH_BINARY) 
struct = np.ones((3,3), np.uint8)
struct_mat = cv.asMat(struct, force_single_channel=True)
img_mp = cv.Mat()

cv.morphologyEx(img_th, img_mp, cv.MORPH_CLOSE, struct_mat, iterations=5) 


import pylab as pl
import matplotlib.cm as cm
pl.subplot(231)
pl.imshow(img[:,:,::-1])
pl.subplot(232)
pl.imshow(img2[:,:,::-1])
pl.subplot(233)
pl.imshow(result[:], cmap=cm.gray)
pl.subplot(234)
pl.imshow(img_bp[:], cmap=cm.gray)
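Back projection replaces every pixel with the value of its histogram bin, so bright areas of img_bp mark colours that are frequent in the reference histogram result. A per-pixel sketch of the idea (the 2-D hue-saturation histogram hist and its bin counts are hypothetical):

def backproject_pixel(h, s, hist, h_bins=16, s_bins=16):
    # map hue/saturation to a bin index and return that bin's histogram value
    hi = min(int(h * h_bins / 180.0), h_bins - 1)   # 8-bit hue runs from 0 to 179
    si = min(int(s * s_bins / 256.0), s_bins - 1)
    return hist[hi, si]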
Example no. 19
        self.tmpbuf[:] = 0
        for s in self.slice:
            self.tmpbuf += self.w1[s]

        self.tmpbuf /= 4
        self.w2[1:-1, 1:-1] *= -1
        self.w2[1:-1, 1:-1] += self.tmpbuf
        self.w2 *= self.damping
        self.w1, self.w2 = self.w2, self.w1

        self.bmp[:, :, 0] = self.w1 + 128
        self.bmp[:, :, 1] = self.bmp[:, :, 0] - (self.bmp[:, :, 0] >> 2)
        self.bmp[:, :, 2] = self.bmp[:, :, 1]
        return self.bmp


WIDTH, HEIGHT = 640, 480
video = cv.VideoWriter()
#video.open("waterwave.avi", cv.CV_FOURCC(*"DIB "), 30, cv.Size2i(WIDTH, HEIGHT))
video.open("waterwave.avi", cv.CV_FOURCC(*"ffds"), 30,
           cv.Size2i(WIDTH, HEIGHT))
water = WaterWave(WIDTH, HEIGHT, 100, 0.97)
import time
start = time.clock()
for i in range(200):
    if i % 30 == 0: print(i)
    r = water.step()
    mat = cv.asMat(r)
    video << mat
del video
print(time.clock() - start)
Example no. 20
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np

y, x = np.ogrid[-1:1:250j,-1:1:250j]
z = np.sin(10*np.sqrt(x*x+y*y))*0.5 + 0.5 
np.round(z, decimals=1, out=z) 

img = cv.asMat(z) 

cv.namedWindow("demo1")
cv.imshow("demo1", img)

img2 = cv.Mat() 
cv.Laplacian(img, img2, img.depth(), ksize=3) 

cv.namedWindow("demo2")
cv.imshow("demo2", img2)
cv.waitKey(0)
Example no. 21
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np

y, x = np.ogrid[-1:1:250j, -1:1:250j]
z = np.sin(10 * np.sqrt(x * x + y * y)) * 0.5 + 0.5
np.round(z, decimals=1, out=z)

img = cv.asMat(z)

cv.namedWindow("demo1")
cv.imshow("demo1", img)

img2 = cv.Mat()
cv.Laplacian(img, img2, img.depth(), ksize=3)

cv.namedWindow("demo2")
cv.imshow("demo2", img2)
cv.waitKey(0)
Example no. 22
# read the image and shrink it to half size
img0 = cv.imread("lena.jpg")
size = img0.size()
w, h = size.width, size.height
img1 = cv.Mat()
cv.resize(img0, img1, cv.Size(w//2, h//2)) 

# various convolution kernels
kernels = [
    (u"low-pass filter",np.array([[1,1,1],[1,2,1],[1,1,1]])*0.1),
    (u"high-pass filter",np.array([[0,-1,0],[-1,5,-1],[0,-1,0]])),
    (u"edge detection",np.array([[-1,-1,-1],[-1,8,-1],[-1,-1,-1]]))
]

index = 0
for name, kernel in kernels:
    plt.subplot(131+index)
    # convert the kernel to a Mat object
    kmat = cv.asMat(kernel.astype(np.float), force_single_channel=True) 
    img2 = cv.Mat()
    cv.filter2D(img1, img2, -1, kmat) 
    # matplotlib expects RGB while OpenCV stores BGR, so reverse the channel order
    plt.imshow(img2[:,:,::-1]) 
    plt.title(name)
    index += 1
    plt.gca().set_axis_off()
plt.subplots_adjust(0.02, 0, 0.98, 1, 0.02, 0)
plt.show()    


Example no. 23
 def redraw(self):
     img2 = cv.Mat()
     kernel = cv.asMat(self.kernel*self.scale, force_single_channel=True)
     cv.filter2D(self.img, img2, -1, kernel)
     cv.imshow("Filter Demo", img2)
Example no. 24
 def affine(self):
     self.img2 = cv.Mat()
     M = cv.asMat(self.m, force_single_channel=True)
     cv.warpAffine(self.img1, self.img2, M, self.img1.size(), 
         borderValue=cv.CV_RGB(255,255,255))
Example no. 25
def Dilation(pos, user_data):
    element = cv.asMat(np.ones((pos*2+1, pos*2+1), 'uint8'), True)
    cv.dilate(src, dest, element)
    cv.imshow("Erosion&Dilation window",dest);
Example no. 26
def Dilation(pos, user_data):
    element = cv.asMat(np.ones((pos * 2 + 1, pos * 2 + 1), 'uint8'), True)
    cv.dilate(src, dest, element)
    cv.imshow("Erosion&Dilation window", dest)
Example no. 27
 def redraw(self):
     img2 = cv.Mat()
     kernel = cv.asMat(self.kernel * self.scale, force_single_channel=True)
     cv.filter2D(self.img, img2, -1, kernel)
     cv.imshow("Filter Demo", img2)
Example no. 28
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np

img = cv.imread("lena.jpg")
size = img.size()
w, h = size.width, size.height
img2 = cv.Mat()
map1, map2 = np.meshgrid(
    np.linspace(0,w*2,w).astype(np.float32),
    np.linspace(0,h*2,h).astype(np.float32),
)
map1 = cv.asMat(map1)
map2 = cv.asMat(map2)
cv.remap(img, img2, map1, map2, cv.INTER_LINEAR)

cv.namedWindow( "Remap Resize", cv.CV_WINDOW_AUTOSIZE )
cv.imshow("Remap Resize", img2)
cv.waitKey(0)
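In numpy terms, remap reads each destination pixel (x, y) from the source position (map1[y, x], map2[y, x]); since both maps run from 0 up to twice the image size, the picture ends up shrunk to half size and out-of-range coordinates are filled with the border value. A nearest-neighbour sketch of the same lookup (cv.remap itself interpolates with INTER_LINEAR):

mx = map1[:].round().astype(int)                 # x source coordinate per pixel
my = map2[:].round().astype(int)                 # y source coordinate per pixel
valid = (mx >= 0) & (mx < w) & (my >= 0) & (my < h)
manual = np.zeros_like(img[:])
manual[valid] = img[:][my[valid], mx[valid]]     # roughly the content of img2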

Example no. 29
def new_dst(width=640, height=480, depth=3, mode='uint8'):
    dst = cv.asMat(zeros((height, width, depth), mode))
    return dst
Example no. 30
def _get_morphology_element(v):
    return cv.asMat(ones((v * 2 + 1, v * 2 + 1), 'uint8'), True)
Example no. 31
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np
import time

img = cv.asMat(np.random.rand(1000, 1000))

row = cv.getGaussianKernel(7, -1)
col = cv.getGaussianKernel(5, -1)

kernel = cv.asMat(np.dot(col[:], row[:].T), force_single_channel=True)

img2 = cv.Mat()
img3 = cv.Mat()

start = time.clock()
cv.filter2D(img, img2, -1, kernel)
print(("filter2D:", time.clock() - start))

start = time.clock()
cv.sepFilter2D(img, img3, -1, row, col)
print(("sepFilter3D:", time.clock() - start))

print(("error=", np.max(np.abs(img2[:] - img3[:]))))
Example no. 32
def asMat(src, *args, **kw):
    return cv.asMat(src, *args, **kw)
Example no. 33
def crop(src, x, y, w, h, mat=True):
#    print y, y + h, x, x + w
    v = src[y:y + h, x:x + w]
    if mat:
        v = cv.asMat(v)
    return v
Example no. 34
# -*- coding: utf-8 -*-
import pyopencv as cv
import numpy as np
import time 

img = cv.asMat(np.random.rand(1000,1000)) 

row = cv.getGaussianKernel(7, -1) 
col = cv.getGaussianKernel(5, -1)

kernel = cv.asMat(np.dot(col[:], row[:].T), force_single_channel=True) 

img2 = cv.Mat()
img3 = cv.Mat()

start = time.clock()
cv.filter2D(img, img2, -1, kernel) 
print "filter2D:", time.clock() - start

start = time.clock()
cv.sepFilter2D(img, img3, -1, row, col) 
print "sepFilter3D:", time.clock() - start

print "error=", np.max(np.abs(img2[:] - img3[:])) 
Example no. 35
    def process(self, input_images, connected_outs):
        if len(input_images) == 0:
            return FAIL
        src = input_images['Input']
        dist_res = int(self.getParamContent('Distance resolution'))
        angle_res = int(self.getParamContent('Angle resolution (degrees)'))
        acc_thresh = int(self.getParamContent('Accumulator threshold'))
        min_length = int(self.getParamContent('Minimum length'))
        max_gap = int(self.getParamContent('Maximum gap'))
        choice = self.getParamContent("Type of Hough transform")
        if src.ndim > 2:
            print "In '%s': The hough transform takes a binary image (or 8-bit) as input." % self.name
            return FAIL
        color_dst = numpy.empty((src.shape[0], src.shape[1], 3), dtype='uint8')
        pycv.cvtColor(pycv.asMat(src), pycv.asMat(color_dst), pycv.CV_GRAY2BGR)

        if choice == "Standard":
            lines = pycv.HoughLines(pycv.asMat(src), dist_res,
                                    pycv.CV_PI / angle_res, acc_thresh)
            margin = 0.04
            n = 8
            pi = math.pi
            h, w = src.shape[0:2]
            for i in range(
                    min(len(lines),
                        int(self.getParamContent("draw # lines")))):
                l = lines[i]
                rho = l[0]
                theta = l[1]
                if theta > 3 * pi / 4: theta -= pi
                if abs(rho) < w / n and abs(theta) < margin: pass
                elif abs(rho) > w - w / n and abs(theta) < margin: pass
                elif abs(rho) < h / n and abs(theta - pi / 2) < margin: pass
                elif abs(rho) > h - h / n and abs(theta - pi / 2) < margin:
                    pass
                else:
                    continue
                a = math.cos(theta)
                b = math.sin(theta)
                x0 = a * rho
                y0 = b * rho
                pt1 = pycv.Point(int(round(x0 + 2000 * (-b))),
                                 int(round(y0 + 2000 * (a))))
                pt2 = pycv.Point(int(round(x0 - 2000 * (-b))),
                                 int(round(y0 - 2000 * (a))))
                pycv.line(
                    pycv.asMat(color_dst), pt1, pt2,
                    pycv.CV_RGB(random.randint(0, 255), random.randint(0, 255),
                                random.randint(0, 255)), 2, 8)

        else:
            lines = pycv.HoughLinesP(pycv.asMat(src), dist_res,
                                     pycv.CV_PI / angle_res, acc_thresh,
                                     min_length, max_gap)
            for l in lines:
                pycv.line(pycv.asMat(color_dst),
                          pycv.Point(int(l[0]), int(l[1])),
                          pycv.Point(int(l[2]), int(l[3])),
                          pycv.CV_RGB(*getRandColor()), 2, 8)
        self.lines = [(item[0], item[1]) for item in lines]
        return {
            self.output_names[0]: color_dst,
            self.output_names[1]: self.lines
        }
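In the standard-Hough branch above, each line comes back as (rho, theta) with rho = x*cos(theta) + y*sin(theta); the two endpoints passed to pycv.line are found by walking 2000 pixels in both directions along the line from its foot point. The conversion as a small stand-alone sketch:

import math

def hough_line_endpoints(rho, theta, length=2000):
    a, b = math.cos(theta), math.sin(theta)
    x0, y0 = a * rho, b * rho                    # point of the line closest to the origin
    pt1 = (int(round(x0 - length * b)), int(round(y0 + length * a)))
    pt2 = (int(round(x0 + length * b)), int(round(y0 - length * a)))
    return pt1, pt2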
Example no. 36
def Closing(pos, user_data):
    element = cv.asMat(np.ones((pos*2+1, pos*2+1), 'uint8'), True)
    cv.dilate(src, image, element)
    cv.erode(image, dest, element)
    cv.imshow("Opening&Closing window",dest);
Example no. 37
import pyopencv as cv
import numpy as np
import matplotlib.pyplot as plt

# read the image and shrink it to half size
img0 = cv.imread("lena.jpg")
size = img0.size()
w, h = size.width, size.height
img1 = cv.Mat()
cv.resize(img0, img1, cv.Size(w // 2, h // 2))

# various convolution kernels
kernels = [("low-pass filter", np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]]) * 0.1),
           ("high-pass filter", np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])),
           ("edge detection", np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]))]

index = 0
for name, kernel in kernels:
    plt.subplot(131 + index)
    # convert the kernel to a Mat object
    kmat = cv.asMat(kernel.astype(np.float), force_single_channel=True)
    img2 = cv.Mat()
    cv.filter2D(img1, img2, -1, kmat)
    # matplotlib expects RGB while OpenCV stores BGR, so reverse the channel order
    plt.imshow(img2[:, :, ::-1])
    plt.title(name)
    index += 1
    plt.gca().set_axis_off()
plt.subplots_adjust(0.02, 0, 0.98, 1, 0.02, 0)
plt.show()
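A quick sanity check on these kernels: the low-pass and high-pass kernels sum to 1, so overall brightness is preserved, while the edge-detection kernel sums to 0 and responds only to intensity changes:

sums = [(name, kernel.sum()) for name, kernel in kernels]
# -> low-pass filter 1.0, high-pass filter 1, edge detection 0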
Example no. 38
def Closing(pos, user_data):
    element = cv.asMat(np.ones((pos * 2 + 1, pos * 2 + 1), 'uint8'), True)
    cv.dilate(src, image, element)
    cv.erode(image, dest, element)
    cv.imshow("Opening&Closing window", dest)