Example No. 1
    def redraw(self):

        edge_img = cv.Mat()
        # Edge detection
        cv.Canny(self.img_gray, edge_img, self.th1, self.th2)
        # Build the image to display
        if self.show_canny:
            show_img = cv.Mat()
            cv.cvtColor(edge_img, show_img, cv.CV_GRAY2BGR)
        else:
            show_img = self.img.clone()
        # Line segment detection
        theta = self.theta / 180.0 * np.pi
        lines = cv.HoughLinesP(edge_img, self.rho, theta, self.hough_th,
                               self.minlen, self.maxgap)
        for line in lines:
            cv.line(show_img, cv.asPoint(line[:2]), cv.asPoint(line[2:]),
                    cv.CV_RGB(255, 0, 0), 2)
        # Circle detection
        circles = cv.HoughCircles(self.img_smooth,
                                  3,  # 3 == CV_HOUGH_GRADIENT
                                  self.dp,
                                  self.mindist,
                                  param1=self.param1,
                                  param2=self.param2)

        for circle in circles:
            cv.circle(show_img, cv.Point(int(circle[0]), int(circle[1])),
                      int(circle[2]), cv.CV_RGB(0, 255, 0), 2)

        cv.imshow("Hough Demo", show_img)
Example No. 3
def draw_lines(src, lines, color=(255, 0, 0), thickness=3):
    if lines:
        for p1, p2 in lines:
            p1 = new_point(*p1)
            p2 = new_point(*p2)
            cv.line(src, p1, p2,
                   convert_color(color), thickness, 8)
Example No. 4
    def redraw(self):
        # Display the two images side by side
        w = self.img1.size().width
        h = self.img1.size().height
        show_img = cv.Mat(cv.Size(w * 2, h), cv.CV_8UC3)
        for i in range(3):
            show_img[:, :w, i] = self.img1[:]
            show_img[:, w:, i] = self.img2[:]

        # Draw the keypoint circles
        if self.draw_circle:
            self.draw_keypoints(show_img, self.keypoints1, 0)
            self.draw_keypoints(show_img, self.keypoints2, w)

        # Connect keypoint pairs whose descriptor distance is below the threshold
        for idx1 in np.where(self.mindist < self.max_distance)[0]:
            idx2 = self.idx_mindist[idx1]
            pos1 = self.keypoints1[int(idx1)].pt
            pos2 = self.keypoints2[int(idx2)].pt

            p1 = cv.Point(int(pos1.x), int(pos1.y))
            p2 = cv.Point(int(pos2.x) + w, int(pos2.y))
            cv.line(show_img, p1, p2, cv.CV_RGB(0, 255, 255), lineType=16)

        cv.imshow("SURF Demo", show_img)
Example No. 7
 def make_grid_img(self):
     img = self.img.clone()
     for i in range(0, self.w, 30):
         cv.line(img, cv.Point(i, 0), cv.Point(i, self.h),
                 cv.CV_RGB(0, 0, 0), 1)
     for i in range(0, self.h, 30):
         cv.line(img, cv.Point(0, i), cv.Point(self.w, i),
                 cv.CV_RGB(0, 0, 0), 1)
     return img
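The same grid can be drawn directly on a NumPy image with cv2; the 30-pixel spacing matches the example above, while the function signature and the input image are assumptions.

import cv2

def make_grid_img(img, step=30):
    # Draw black vertical and horizontal lines every `step` pixels
    out = img.copy()
    h, w = out.shape[:2]
    for x in range(0, w, step):
        cv2.line(out, (x, 0), (x, h), (0, 0, 0), 1)
    for y in range(0, h, step):
        cv2.line(out, (0, y), (w, y), (0, 0, 0), 1)
    return out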
Example No. 8
def find_lines(src, t1, minlen=100):
#    dst = canny(src, t1, t2)

    dst = threshold(src, t1, invert=True)
    lines = cv.HoughLinesP(dst, 1, cv.CV_PI / 180, 5, minlen, 20)
#    print lines
    dst = colorspace(dst)
    for l in lines:
        cv.line(dst, new_point(int(l[0]), int(l[1])),
                new_point(int(l[2]), int(l[3])), convert_color((255, 0, 0)),
                3, 8)
    return dst, lines
w, h = img.size().width, img.size().height

def blend(img, img2): 
    """
    Blend two images; img2 has four channels (BGRA), the last being alpha.
    """
    # Use the alpha channel as the blend weight for img2
    b = img2[:,:,3:] / 255.0     
    a = 1 - b  # blend weight for img

    # Blend the two images
    img[:,:,:3] *= a  
    img[:,:,:3] += b * img2[:,:,:3]

img2[:] = 0
for i in xrange(0, w, w/10): 
    cv.line(img2, cv.Point(i,0), cv.Point(i, h),  
        cv.Scalar(0, 0, 255, i*255/w), 5)

blend(img, img2) 

img2[:] = 0        
for i in xrange(0, h, h/10):
    cv.line(img2, cv.Point(0,i), cv.Point(w, i), 
        cv.Scalar(0, 255, 0, i*255/h), 5)
        
blend(img, img2)
     
cv.namedWindow("Draw Demo")
cv.imshow("Draw Demo", img)
cv.waitKey(0)
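The same alpha blend can be written directly against NumPy arrays with cv2; the canvas size and colours below are illustrative assumptions.

import cv2
import numpy as np

img = np.full((300, 400, 3), 255, dtype=np.uint8)   # white BGR canvas
overlay = np.zeros((300, 400, 4), dtype=np.uint8)   # BGRA overlay
h, w = img.shape[:2]

# Red vertical lines whose alpha grows from left to right
for i in range(0, w, w // 10):
    cv2.line(overlay, (i, 0), (i, h), (0, 0, 255, i * 255 // w), 5)

# Blend: weight the overlay's colour channels by its alpha channel
alpha = overlay[:, :, 3:] / 255.0
blended = (img * (1 - alpha) + overlay[:, :, :3] * alpha).astype(np.uint8)

cv2.imshow("Draw Demo", blended)
cv2.waitKey(0)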
Example No. 11
    def process(self, input_images, connected_outs):
        if len(input_images) == 0:
            return FAIL
        src = input_images['Input']
        dist_res = int(self.getParamContent('Distance resolution'))
        angle_res = int(self.getParamContent('Angle resolution (degrees)'))
        acc_thresh = int(self.getParamContent('Accumulator threshold'))
        min_length = int(self.getParamContent('Minimum length'))
        max_gap = int(self.getParamContent('Maximum gap'))
        choice = self.getParamContent("Type of Hough transform")
        if src.ndim > 2:
            print "In '%s': The hough transform takes a binary image (or 8-bit) as input." % self.name
            return FAIL
        color_dst = numpy.empty((src.shape[0], src.shape[1], 3), dtype='uint8')
        pycv.cvtColor(pycv.asMat(src), pycv.asMat(color_dst), pycv.CV_GRAY2BGR)

        if choice == "Standard":
            lines = pycv.HoughLines(pycv.asMat(src), dist_res,
                                    pycv.CV_PI / angle_res, acc_thresh)
            margin = 0.04
            n = 8
            pi = math.pi
            h, w = src.shape[0:2]
            for i in range(
                    min(len(lines),
                        int(self.getParamContent("draw # lines")))):
                l = lines[i]
                rho = l[0]
                theta = l[1]
                if theta > 3 * pi / 4:
                    theta -= pi
                # Keep only near-vertical lines close to the left/right edges
                # and near-horizontal lines close to the top/bottom edges;
                # skip everything else.
                if abs(rho) < w / n and abs(theta) < margin:
                    pass
                elif abs(rho) > w - w / n and abs(theta) < margin:
                    pass
                elif abs(rho) < h / n and abs(theta - pi / 2) < margin:
                    pass
                elif abs(rho) > h - h / n and abs(theta - pi / 2) < margin:
                    pass
                else:
                    continue
                a = math.cos(theta)
                b = math.sin(theta)
                x0 = a * rho
                y0 = b * rho
                pt1 = pycv.Point(int(round(x0 + 2000 * (-b))),
                                 int(round(y0 + 2000 * (a))))
                pt2 = pycv.Point(int(round(x0 - 2000 * (-b))),
                                 int(round(y0 - 2000 * (a))))
                pycv.line(
                    pycv.asMat(color_dst), pt1, pt2,
                    pycv.CV_RGB(random.randint(0, 255), random.randint(0, 255),
                                random.randint(0, 255)), 2, 8)

        else:
            lines = pycv.HoughLinesP(pycv.asMat(src), dist_res,
                                     pycv.CV_PI / angle_res, acc_thresh,
                                     min_length, max_gap)
            for l in lines:
                pycv.line(pycv.asMat(color_dst),
                          pycv.Point(int(l[0]), int(l[1])),
                          pycv.Point(int(l[2]), int(l[3])),
                          pycv.CV_RGB(*getRandColor()), 2, 8)
        self.lines = [(item[0], item[1]) for item in lines]
        return {
            self.output_names[0]: color_dst,
            self.output_names[1]: self.lines
        }
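The standard (rho, theta) branch translates to cv2 as sketched below, using the same ±2000-pixel endpoint construction as the code above; the file name and the thresholds are illustrative assumptions.

import math
import cv2
import numpy as np

edges = cv2.Canny(cv2.imread("input.jpg", cv2.IMREAD_GRAYSCALE), 50, 150)
color_dst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)

lines = cv2.HoughLines(edges, 1, np.pi / 180, 150)
if lines is not None:
    for rho, theta in lines[:, 0]:
        # Convert the normal form (rho, theta) into two far-apart endpoints
        a, b = math.cos(theta), math.sin(theta)
        x0, y0 = a * rho, b * rho
        pt1 = (int(round(x0 + 2000 * (-b))), int(round(y0 + 2000 * a)))
        pt2 = (int(round(x0 - 2000 * (-b))), int(round(y0 - 2000 * a)))
        cv2.line(color_dst, pt1, pt2, (0, 255, 0), 2)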
Example No. 12
    # create a correspond Mat
    correspond = cv.Mat(image.rows + object.rows, image.cols, cv.CV_8UC1,
                        cv.Scalar(0))

    # copy the images to correspond -- numpy way
    correspond[:object.rows, :object.cols] = object[:]
    correspond[object.rows:, :image.cols] = image[:]

    # find pairs
    ptpairs = findPairs(surf, objectKeypoints, objectDescriptors,
                        imageKeypoints, imageDescriptors)

    for pair in ptpairs:
        cv.line(correspond, cv.asPoint(pair[0]),
                cv.Point(int(pair[1].x), int(pair[1].y + object.rows)),
                colors[8])

    # locate planar object
    if locatePlanarObject(ptpairs, src_corners, dst_corners):
        for i in range(4):
            r1 = dst_corners[i]
            r2 = dst_corners[(i + 1) % 4]
            cv.line(correspond, cv.Point(r1.x, r1.y + object.rows),
                    cv.Point(r2.x, r2.y + object.rows), colors[8])

    # show the object correspondents
    cv.imshow("Object Correspond", correspond)

    # draw circles around the object keypoints
    for keypt in objectKeypoints:
        cv.circle(object_color, cv.asPoint(keypt.pt),
                  int(keypt.size * 1.2 / 9. * 2), colors[0], 1, 8, 0)
    cv.imshow("Object", object_color)
Example No. 13
    print("Image Descriptors: %d\n" % len(imageKeypoints))
    tt = float(cv.getTickCount()) - tt
    print("Extraction time = %gms\n" % (tt/(cv.getTickFrequency()*1000.)))
    
    # create a correspond Mat
    correspond = cv.Mat(image.rows+object.rows, image.cols, cv.CV_8UC1, cv.Scalar(0))
    
    # copy the images to correspond -- numpy way
    correspond[:object.rows, :object.cols] = object[:]
    correspond[object.rows:, :image.cols] = image[:]

    # find pairs
    ptpairs = findPairs(surf, objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors)

    for pair in ptpairs:
        cv.line( correspond, cv.asPoint(pair[0]), cv.Point(int(pair[1].x), int(pair[1].y+object.rows)), colors[8] )

    # locate planar object
    if locatePlanarObject( ptpairs, src_corners, dst_corners ):
        for i in range(4):
            r1 = dst_corners[i]
            r2 = dst_corners[(i+1)%4]
            cv.line( correspond, cv.Point(r1.x, r1.y+object.rows ), cv.Point(r2.x, r2.y+object.rows ), colors[8] )

    # show the object correspondents
    cv.imshow("Object Correspond", correspond)
    
    # draw circles
    for keypt in objectKeypoints:
        cv.circle(object_color, cv.asPoint(keypt.pt), int(keypt.size*1.2/9.*2), colors[0], 1, 8, 0)
    cv.imshow("Object", object_color)