Example #1
def erode_image(image, v, h):
    vImage = cv.CreateImage((image.width, image.height), image.depth,
                            image.nChannels)
    hImage = cv.CreateImage((image.width, image.height), image.depth,
                            image.nChannels)
    # 1 x v vertical kernel and h x 1 horizontal kernel, anchored at their centres
    vElement = cv.CreateStructuringElementEx(1, v, 0, v / 2, cv.CV_SHAPE_RECT)
    hElement = cv.CreateStructuringElementEx(h, 1, h / 2, 0, cv.CV_SHAPE_RECT)
    # erode vertically first, then horizontally (a separable erosion)
    cv.Erode(image, vImage, vElement, 1)
    cv.Erode(vImage, hImage, hElement, 1)
    return hImage
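For reference, a minimal sketch of the same separable erosion with the modern cv2/NumPy API; it is not part of the original example and assumes `image` is a NumPy array (e.g. loaded with cv2.imread):

import cv2

def erode_image_cv2(image, v, h):
    # ksize is (width, height): a 1 x v column kernel and an h x 1 row kernel
    v_element = cv2.getStructuringElement(cv2.MORPH_RECT, (1, v))
    h_element = cv2.getStructuringElement(cv2.MORPH_RECT, (h, 1))
    # erode vertically, then horizontally, as in erode_image above
    v_eroded = cv2.erode(image, v_element, iterations=1)
    return cv2.erode(v_eroded, h_element, iterations=1)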
Example #2
def erodeImage(img):
    kernel = cv.CreateStructuringElementEx(9, 9, 5, 5, cv.CV_SHAPE_CROSS)
    # Erode- replaces pixel value with lowest value pixel in kernel
    cv.Erode(img, img, kernel, 2)
    # Dilate- replaces pixel value with highest value pixel in kernel
    cv.Dilate(img, img, kernel, 2)
    return img
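The Erode/Dilate pair above is a morphological opening. A minimal cv2 sketch of the same clean-up, not part of the original example and assuming `img` is a NumPy array:

import cv2

def erode_image_cv2(img):
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (9, 9))
    # MORPH_OPEN with iterations=2 erodes twice and then dilates twice,
    # matching the two-iteration Erode/Dilate calls above
    return cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=2)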
Example #3
    def observe(self):
        '''Grab the latest image, detect the table on it, and write the observation to memory'''
        temp = self.get_last_observation()
        if temp is None or temp.nChannels != 1:
            return

        img = cv.CreateImage(cv.GetSize(temp), temp.depth, temp.nChannels)

        SE = cv.CreateStructuringElementEx(50, 50, 25, 25, cv.CV_SHAPE_RECT)
        cv.MorphologyEx(temp, img, temp, SE, cv.CV_MOP_CLOSE, iterations=1)
        # only look at the pixels that are interesting, just cut off the Nao and stuff.
        cv.SetImageROI(img, config["roi"])

        # remove tilt
        self.filter.filter(img)

        # cut the image into layers of interest
        layers = self.segmentizer.segmentize(img)

        # the HistogramSegmentizer can fail when no peaks are ever found
        if not layers:
            return

        # find arms and objects in the table layer of the image
        observation = self.detector.detect(layers['table'])

        if observation:
            # write the observation to memory
            self.emit(observation)
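For the closing step used in observe(), a minimal cv2 sketch (not part of the original code; assumes `temp` is a single-channel NumPy array):

import cv2

se = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 50))
closed = cv2.morphologyEx(temp, cv2.MORPH_CLOSE, se, iterations=1)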
Example #4
def dilationofimage():
    display(src_image, "Source Image")
    struc = cv.CreateStructuringElementEx(10, 10, 5, 5, cv.CV_SHAPE_RECT)
    cv.Dilate(src_image, dst_image, struc, 1)
    display(dst_image, "Dilation")
    cv.WaitKey(0)
    cv.DestroyWindow("Dilation")
def blackhat():
    display(src_image, "source_image")
    struc = cv.CreateStructuringElementEx(5, 5, 3, 3, cv.CV_SHAPE_RECT)
    dst = cv.CreateImage((src_image.width, src_image.height), 8, src_image.nChannels)
    temp = cv.CreateImage((src_image.width, src_image.height), 8, src_image.nChannels)
    cv.MorphologyEx(src_image, dst, temp, struc, cv.CV_MOP_BLACKHAT, 1)
    display(dst, "destination image")
    cv.WaitKey(0)
    cv.DestroyAllWindows()
Example #6
    def processImage(self, frame):
        cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)

        #Absdiff to get the difference between the two frames
        cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)

        #Remove the noise and do the threshold
        cv.Smooth(self.res, self.res, cv.CV_BLUR, 5,5)
        element = cv.CreateStructuringElementEx(5 * 2 + 1, 5 * 2 + 1, 5, 5, cv.CV_SHAPE_RECT)
        # open then close with the 11 x 11 rectangular element
        cv.MorphologyEx(self.res, self.res, None, element, cv.CV_MOP_OPEN)
        cv.MorphologyEx(self.res, self.res, None, element, cv.CV_MOP_CLOSE)
        cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY_INV)
Example #7
def findImage(img):
    #Set up storage for images
    frame_size = cv.GetSize(img)
    img2 = cv.CreateImage(frame_size,8,3)
    tmp = cv.CreateImage(frame_size,8,cv.CV_8U)
    h = cv.CreateImage(frame_size,8,1)

    #copy original image to do work on
    cv.Copy(img,img2)

    #altering the image a bit for smoother processing
    cv.Smooth(img2,img2,cv.CV_BLUR,3)
    cv.CvtColor(img2,img2,cv.CV_BGR2HSV)

    #make sure temp is empty
    cv.Zero(tmp)

    #detection based on HSV value
    #30,100,90 lower limit on pic 41,255,255 on pic
    #cv.InRangeS(img2,cv.Scalar(25,100,87),cv.Scalar(50,255,255),tmp)
    #Range for green plate dot in my Living room
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(65,95,90),tmp)
    #classroom
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(70,110,70),tmp)
    #Kutztowns Gym
    cv.InRangeS(img2,cv.Scalar(65,100,112),cv.Scalar(85,107,143),tmp)

    elmt_shape=cv.CV_SHAPE_ELLIPSE
    pos = 3
    element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, elmt_shape)
    cv.Dilate(tmp,tmp,element,6)
    cv.Erode(tmp,tmp,element,2)

    cv.Split(tmp,h,None,None,None)
    storage = cv.CreateMemStorage()

    scan = sc.FindContours(h,storage)
    xyImage=drawCircles(scan,img)

    if xyImage is not None:
        return (xyImage, tmp)
    else:
        return None
Example #8
        #cv.Smooth(plate, zzz)
        #
        #cv.PyrMeanShiftFiltering(plate, zzz, 40, 15)
        foo = anpr.greyscale(plate)
        segmented = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        bar = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        cv.EqualizeHist(foo, segmented)

        # the adaptive-threshold block size must be odd, so even heights are bumped by one
        cv.AdaptiveThreshold(
            segmented, bar, 255, cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
            cv.CV_THRESH_BINARY_INV,
            plate.height + 1 if plate.height % 2 == 0 else plate.height,
            plate.height / 2)

        baz = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        el = cv.CreateStructuringElementEx(1, 2, 0, 0, cv.CV_SHAPE_RECT)
        cv.Erode(bar, baz, el)
        # quick_show(plate)
        print 'baz'
        quick_show(baz)
        print 'bar'
        quick_show(bar)
        print 'segmented'
        quick_show(segmented)
        image_path = 'plate.png'
        image_path2 = 'plate2.png'

        cv.SaveImage(image_path, foo)

        for tool in tools:
            print("Will use tool '%s'" % (tool.get_name()))
Example #9
frame2gray = cv.CreateMat(frame1.height, frame1.width, cv.CV_8U)

w = frame2gray.width
h = frame2gray.height
nb_pixels = frame2gray.width * frame2gray.height

while True:
    frame2 = cv.QueryFrame(capture)
    cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY)

    cv.AbsDiff(frame1gray, frame2gray, res)
    cv.ShowImage("After AbsDiff", res)

    cv.Smooth(res, res, cv.CV_BLUR, 5, 5)
    element = cv.CreateStructuringElementEx(5 * 2 + 1, 5 * 2 + 1, 5, 5,
                                            cv.CV_SHAPE_RECT)
    # open then close with the 11 x 11 rectangular element
    cv.MorphologyEx(res, res, None, element, cv.CV_MOP_OPEN)
    cv.MorphologyEx(res, res, None, element, cv.CV_MOP_CLOSE)
    cv.Threshold(res, res, 10, 255, cv.CV_THRESH_BINARY_INV)

    cv.ShowImage("Image", frame2)
    cv.ShowImage("Res", res)

    # -----------
    # count the pixels that changed (they are 0 after THRESH_BINARY_INV)
    nb = 0
    for y in range(h):
        for x in range(w):
            if res[y, x] == 0.0:
                nb += 1
    avg = (nb * 100.0) / nb_pixels
    # print "Average: ",avg, "%\r",
Example #10
i = 0
for files in glob.glob('D:/pic/car/*.jpg'):
    filepath, filename = os.path.split(files)
    image = cv2.imread(filepath + '/' + filename)
    # image = cv2.imread('D:/pic/car2.jpg')
    h, w = image.shape[:2]
    # convert to greyscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    grayIPimage = cv.GetImage(cv.fromarray(gray))
    sobel = cv.CreateImage((w, h), cv.IPL_DEPTH_16S, 1)  # 16-bit signed image to hold the Sobel result
    cv.Sobel(grayIPimage, sobel, 2, 0, 7)  # Sobel edge detection in the x direction
    temp = cv.CreateImage(cv.GetSize(sobel), cv.IPL_DEPTH_8U, 1)  # convert back to 8-bit depth for the next steps
    cv.ConvertScale(sobel, temp, 0.00390625, 0)
    cv.Threshold(temp, temp, 0, 255, cv.CV_THRESH_OTSU)
    kernel = cv.CreateStructuringElementEx(3, 1, 1, 0, cv.CV_SHAPE_RECT)
    cv.Dilate(temp, temp, kernel, 2)
    cv.Erode(temp, temp, kernel, 4)
    cv.Dilate(temp, temp, kernel, 2)
#     cv.ShowImage('1', temp)
    kernel = cv.CreateStructuringElementEx(1, 3, 0, 1, cv.CV_SHAPE_RECT)
    cv.Erode(temp, temp, kernel, 1)
    cv.Dilate(temp, temp, kernel, 3)
#     cv.ShowImage('2', temp)
    temp = np.asarray(cv.GetMat(temp))
    contours, heirs = cv2.findContours(temp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for tours in contours:
        rc = cv2.boundingRect(tours)
        if rc[2] / rc[3] >= 2:
            # rc[0] is the x of the top-left corner, rc[1] the y, rc[2] the width, rc[3] the height
Example #11
#cv.CvtColor(thumb, cvt, cv.CV_RGB2BGR)
#cv.NamedWindow('Image', cv.CV_WINDOW_AUTOSIZE)

grey = cv.CreateImage(cv.GetSize(thumb), 8, 1)
cv.CvtColor(thumb, grey, cv.CV_RGB2GRAY)
cv.ShowImage('Greyed', grey)

smoothed = cv.CloneImage(thumb)
cv.Smooth(thumb, smoothed, cv.CV_MEDIAN)
cv.ShowImage('Smoothed', smoothed)

cv.EqualizeHist(grey, grey)
cv.ShowImage('Equalized', grey)

threshold1 = cv.CloneImage(grey)
cv.Threshold(threshold1, threshold1, 100, 255, cv.CV_THRESH_BINARY)
cv.ShowImage('Threshold1', threshold1)

threshold2 = cv.CloneImage(grey)
cv.Threshold(threshold2, threshold2, 100, 255, cv.CV_THRESH_OTSU)
cv.ShowImage('Threshold2', threshold2)

element_shape = cv.CV_SHAPE_RECT
pos = 3
element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                        element_shape)
cv.Dilate(grey, grey, element, 2)

cv.ShowImage('Dilated', grey)
cv.WaitKey(0)
Example #12
def Dilation(pos):
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    cv.Dilate(src, dest, element, 1)
    cv.ShowImage("Erosion & Dilation", dest)
Example #13
def Closing(pos):
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    # dilate followed by erode with the same element is a morphological closing
    cv.Dilate(src, image, element, 1)
    cv.Erode(image, dest, element, 1)
    cv.ShowImage("Opening & Closing", dest)
Example #14
def findBlob(rgbimage, hsvimage, maskimage, blobimage, hsvcolorrange, hsvmin,
             hsvmax):

    cv.CvtColor(rgbimage, hsvimage, cv.CV_BGR2HSV)
    hsvmin = [
        hsvmin[0] - hsvcolorrange[0], hsvmin[1] - hsvcolorrange[1],
        hsvmin[2] - hsvcolorrange[2]
    ]
    hsvmax = [
        hsvmax[0] + hsvcolorrange[0], hsvmax[1] + hsvcolorrange[1],
        hsvmax[2] + hsvcolorrange[2]
    ]
    if hsvmin[0] < 0:
        hsvmin[0] = 0
    if hsvmin[1] < 0:
        hsvmin[1] = 0
    if hsvmin[2] < 0:
        hsvmin[2] = 0

    if hsvmax[0] > 255:
        hsvmax[0] = 255
    if hsvmax[1] > 255:
        hsvmax[1] = 255
    if hsvmax[2] > 255:
        hsvmax[2] = 255

    cv.InRangeS(hsvimage, cv.Scalar(hsvmin[0], hsvmin[1], hsvmin[2]),
                cv.Scalar(hsvmax[0], hsvmax[1], hsvmax[2]), maskimage)

    element = cv.CreateStructuringElementEx(5, 5, 2, 2, cv.CV_SHAPE_RECT)
    cv.Erode(maskimage, maskimage, element, 1)
    cv.Dilate(maskimage, maskimage, element, 1)
    storage = cv.CreateMemStorage(0)

    cv.Copy(maskimage, blobimage)
    contour = cv.FindContours(maskimage, storage, cv.CV_RETR_CCOMP,
                              cv.CV_CHAIN_APPROX_SIMPLE)

    trackedpoint = None
    maxtrackedpoint = None

    maxareasize = 0

    #You can tune these values to improve tracking
    maxarea = 0
    minarea = 1

    areasize = 0

    while contour:
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        areasize = fabs(bound_rect[2] * bound_rect[3])
        if areasize > maxareasize:
            maxareasize = areasize
            maxtrackedpoint = (int((pt1[0] + pt2[0]) / 2),
                               int((pt1[1] + pt2[1]) / 2), 1.0)
            cv.Rectangle(rgbimage, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

    trackedpoint = maxtrackedpoint
    if trackedpoint is not None:
        cv.Circle(rgbimage, (trackedpoint[0], trackedpoint[1]), 5,
                  cv.CV_RGB(255, 0, 0), 1)
    return trackedpoint
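A hedged cv2 sketch of the same HSV in-range, morphology, and contour tracking; the function name and arguments here are illustrative, not from the original code, and the cv2.findContours return signature shown is the OpenCV 2.x/4.x one (3.x also returns the input image):

import cv2
import numpy as np

def find_blob_cv2(bgr, hsv_min, hsv_max):
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array(hsv_min, np.uint8), np.array(hsv_max, np.uint8))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    mask = cv2.erode(mask, kernel, iterations=1)
    mask = cv2.dilate(mask, kernel, iterations=1)
    contours, _ = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    # keep the centre of the largest bounding box, as findBlob does
    x, y, w, h = cv2.boundingRect(max(contours, key=cv2.contourArea))
    return (x + w // 2, y + h // 2, 1.0)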
Example #15
def detectObject(filename):
    img=cv.LoadImage(filename)
    '''
    #get color histogram
    '''
   
#     im32f=np.zeros((img.shape[:2]),np.uint32)
    hist_range = [[0, 256], [0, 256], [0, 256]]
    im32f = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32F, 3)
    cv.ConvertScale(img, im32f)
    
    
    hist=cv.CreateHist([32,32,32],cv.CV_HIST_ARRAY,hist_range,3)
    '''
    #create three histogram'''
    b = cv.CreateImage(cv.GetSize(im32f), cv.IPL_DEPTH_32F, 1)
    g = cv.CreateImage(cv.GetSize(im32f), cv.IPL_DEPTH_32F, 1)
    r = cv.CreateImage(cv.GetSize(im32f), cv.IPL_DEPTH_32F, 1)
    
   
    '''
    #create image backproject 32f, 8u
    '''
    backproject32f = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32F, 1)
    backproject8u = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    '''
    #create binary
    '''
    bw = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    '''
    #create kernel image
    '''
    kernel = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_ELLIPSE)
    cv.Split(im32f, b, g, r,None)

    planes=[b,g,r]
    cv.CalcHist(planes, hist)
    '''
    #find min and max histogram bin.
    '''
    minval=maxval=0.0
    min_idx=max_idx=0
    minval, maxval, min_idx, max_idx=cv.GetMinMaxHistValue(hist)
    '''
    # threshold histogram.  this sets the bin values that are below the threshold
    to zero
    '''
    cv.ThreshHist(hist, maxval/32.0)
    '''
    #backproject the thresholded histogram; the backprojection should contain higher values for
    #the background and lower values for the foreground
    '''
    cv.CalcBackProject(planes, backproject32f, hist)
    '''
    #convert to 8u type
    '''
    val_min = val_max = 0.0
    idx_min = idx_max = 0
    val_min, val_max, idx_min, idx_max = cv.MinMaxLoc(backproject32f)
    cv.ConvertScale(backproject32f, backproject8u, 255.0 / val_max)
    '''
    #threshold backprojected image. this gives us the background
    '''
    cv.Threshold(backproject8u, bw, 10, 255, cv2.THRESH_BINARY)
    '''
    #some morphology on background
    '''
    cv.Dilate(bw, bw,kernel,1)
    cv.MorphologyEx(bw, bw, None,kernel, cv2.MORPH_CLOSE, 2)
    '''
    #get the foreground
    '''
    cv.SubRS(bw,cv.Scalar(255,255,255),bw)
    cv.MorphologyEx(bw, bw, None, kernel,cv2.MORPH_OPEN,2)
    cv.Erode(bw, bw, kernel, 1)
    '''
    #find contours of foreground
    #Grabcut
    '''
    size=cv.GetSize(bw)
    color=np.asarray(img[:,:])
    fg=np.asarray(bw[:,:])
#     mask=cv.CreateMat(size[1], size[0], cv2.CV_8UC1)
    '''
    #Make anywhere black in the grey_image (output from MOG) as likely background
    #Make anywhere white in the grey_image (output from MOG) as definite foreground
    '''
    rect = (0,0,0,0)
   
    mat_mask=np.zeros((size[1],size[0]),dtype='uint8')
    mat_mask[:,:]=fg
    
    mat_mask[mat_mask[:,:] == 0] = 2
    mat_mask[mat_mask[:,:] == 255] = 1
    
    '''
    #Make containers 
    '''                               
    bgdModel = np.zeros((1, 13 * 5))
    fgdModel = np.zeros((1, 13 * 5))
    cv2.grabCut(color, mat_mask, rect, bgdModel, fgdModel,cv2.GC_INIT_WITH_MASK)
    '''
    #Multiple new mask by original image to get cut
    '''
    mask2 = np.where((mat_mask==0)|(mat_mask==2),0,1).astype('uint8')  
    gcfg=np.zeros((size[1],size[0]),np.uint8)
    gcfg=mask2
    
    img_cut = color*mask2[:,:,np.newaxis]

    contours, hierarchy=cv2.findContours(gcfg ,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    
    for cnt in contours:
        print cnt
        rect_box = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect_box)
        box = np.int0(box)
        cv2.drawContours(color,[box], 0,(0,0,255),2)
    cv2.imshow('demo', color)
    cv2.waitKey(0)
    def findBrightObjects(self):
        
        cv.NamedWindow("camera")

        while True :
            frame = cv.QueryFrame(self.video1)
        #     print type(frame)
            [cols, rows] = cv.GetSize(frame)  # GetSize returns (width, height)
            
#             image = cv.CreateMat(rows, cols, cv.CV_8UC3)
            image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels)        
            cv.Copy(frame, image)
#             image = cv.GetMat(frame)
            cv.ShowImage("camera", image)
            
           
#             grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1)
#             cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY)
            
#             convert to hsv
            hsvImage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels)  
            cv.CvtColor(image, hsvImage, cv.CV_BGR2HSV)
            cv.ShowImage("hsv", hsvImage)
           
#             hsvImage = cv2.cvtColor(imageArray, cv.CV_BGR2HSV)

#             h_plane = cv.CreateImage(cv.GetSize(image), 8, 1)
#             s_plane = cv.CreateImage(cv.GetSize(image), 8, 1)
#             v_plane = cv.CreateImage(cv.GetSize(image), 8, 1)
            
#           Split HSV into two of its three channels. V channel is same as greyscale image so ignore.
#             cv.Split(hsvImage, h_plane, s_plane, v_plane, None)
#             http://robbierickman.blogspot.co.uk/2011/11/laserduckpy-coloured-object-tracking.html
            
            
#             planes = [h_plane, s_plane]
# 
#             h_bins = 30
#             s_bins = 32
#             hist_size = [h_bins, s_bins]
#             # hue varies from 0 (~0 deg red) to 180 (~360 deg red again */
#             h_ranges = [0, 180]
#             # saturation varies from 0 (black-gray-white) to
#             # 255 (pure spectrum color)
#             s_ranges = [0, 255]
#             ranges = [h_ranges, s_ranges]
#             scale = 10
#             hist = cv.CreateHist([h_bins, s_bins], cv.CV_HIST_ARRAY, ranges, 1)
#             cv.CalcHist([cv.GetImage(i) for i in planes], hist)
#             (_, max_value, _, _) = cv.GetMinMaxHistValue(hist)
#         
#             hist_img = cv.CreateImage((h_bins*scale, s_bins*scale), 8, 3)
#         
#             for h in range(h_bins):
#                 for s in range(s_bins):
#                     bin_val = cv.QueryHistValue_2D(hist, h, s)
#                     intensity = cv.Round(bin_val * 255 / max_value)
#                     cv.Rectangle(hist_img,
#                                  (h*scale, s*scale),
#                                  ((h+1)*scale - 1, (s+1)*scale - 1),
#                                  cv.RGB(intensity, intensity, intensity),
#                                  cv.CV_FILLED)
            
            # http://uvhar.googlecode.com/hg/test/laser_tracker.py
            # Threshold ranges of HSV components.
            
#             cv.InRangeS(h_plane, hmin, hmax, h_plane)
# #             cv.InRangeS(s_plane, smin, smax, s_plane)
# #             cv.InRangeS(v_plane, vmin, vmax, v_plane)
#             
#             finalImage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
#     
#             # Perform an AND on HSV components to identify the laser!
#             cv.And(h_plane, s_plane, finalImage)
#             # This actually Worked OK for me without using Saturation.
#             # cv.cvAnd(laser_img, s_img,laser_img) 
#     
#             # Merge the HSV components back together.
#             cv.Merge(h_plane, s_plane, v_plane, None, hsvImage)
            
   
#             cv.ShowImage("hue", h_plane)
#             cv.ShowImage("saturation", s_plane)
#             cv.ShowImage("value", v_plane)
            
            thresholdImage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
            cv.InRangeS(hsvImage, cv.Scalar(0, 100, 100), cv.Scalar(50, 255, 255), thresholdImage)
            
#             thresholdImage = cv2.threshold(hsvImage, [0, 100, 100], [50, 255, 255], cv2.THRESH_BINARY)
            cv.ShowImage("threshold image", thresholdImage)
            
            # remove noise from threshold image
            kernel = cv.CreateStructuringElementEx(9, 9, 5, 5, cv.CV_SHAPE_CROSS) 
             
            # Dilate- replaces pixel value with highest value pixel in kernel
            cv.Dilate(thresholdImage, thresholdImage, kernel, 2)
             
            # Erode- replaces pixel value with lowest value pixel in kernel
            cv.Erode(thresholdImage, thresholdImage, kernel, 2)

#             element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
#             cv.Dilate(thresholdImage, element, thresholdImage)
#             cv2.erode(thresholdImage, element, thresholdImage)
            
            cv.ShowImage("cleaned image ", thresholdImage)
            
            # contour detection
            imageArray = np.asarray(cv.GetMat(thresholdImage), np.uint8, 1)
            print type(imageArray)
            imageArray = imageArray.T
            
            
            
#             contours, hier = cv2.findContours(imageArray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)   
#             print "TYPE " + str(type(contours)) + " " + str(len(contours))
#              
# #             for i in contours:
# #                 print i
#             maxArea = -1
#             contourIndex = -1
#             if contours: 
#                 for i in range(0, len(contours)):
#                     cnt = contours[i].astype('int')
#                     print type(cnt)
#                     area = cv2.contourArea(cnt)
#                     print area
#                     if area > maxArea:
#                         maxArea = area
#                         contourIndex = i
#                          
#             if contourIndex != -1:
#                 cv2.drawContours(imageArray, contours, contourIndex, (0, 0 , 255), 10)

            
            params = cv2.SimpleBlobDetector_Params()
#             params.minDistBetweenBlobs = 1.0  # minimum 10 pixels between blobs
#             params.filterByArea = True        # filter my blobs by area of blob
#             params.minArea = 5.0             # min 20 pixels squared
#             params.maxArea = 500.0            # max 500 pixels squared
            params.minDistBetweenBlobs = 50.0
            params.filterByInertia = False
            params.filterByConvexity = False
            params.filterByColor = False
            params.filterByCircularity = False
            params.filterByArea = True
            params.minArea = 20.0
            params.maxArea = 500.0
            
            myBlobDetector = cv2.SimpleBlobDetector(params)
            keypoints = myBlobDetector.detect(imageArray)
            print "blobs " + str(keypoints)
            
            # extract the x y coordinates of the keypoints: 

            for i in range(0, len(keypoints) - 1):
                print keypoints[i].pt
                pt1 = (int(keypoints[i].pt[0]), int(keypoints[i].pt[1]))
                pt2 = (int(keypoints[i + 1].pt[0]), int(keypoints[i + 1].pt[1]))
                cv2.line(imageArray, pt1, pt2, (255, 0, 0))
#                 float X=keypoints[i].pt.x; 
#                 float Y=keypoints[i].pt.y;
            
                     
            cv2.imshow("final detection ", imageArray)
#             
            
            if cv.WaitKey(10) == 27:
                break 
Example #17
    # 3. Frame differencing; the result goes to res
    cv.AbsDiff(frame1gray, frame2gray, res)  # difference between frame1gray and frame2gray, written to res
    # res = cv2.absdiff(frame1gray, frame2gray)  # the newer cv2 API
    cv.ShowImage("After AbsDiff", res)

    # 4. Save res as the foreground
    # cv.Convert(res, gray)
    # out_foreground.write(np.uint8(res))  # save as the foreground
    # out_foreground.write(np.asarray(cv.GetMat(res)))
    cv.WriteFrame(out_foreground, cv.GetImage(res))  # res is a CvMat; convert it to an IplImage

    # 5. Smoothing
    # cv.Smooth(res, res, cv.CV_BLUR, 5, 5)  # smooth res

    # 6. Morphological opening and closing
    element = cv.CreateStructuringElementEx(5 * 2 + 1, 5 * 2 + 1, 5, 5, cv.CV_SHAPE_RECT)  # CreateStructuringElementEx(cols, rows, anchorX, anchorY, shape, values=None)

    cv.MorphologyEx(res, res, None, element, cv.CV_MOP_OPEN)  # cv2.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]])
    cv.MorphologyEx(res, res, None, element, cv.CV_MOP_CLOSE)  # the corresponding open and close operations

    # 7. Binary thresholding: threshold the foreground to discard spurious foreground pixels
    cv.Threshold(res, res, 10, 255, cv.CV_THRESH_BINARY)  # binarize; cv.Threshold(src, dst, threshold, maxValue, thresholdType)


    # 8. Blob extraction of the elevator region ------------- unfinished
    # print(type(res))
    # # Set up the detector with default parameters.
    # detector = cv2.SimpleBlobDetector()
    # # Detect blobs.
    # keypoints = detector.detect(np.array(res))
    # # Draw detected blobs as red circles.
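A hedged sketch of the unfinished blob step above, following the commented-out code; it assumes the thresholded difference image has already been converted to a uint8 NumPy array, and it uses the OpenCV 2.x constructor cv2.SimpleBlobDetector (newer builds spell it cv2.SimpleBlobDetector_create):

import cv2

def detect_blobs(res_array):
    # res_array: the thresholded difference image as a uint8 NumPy array
    params = cv2.SimpleBlobDetector_Params()
    params.filterByColor = False
    params.filterByArea = True
    params.minArea = 20.0
    detector = cv2.SimpleBlobDetector(params)
    # each returned keypoint carries the blob centre in keypoint.pt
    return detector.detect(res_array)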