Example #1
def erode_image(image, v, h):
    vImage = cv.CreateImage((image.width, image.height), image.depth,
                            image.nChannels)
    hImage = cv.CreateImage((image.width, image.height), image.depth,
                            image.nChannels)
    vElement = cv.CreateStructuringElementEx(1, v, 0, v / 2, cv.CV_SHAPE_RECT)
    hElement = cv.CreateStructuringElementEx(h, 1, h / 2, 0, cv.CV_SHAPE_RECT)
    cv.Erode(image, vImage, vElement, 1)
    cv.Erode(vImage, hImage, hElement, 1)
    return hImage
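For comparison, the same two-pass directional erosion can be sketched with the newer array-based cv2 API; the function name erode_image_cv2 and the NumPy uint8 input are assumptions, not part of the original example:

import cv2


def erode_image_cv2(image, v, h):
    # 1 x v vertical bar and h x 1 horizontal bar, mirroring vElement/hElement above
    v_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, v))
    h_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (h, 1))
    v_image = cv2.erode(image, v_kernel, iterations=1)
    return cv2.erode(v_image, h_kernel, iterations=1)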
Example #2
def erodeImage(img):
    kernel = cv.CreateStructuringElementEx(9, 9, 5, 5, cv.CV_SHAPE_CROSS)
    # Erode- replaces pixel value with lowest value pixel in kernel
    cv.Erode(img, img, kernel, 2)
    # Dilate- replaces pixel value with highest value pixel in kernel
    cv.Dilate(img, img, kernel, 2)
    return img
Example #3
    def processImage(self, curframe):
        cv.Smooth(curframe, curframe)  #Remove false positives

        if not self.absdiff_frame:  #For the first time put values in difference, temp and moving_average
            self.absdiff_frame = cv.CloneImage(curframe)
            self.previous_frame = cv.CloneImage(curframe)
            cv.Convert(
                curframe, self.average_frame
            )  # Convert because RunningAvg accumulates into a 32F image
        else:
            cv.RunningAvg(curframe, self.average_frame,
                          0.05)  #Compute the average

        cv.Convert(self.average_frame,
                   self.previous_frame)  #Convert back to 8U frame

        cv.AbsDiff(curframe, self.previous_frame,
                   self.absdiff_frame)  # moving_average - curframe

        cv.CvtColor(
            self.absdiff_frame, self.gray_frame,
            cv.CV_RGB2GRAY)  # Convert to grayscale so the threshold can be applied
        cv.Threshold(self.gray_frame, self.gray_frame, 50, 255,
                     cv.CV_THRESH_BINARY)

        cv.Dilate(self.gray_frame, self.gray_frame, None,
                  15)  #to get object blobs
        cv.Erode(self.gray_frame, self.gray_frame, None, 10)
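The running-average / frame-difference pipeline above translates almost line for line to the cv2 array API. A minimal sketch, assuming 8-bit BGR frames and a caller-maintained float32 accumulator (process_frame and its arguments are illustrative names only):

import cv2


def process_frame(frame, average):
    # 'average' is a float32 accumulator with the same shape as 'frame'
    cv2.accumulateWeighted(frame, average, 0.05)   # compute the running average
    background = cv2.convertScaleAbs(average)      # convert back to an 8-bit frame
    diff = cv2.absdiff(frame, background)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)  # grayscale so it can be thresholded
    _, mask = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
    mask = cv2.dilate(mask, None, iterations=15)   # grow changed pixels into blobs
    mask = cv2.erode(mask, None, iterations=10)
    return mask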
Example #4
    def post_process_distance_img(self, dist_img):
        inverted_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)

        #Blur image
        soft_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Smooth(dist_img, soft_img, cv.CV_GAUSSIAN, 21, 21)


        #Apply threshold to have just black and white
        thresh_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Threshold(soft_img, thresh_img, 1, 255, cv.CV_THRESH_BINARY)#CV_THRESH_OTSU is an adaptive thresholding method



        #Create inverse image (inverted_img must be filled before it is eroded)
        for r in range(0,thresh_img.rows):
            for c in range(0,thresh_img.cols):
                inverted_img[r,c]=255-thresh_img[r,c]

        #Erode the inverse map to get its skeleton
        eroded_img = cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Erode(inverted_img, eroded_img, iterations=10)

        #Create inverse image
        for r in range(0,eroded_img.rows):
            for c in range(0,eroded_img.cols):
                inverted_img[r,c]=255-eroded_img[r,c]

        return inverted_img
Example #5
def difference_image(img1, img2):
    print " simg1 = simplify(img1)"
    simg1 = simplify(img1)
    print " simg2 = simplify(img2)"
    simg2 = simplify(img2)

    #dbg_image('simg1',simg1)
    #dbg_image('simg2',simg2)

    #create image buffers
    img3 = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    simg3 = cv.CloneImage(img3)
    bitimage = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    eimg3 = cv.CloneImage(bitimage)

    #process
    print " cv.AbsDiff(simg2,simg1,img3)"
    cv.AbsDiff(simg2, simg1, img3)
    print " cv.Smooth(img3,simg3)"
    cv.Smooth(img3, simg3)
    #dbg_image('simg3',simg3)
    # these threshold values must be calibrated
    #cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_TOZERO_INV)
    print " cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_BINARY)"
    cv.Threshold(simg3, bitimage, 50, 255, cv.CV_THRESH_BINARY)
    #dbg_image('bitimage',bitimage)
    print " cv.Erode(bitimage,eimg3)"
    cv.Erode(bitimage, eimg3)
    #dbg_image('eimg3',eimg3)
    return eimg3
Example #6
def do_loop(self):
    # image processing
    if self.config.threshold:
        cv.Threshold(self.img_original, self.img_target,
                     self.config.pix_thresh_min, 0xff, cv.CV_THRESH_BINARY)
        cv.And(self.img_target, self.img_mask, self.img_target)
    if self.config.dilate:
        cv.Dilate(self.img_target,
                  self.img_target,
                  iterations=self.config.dilate)
    if self.config.erode:
        cv.Erode(self.img_target,
                 self.img_target,
                 iterations=self.config.erode)
    show_image(self)

    sys.stdout.write('> ')
    sys.stdout.flush()
    # keystroke processing
    ki = cv.WaitKey(0)

    # Simple character value, if applicable
    kc = None
    # Char if a common char, otherwise the integer code
    k = ki

    if 0 <= ki < 256:
        kc = chr(ki)
        k = kc
    elif 65506 < ki < 66000 and ki != 65535:
        ki2 = ki - 65506 - 30
        # modifier keys
        if ki2 >= 0:
            kc = chr(ki2)
            k = kc

    if kc:
        print '%d (%s)\n' % (ki, kc)
    else:
        print '%d\n' % ki

    if ki > 66000:
        return
    if ki < 0:
        print "Exiting on closed window"
        self.running = False
        return
    on_key(self, k)
Example #7
def findImage(img):
    #Set up storage for images
    frame_size = cv.GetSize(img)
    img2 = cv.CreateImage(frame_size,8,3)
    tmp = cv.CreateImage(frame_size,8,cv.CV_8U)
    h = cv.CreateImage(frame_size,8,1)

    #copy original image to do work on
    cv.Copy(img,img2)

    #altering the image a bit for smoother processing
    cv.Smooth(img2,img2,cv.CV_BLUR,3)
    cv.CvtColor(img2,img2,cv.CV_BGR2HSV)

    #make sure temp is empty
    cv.Zero(tmp)

    #detection based on HSV value
    #30,100,90 lower limit on pic 41,255,255 on pic
    #cv.InRangeS(img2,cv.Scalar(25,100,87),cv.Scalar(50,255,255),tmp)
    #Range for green plate dot in my Living room
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(65,95,90),tmp)
    #classroom
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(70,110,70),tmp)
    #Kutztowns Gym
    cv.InRangeS(img2,cv.Scalar(65,100,112),cv.Scalar(85,107,143),tmp)

    elmt_shape=cv.CV_SHAPE_ELLIPSE
    pos = 3
    element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, elmt_shape)
    cv.Dilate(tmp,tmp,element,6)
    cv.Erode(tmp,tmp,element,2)

    cv.Split(tmp,h,None,None,None)
    storage = cv.CreateMemStorage()

    scan = sc.FindContours(h,storage)
    xyImage=drawCircles(scan,img)

    if xyImage is not None:
        return (xyImage, tmp)
    else:
        return None
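A compressed cv2 sketch of the same HSV-range detection; find_dot is a made-up name, and the bounds simply copy the "Kutztowns Gym" values used above:

import cv2
import numpy as np


def find_dot(img):
    blurred = cv2.blur(img, (3, 3))
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array([65, 100, 112]), np.array([85, 107, 143]))
    # elliptical element, dilate then erode as in findImage
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    mask = cv2.dilate(mask, kernel, iterations=6)
    mask = cv2.erode(mask, kernel, iterations=2)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x signature
    return contours, mask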
Example #8
    def applyEffect(self, image, width, height):
        ipl_img = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]),
                                           cv.IPL_DEPTH_8U, 3)
        cv2.cv.SetData(ipl_img, image.tostring(),
                       image.dtype.itemsize * 3 * image.shape[1])

        gray = cv.CreateImage((width, height), 8, 1)  #tuple as the first arg

        dst_img = cv.CreateImage(cv.GetSize(ipl_img), cv.IPL_DEPTH_8U,
                                 3)  #_16S  => cv2.cv.iplimage
        if self.effect == 'dilate':
            cv.Dilate(ipl_img, dst_img, None, 5)
        elif self.effect == 'laplace':
            cv.Laplace(ipl_img, dst_img, 3)
        elif self.effect == 'smooth':
            cv.Smooth(ipl_img, dst_img, cv.CV_GAUSSIAN)
        elif self.effect == 'erode':
            cv.Erode(ipl_img, dst_img, None, 1)

        cv.Convert(dst_img, ipl_img)
        return self.ipl2tk_image(dst_img)
Example #9
    def motionDetect(self, img):
        cv.Smooth(img, img, cv.CV_GAUSSIAN, 3, 0)

        cv.RunningAvg(img, self.movingAvg, 0.020, None)
        cv.ConvertScale(self.movingAvg, self.tmp, 1.0, 0.0)
        cv.AbsDiff(img, self.tmp, self.diff)
        cv.CvtColor(self.diff, self.grayImage, cv.CV_RGB2GRAY)
        cv.Threshold(self.grayImage, self.grayImage, 70,255, cv.CV_THRESH_BINARY)
        cv.Dilate(self.grayImage, self.grayImage, None, 18)#18   
        cv.Erode(self.grayImage, self.grayImage, None, 10)#10
        storage = cv.CreateMemStorage(0)
        contour = cv.FindContours(self.grayImage, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
#        points = []                                                                                      
        while contour:
            boundRect = cv.BoundingRect(list(contour))
            contour = contour.h_next()
            pt1 = (boundRect[0], boundRect[1])
            pt2 = (boundRect[0] + boundRect[2], boundRect[1] + boundRect[3])
            cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255,255,0), 1)

        return img
Example #10
for files in glob.glob('D:/pic/car/*.jpg'):
    filepath,filename = os.path.split(files)
    image = cv2.imread(filepath + '/' + filename)
     # image = cv2.imread('D:/pic/car2.jpg')
    h,w = image.shape[:2]
    # Convert to grayscale
    gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    grayIPimage = cv.GetImage(cv.fromarray(gray))
    sobel  = cv.CreateImage((w, h),cv2.IPL_DEPTH_16S, 1)  # create a 16-bit signed image to hold the result
    cv.Sobel(grayIPimage,sobel,2,0,7)         # Sobel edge detection in the x direction
    temp  = cv.CreateImage(cv.GetSize(sobel),cv2.IPL_DEPTH_8U, 1)       # convert back to 8-bit depth for the next processing step
    cv.ConvertScale(sobel, temp,0.00390625, 0)
    cv.Threshold(temp, temp, 0, 255, cv2.THRESH_OTSU)
    kernal = cv.CreateStructuringElementEx(3,1, 1, 0, 0)
    cv.Dilate(temp, temp,kernal,2)
    cv.Erode(temp, temp,kernal,4)
    cv.Dilate(temp, temp,kernal,2)
#     cv.ShowImage('1', temp)
    kernal = cv.CreateStructuringElementEx(1,3, 0, 1, 0)
    cv.Erode(temp, temp,kernal,1)
    cv.Dilate(temp, temp,kernal,3)
#     cv.ShowImage('2', temp)
    temp = np.asarray(cv.GetMat(temp))
    contours, heirs  = cv2.findContours(temp,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    
    for tours in contours:
        rc = cv2.boundingRect(tours)
        if rc[2]/rc[3] >= 2:
            # rc[0] is the top-left x coordinate, rc[1] the top-left y coordinate, rc[2] the width, rc[3] the height
            cv2.rectangle(image, (rc[0],rc[1]),(rc[0]+rc[2],rc[1]+rc[3]),(255,0,255))
            imageIp = cv.GetImage(cv.fromarray(image))
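Roughly the same x-gradient / Otsu / directional-morphology pass expressed with the cv2 array API; the kernel sizes and the 2:1 aspect filter are taken from the loop above, while find_plate_candidates and the rest are only a sketch:

import cv2


def find_plate_candidates(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    sobel = cv2.Sobel(gray, cv2.CV_16S, 2, 0, ksize=7)     # x-direction Sobel
    sobel8u = cv2.convertScaleAbs(sobel, alpha=1.0 / 256)  # back to 8-bit depth
    _, thresh = cv2.threshold(sobel8u, 0, 255,
                              cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # 3x1 horizontal and 1x3 vertical elements, as above
    horiz = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    vert = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 3))
    thresh = cv2.dilate(thresh, horiz, iterations=2)
    thresh = cv2.erode(thresh, horiz, iterations=4)
    thresh = cv2.dilate(thresh, horiz, iterations=2)
    thresh = cv2.erode(thresh, vert, iterations=1)
    thresh = cv2.dilate(thresh, vert, iterations=3)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x signature
    rects = [cv2.boundingRect(c) for c in contours]
    return [rc for rc in rects if rc[3] and rc[2] / float(rc[3]) >= 2]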
Example #11
def erodeImage(im, nbiter=0):
    for i in range(nbiter):
        cv.Erode(im, im)
Example #12
    def run(self):
        logging.debug(' starting run ')
        global samecolorclient
        global capture
        global centroidList  #abh
        global lock  #abh
        global lock2  #abh
        global lock3  #abh
        global lock4  #abh
        mydata = threading.local()
        #window1=" Color Detection"
        mydata.window2 = str(self.name) + " Threshold"
        #cv.NamedWindow(window1,0)
        lock4.acquire()  #abh
        cv.NamedWindow(mydata.window2, 0)
        lock4.release()  #abh
        mydata.centroidold = [0, 0]
        mydata.flag = 0
        mydata.roi = [100, 22, 390, 390]
        #mydata.roi=[95,40,380,350]
        while True:
            lock2.acquire()  #abh
            lock4.acquire()  #abh
            mydata.color_image = cv.QueryFrame(capture)
            lock4.release()  #abh
            lock2.release()  #abh
            if (mydata.flag == 0):
                lock4.acquire  #abh lock4.release        #abh
                mydata.color_image = cv.GetSubRect(mydata.color_image,
                                                   (100, 22, 390, 390))
                lock4.release  #abh
            else:
                lock4.acquire  #abh lock4.release        #abh
                mydata.color_image = cv.GetSubRect(
                    mydata.color_image,
                    (int(mydata.roi[0]), int(mydata.roi[1]), int(
                        mydata.roi[2]), int(mydata.roi[3])))
                lock4.release  #abh
            lock4.acquire  #abh lock4.release        #abh
            cv.Flip(mydata.color_image, mydata.color_image, 1)
            cv.Smooth(mydata.color_image, mydata.color_image, cv.CV_MEDIAN, 3,
                      0)
            #logging.debug(' Starting getthresholdedimg ')
            mydata.imghsv = cv.CreateImage(cv.GetSize(mydata.color_image), 8,
                                           3)
            cv.CvtColor(mydata.color_image, mydata.imghsv,
                        cv.CV_BGR2YCrCb)  # Convert image from BGR to YCrCb
            mydata.imgnew = cv.CreateImage(cv.GetSize(mydata.color_image),
                                           cv.IPL_DEPTH_8U, 1)
            mydata.imgthreshold = cv.CreateImage(
                cv.GetSize(mydata.color_image), 8, 1)
            lock4.release  #abh
            mydata.c = self.color[0]
            mydata.minc = (float(mydata.c[0]), float(mydata.c[1]),
                           float(mydata.c[2]))
            mydata.c = self.color[1]
            mydata.maxc = (float(mydata.c[0]), float(mydata.c[1]),
                           float(mydata.c[2]))
            lock4.acquire  #abh lock4.release        #abh
            cv.InRangeS(mydata.imghsv, cv.Scalar(*(mydata.minc)),
                        cv.Scalar(*(mydata.maxc)), mydata.imgnew)
            cv.Add(mydata.imgnew, mydata.imgthreshold, mydata.imgthreshold)
            #logging.debug(' Exiting getthreasholdedimg')
            #logging.debug('function returned from thresholdedimg')
            cv.Erode(mydata.imgthreshold, mydata.imgthreshold, None, 1)
            cv.Dilate(mydata.imgthreshold, mydata.imgthreshold, None, 4)
            mydata.img2 = cv.CloneImage(mydata.imgthreshold)
            mydata.storage = cv.CreateMemStorage(0)
            mydata.contour = cv.FindContours(mydata.imgthreshold,
                                             mydata.storage,
                                             cv.CV_RETR_EXTERNAL,
                                             cv.CV_CHAIN_APPROX_SIMPLE)
            lock4.release  #abh
            mydata.points = []
            #logging.debug('Starting while contour')
            while mydata.contour:
                # Draw bounding rectangles
                lock4.acquire  #abh lock4.release        #abh
                mydata.bound_rect = cv.BoundingRect(list(mydata.contour))
                lock4.release  #abh
                mydata.contour = mydata.contour.h_next()
                mydata.pt1 = (mydata.bound_rect[0], mydata.bound_rect[1])
                mydata.pt2 = (mydata.bound_rect[0] + mydata.bound_rect[2],
                              mydata.bound_rect[1] + mydata.bound_rect[3])
                mydata.points.append(mydata.pt1)
                mydata.points.append(mydata.pt2)
                lock4.acquire  #abh lock4.release        #abh
                cv.Rectangle(
                    mydata.color_image, mydata.pt1, mydata.pt2,
                    cv.CV_RGB(mydata.maxc[0], mydata.maxc[1], mydata.maxc[2]),
                    1)
                lock4.release  #abh
                # Calculating centroids
                if (((mydata.bound_rect[2]) * (mydata.bound_rect[3])) < 3500):
                    #logging.debug('Inside iffffffffffffffffffffffff')
                    lock4.acquire  #abh lock4.release        #abh
                    mydata.centroidx = cv.Round(
                        (mydata.pt1[0] + mydata.pt2[0]) / 2)
                    mydata.centroidy = cv.Round(
                        (mydata.pt1[1] + mydata.pt2[1]) / 2)
                    lock4.release  #abh
                    if (mydata.flag == 1):
                        #logging.debug("inside flag1")
                        mydata.centroidx = mydata.roi[0] + mydata.centroidx
                        mydata.centroidy = mydata.roi[1] + mydata.centroidy
                    mydata.centroidnew = [mydata.centroidx, mydata.centroidy]
                    #logging.debug('mydataroi[0] '+str(mydata.roi[0]) + ';centroidx ' + str(mydata.centroidx))
                    #logging.debug('mydataroi[1] '+str(mydata.roi[1]) + ';centroidy ' + str(mydata.centroidy))
                    #print mydata.centroidx                                 #abh
                    #print mydata.centroidy                                 #abh
                    mydata.tmpclient = []
                    lock3.acquire()  #abh
                    mydata.tmpclient = samecolorclient[self.i]
                    lock3.release()  #abh
                    mydata.distance = math.sqrt(
                        math.pow((mydata.centroidnew[0] -
                                  mydata.centroidold[0]), 2) +
                        math.pow((mydata.centroidnew[1] -
                                  mydata.centroidold[1]), 2))
                    #lock.acquire()                                         #abh                                                            #abh commented
                    for mydata.j in range(len(mydata.tmpclient)):
                        mydata.client_socket = mydata.tmpclient[mydata.j]
                        #logging.debug('before centroid send...')
                        if (mydata.distance >= 1.50):
                            print 'inside 1.50 '

                            #self.server_socket.sendto(str(mydata.centroidnew),mydata.client_socket) #abh
                            lock.acquire()  #abh
                            centroidList[colorlist.index(
                                self.color)] = mydata.centroidnew  #abh
                            del mydata.centroidold[:]
                            #logging.debug(str(centroidList))
                            self.server_socket.sendto(
                                str(centroidList), mydata.client_socket)  #abh
                            lock.release()  #abh
                            #logging.debug ('updating done.')                                                 #abh
                            #print centroidList                                                       #abh
                            mydata.centroidold = mydata.centroidnew[:]
                        else:
                            #self.server_socket.sendto(str(mydata.centroidold),mydata.client_socket) #abh
                            lock.acquire()  #abh
                            centroidList[colorlist.index(
                                self.color)] = mydata.centroidold  #abh
                            #logging.debug(str(centroidList))
                            self.server_socket.sendto(
                                str(centroidList), mydata.client_socket)  #abh
                            lock.release()  #abh
                            #logging.debug ('updating done2.')                                                  #abh
                            #print centroidList                                                       #abh
                    #    logging.debug('byte sent to client')
                    #lock.release()                                         #abh
                    mydata.roi[0] = mydata.centroidx - 50
                    mydata.roi[1] = mydata.centroidy - 50
                    if (mydata.roi[0] < 95):
                        mydata.roi[0] = 95
                    if (mydata.roi[1] < 40):
                        mydata.roi[1] = 40
                    mydata.roi[2] = 100
                    mydata.roi[3] = 100
                    if ((mydata.roi[0] + mydata.roi[2]) > 475):
                        mydata.roi[0] = mydata.roi[0] - (
                            (mydata.roi[0] + mydata.roi[2]) - 475)
                    if ((mydata.roi[1] + mydata.roi[3]) > 390):
                        mydata.roi[1] = mydata.roi[1] - (
                            (mydata.roi[1] + mydata.roi[3]) - 390)
                    #del mydata.centroidnew[:]
                    mydata.flag = 1
            if mydata.contour is None:
                mydata.flag = 0
            #cv.ShowImage(window1,mydata.color_image)
            lock4.acquire  #abh lock4.release        #abh
            cv.ShowImage(mydata.window2, mydata.img2)
            lock4.release  #abh

            if cv.WaitKey(33) == 27:  #here it was 33 instead of 10
                #cv.DestroyWindow(mydata.window1)
                #cv.DestroyWindow(mydata.window2)
                break
Example #13
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        #nframes =+ 1

        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(cv.GetSize(frame), 8, 3)
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)

        def totuple(a):
            try:
                return tuple(totuple(i) for i in a)
            except TypeError:
                return a

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]

            color_image = cv.QueryFrame(self.capture)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, .1, None)
                cv.ShowImage("BG", moving_average)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)
            #cv.ShowImage("BG",difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.ShowImage("BG1", grey_image)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 40, 255, cv.CV_THRESH_BINARY)
            #cv.ShowImage("BG2", grey_image)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 8)
            cv.Erode(grey_image, grey_image, None, 3)
            cv.ShowImage("BG3", grey_image)

            storage = cv.CreateMemStorage(0)
            global contour
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            points = []

            while contour:
                global bound_rect
                bound_rect = cv.BoundingRect(list(contour))
                polygon_points = cv.ApproxPoly(list(contour), storage,
                                               cv.CV_POLY_APPROX_DP)
                contour = contour.h_next()

                global pt1, pt2
                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2],
                       bound_rect[1] + bound_rect[3])

                #size control
                if (bound_rect[2] > 10) and (bound_rect[3] > 10):

                    points.append(pt1)
                    points.append(pt2)

                    #points += list(polygon_points)
                    global box, box2, box3, box4, box5
                    box = cv.MinAreaRect2(polygon_points)
                    box2 = cv.BoxPoints(box)
                    box3 = np.int0(np.around(box2))
                    box4 = totuple(box3)
                    box5 = box4 + (box4[0], )

                    cv.FillPoly(grey_image, [
                        list(polygon_points),
                    ], cv.CV_RGB(255, 255, 255), 0, 0)
                    cv.PolyLine(color_image, [
                        polygon_points,
                    ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                    cv.PolyLine(color_image, [list(box5)], 0, (0, 0, 255), 2)
                    #cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

                    if len(points):
                        #center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                        center1 = (pt1[0] + pt2[0]) / 2
                        center2 = (pt1[1] + pt2[1]) / 2
                        #print center1, center2, center_point
                        #cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                        #cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                        #cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                        cv.Circle(color_image, (center1, center2), 5,
                                  cv.CV_RGB(0, 0, 255), -1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                #cv.DestroyAllWindows()
                break
Example #14
def procImg(img, sideName, dispFlag):

    #creates empty images of the same size
    imdraw = cv.CreateImage(cv.GetSize(img), 8, 3)
    #put the smoothed image here
    imgSmooth = cv.CreateImage(cv.GetSize(img), 8, 3)

    cv.SetZero(imdraw)
    cv.Smooth(img, imgSmooth, cv.CV_GAUSSIAN, 3, 0)  #Gaussian filter the image
    imgbluethresh = getthresholdedimg(
        imgSmooth)  #Get a color thresholed binary image
    cv.Erode(imgbluethresh, imgbluethresh, None, 3)
    cv.Dilate(imgbluethresh, imgbluethresh, None, 10)
    #img2 = cv.CloneImage(imgbluethresh)
    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(imgbluethresh, storage, cv.CV_RETR_CCOMP,
                              cv.CV_CHAIN_APPROX_SIMPLE)

    centroidx = 0
    centroidy = 0
    prevArea = 0
    pt1 = (0, 0)
    pt2 = (0, 0)

    while contour:
        #find the area of each collection of contiguous points (contour)
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()

        #get the largest contour
        area = bound_rect[2] * bound_rect[3]

        if dispFlag:
            print("Area= " + str(area))

        if (area > prevArea and area > 3000):
            prevArea = area  # remember the largest area seen so far
            pt1 = (bound_rect[0], bound_rect[1])
            pt2 = (bound_rect[0] + bound_rect[2],
                   bound_rect[1] + bound_rect[3])

    # Draw bounding rectangle
    cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255, 0, 0), 3)

    # calculating centroid
    centroidx = cv.Round((pt1[0] + pt2[0]) / 2)
    centroidy = cv.Round((pt1[1] + pt2[1]) / 2)

    if (centroidx == 0 or centroidy == 0):
        print("no blimp detected from " + sideName)
    else:
        print(sideName + " centroid x:" + str(centroidx))
        print(sideName + " centroid y:" + str(centroidy))

    print("")

    if dispFlag:
        small_thresh = cv.CreateImage(
            (int(0.25 * cv.GetSize(imgbluethresh)[0]),
             int(0.25 * cv.GetSize(imgbluethresh)[1])), 8, 1)
        cv.Resize(imgbluethresh, small_thresh)
        cv.ShowImage(sideName + "_threshold", small_thresh)
        cv.WaitKey(100)

        small_hsv = cv.CreateImage((int(
            0.25 * cv.GetSize(imghsv)[0]), int(0.25 * cv.GetSize(imghsv)[1])),
                                   8, 3)
        cv.Resize(imghsv, small_hsv)
        cv.ShowImage(sideName + "_hsv", small_hsv)
        cv.WaitKey(100)

    return (centroidx, centroidy)
Example #15
    if Data[Grid_Intersections.index((x, y))] == '0':
        Data[Grid_Intersections.index((x, y))] = '1'
    else:
        Data[Grid_Intersections.index((x, y))] = '0'
    return get_data(x, y)


# main loop
Target = cv.CloneImage(Img)
while True:
    # image processing
    if Dilate:
        cv.Dilate(Target, Target, iterations=Dilate)
        Dilate = 0
    if Erode:
        cv.Erode(Target, Target, iterations=Erode)
        Erode = 0
    if Threshold:
        cv.Threshold(Img, Target, Threshold_Min, 0xff, cv.CV_THRESH_BINARY)
        cv.And(Target, Mask, Target)

    show_image()
    # keystroke processing
    k = cv.WaitKey(0)
    print k
    if k > 66000:
        continue
    if k < 256:
        k = chr(k)
    else:
        if k > 65506 and k != 65535:
Example #16
    def findBrightObjects(self):
        
        cv.NamedWindow("camera")

        while True :
            frame = cv.QueryFrame(self.video1)
        #     print type(frame)
            [rows, cols] = cv.GetSize(frame)
            
#             image = cv.CreateMat(rows, cols, cv.CV_8UC3)
            image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels)        
            cv.Copy(frame, image)
#             image = cv.GetMat(frame)
            cv.ShowImage("camera", image)
            
           
#             grayScaleFullImage = cv.CreateImage((image.width, image.height), 8, 1)
#             cv.CvtColor(image, grayScaleFullImage, cv.CV_BGR2GRAY)
            
#             convert to hsv
            hsvImage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, frame.nChannels)  
            cv.CvtColor(image, hsvImage, cv.CV_BGR2HSV)
            cv.ShowImage("hsv", hsvImage)
           
#             hsvImage = cv2.cvtColor(imageArray, cv.CV_BGR2HSV)

#             h_plane = cv.CreateImage(cv.GetSize(image), 8, 1)
#             s_plane = cv.CreateImage(cv.GetSize(image), 8, 1)
#             v_plane = cv.CreateImage(cv.GetSize(image), 8, 1)
            
#           Split HSV into two of it's three channels. V channel is same as greyscale image so ignore.
#             cv.Split(hsvImage, h_plane, s_plane, v_plane, None)
#             http://robbierickman.blogspot.co.uk/2011/11/laserduckpy-coloured-object-tracking.html
            
            
#             planes = [h_plane, s_plane]
# 
#             h_bins = 30
#             s_bins = 32
#             hist_size = [h_bins, s_bins]
#             # hue varies from 0 (~0 deg red) to 180 (~360 deg red again */
#             h_ranges = [0, 180]
#             # saturation varies from 0 (black-gray-white) to
#             # 255 (pure spectrum color)
#             s_ranges = [0, 255]
#             ranges = [h_ranges, s_ranges]
#             scale = 10
#             hist = cv.CreateHist([h_bins, s_bins], cv.CV_HIST_ARRAY, ranges, 1)
#             cv.CalcHist([cv.GetImage(i) for i in planes], hist)
#             (_, max_value, _, _) = cv.GetMinMaxHistValue(hist)
#         
#             hist_img = cv.CreateImage((h_bins*scale, s_bins*scale), 8, 3)
#         
#             for h in range(h_bins):
#                 for s in range(s_bins):
#                     bin_val = cv.QueryHistValue_2D(hist, h, s)
#                     intensity = cv.Round(bin_val * 255 / max_value)
#                     cv.Rectangle(hist_img,
#                                  (h*scale, s*scale),
#                                  ((h+1)*scale - 1, (s+1)*scale - 1),
#                                  cv.RGB(intensity, intensity, intensity),
#                                  cv.CV_FILLED)
            
            # http://uvhar.googlecode.com/hg/test/laser_tracker.py
            # Threshold ranges of HSV components.
            
#             cv.InRangeS(h_plane, hmin, hmax, h_plane)
# #             cv.InRangeS(s_plane, smin, smax, s_plane)
# #             cv.InRangeS(v_plane, vmin, vmax, v_plane)
#             
#             finalImage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
#     
#             # Perform an AND on HSV components to identify the laser!
#             cv.And(h_plane, s_plane, finalImage)
#             # This actually Worked OK for me without using Saturation.
#             # cv.cvAnd(laser_img, s_img,laser_img) 
#     
#             # Merge the HSV components back together.
#             cv.Merge(h_plane, s_plane, v_plane, None, hsvImage)
            
   
#             cv.ShowImage("hue", h_plane)
#             cv.ShowImage("saturation", s_plane)
#             cv.ShowImage("value", v_plane)
            
            thresholdImage = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
            cv.InRangeS(hsvImage, cv.Scalar(0, 100, 100), cv.Scalar(50, 255, 255), thresholdImage)
            
#             thresholdImage = cv2.threshold(hsvImage, [0, 100, 100], [50, 255, 255], cv2.THRESH_BINARY)
            cv.ShowImage("threshold image", thresholdImage)
            
            # remove noise from threshold image
            kernel = cv.CreateStructuringElementEx(9, 9, 5, 5, cv.CV_SHAPE_CROSS) 
             
            # Dilate- replaces pixel value with highest value pixel in kernel
            cv.Dilate(thresholdImage, thresholdImage, kernel, 2)
             
            # Erode- replaces pixel value with lowest value pixel in kernel
            cv.Erode(thresholdImage, thresholdImage, kernel, 2)

#             element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
#             cv.Dilate(thresholdImage, element, thresholdImage)
#             cv2.erode(thresholdImage, element, thresholdImage)
            
            cv.ShowImage("cleaned image ", thresholdImage)
            
            # contour detection
            imageArray = np.asarray(cv.GetMat(thresholdImage), np.uint8, 1)
            print type(imageArray)
            imageArray = imageArray.T
            
            
            
#             contours, hier = cv2.findContours(imageArray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)   
#             print "TYPE " + str(type(contours)) + " " + str(len(contours))
#              
# #             for i in contours:
# #                 print i
#             maxArea = -1
#             contourIndex = -1
#             if contours: 
#                 for i in range(0, len(contours)):
#                     cnt = contours[i].astype('int')
#                     print type(cnt)
#                     area = cv2.contourArea(cnt)
#                     print area
#                     if area > maxArea:
#                         maxArea = area
#                         contourIndex = i
#                          
#             if contourIndex != -1:
#                 cv2.drawContours(imageArray, contours, contourIndex, (0, 0 , 255), 10)

            
            params = cv2.SimpleBlobDetector_Params()
#             params.minDistBetweenBlobs = 1.0  # minimum 10 pixels between blobs
#             params.filterByArea = True        # filter my blobs by area of blob
#             params.minArea = 5.0             # min 20 pixels squared
#             params.maxArea = 500.0            # max 500 pixels squared
            params.minDistBetweenBlobs = 50.0
            params.filterByInertia = False
            params.filterByConvexity = False
            params.filterByColor = False
            params.filterByCircularity = False
            params.filterByArea = True
            params.minArea = 20.0
            params.maxArea = 500.0
            
            myBlobDetector = cv2.SimpleBlobDetector(params)
            keypoints = myBlobDetector.detect(imageArray)
            print "blobs " + str(keypoints)
            
            # extract the x y coordinates of the keypoints: 

            for i in range(0, len(keypoints) - 1):
                print keypoints[i].pt
                pt1 = (int(keypoints[i].pt[0]), int(keypoints[i].pt[1]))
                pt2 = (int(keypoints[i + 1].pt[0]), int(keypoints[i + 1].pt[1]))
                cv2.line(imageArray, pt1, pt2, (255, 0, 0))
#                 float X=keypoints[i].pt.x; 
#                 float Y=keypoints[i].pt.y;
            
                     
            cv2.imshow("final detection ", imageArray)
#             
            
            if cv.WaitKey(10) == 27:
                break 
Example #17
    def run(self):
        # Capture first frame to get size
        frame = self.get_image2()
        frame_size = cv.GetSize(frame)
        color_image = cv.CreateImage(frame_size, 8, 3)
        grey_image = cv.CreateImage(frame_size, cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(frame_size, cv.IPL_DEPTH_32F, 3)

        first = True

        while True:
            closest_to_left = cv.GetSize(frame)[0]
            closest_to_right = cv.GetSize(frame)[1]
            print "getting Image"
            color_image = self.get_image2()
            print "got image"

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)

            if first:
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
                first = False
            else:
                cv.RunningAvg(color_image, moving_average, 0.30, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image to grayscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Convert the image to black and white.
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            # Dilate and erode to get people blobs
            cv.Dilate(grey_image, grey_image, None, 18)
            cv.Erode(grey_image, grey_image, None, 10)

            storage = cv.CreateMemStorage(0)
            contour = cv.FindContours(grey_image, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            points = []

            while contour:
                bound_rect = cv.BoundingRect(list(contour))
                contour = contour.h_next()

                pt1 = (bound_rect[0], bound_rect[1])
                pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
                points.append(pt1)
                points.append(pt2)
                cv.Rectangle(color_image, pt1, pt2, cv.CV_RGB(255,0,0), 1)

            if len(points):
                center_point = reduce(lambda a, b: ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2), points)
                cv.Circle(color_image, center_point, 40, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 30, cv.CV_RGB(255, 100, 0), 1)
                cv.Circle(color_image, center_point, 20, cv.CV_RGB(255, 255, 255), 1)
                cv.Circle(color_image, center_point, 10, cv.CV_RGB(255, 100, 0), 1)

            cv.ShowImage("Target", color_image)

            # Listen for ESC key
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
Example #18
capture = cv.CaptureFromCAM(0)
sleep(5)
frame = cv.QueryFrame(capture)
frame_size = cv.GetSize(frame)
grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("Real")
cv.NamedWindow("Threshold")
while (1):
    color_image = cv.QueryFrame(capture)
    imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.Flip(color_image, color_image, 1)
    cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
    imgyellowthresh = getthresholdedimg(color_image)
    cv.Erode(imgyellowthresh, imgyellowthresh, None, 3)
    cv.Dilate(imgyellowthresh, imgyellowthresh, None, 10)

    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(imgyellowthresh, storage, cv.CV_RETR_EXTERNAL,
                              cv.CV_CHAIN_APPROX_SIMPLE)
    points = []

    #	This is the new part here. ie Use of cv.BoundingRect()
    while contour:
        # Draw bounding rectangles
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()
        '''if contour!=None and contour.h_next()!=None:
			contour=contour.h_next()[0]
			print contour.h_next()[0]'''
Example #19
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        width = frame.width
        height = frame.height
        surface = width * height  # Surface area of the image
        cursurface = 0  # Hold the current surface that have changed

        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        while True:
            color_image = cv.QueryFrame(self.capture)

            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3,
                      0)  # Remove false positives

            if not difference:  # For the first time put values in difference, temp and moving_average
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, moving_average, 0.020,
                              None)  # Compute the average

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            # Convert the image so that it can be thresholded
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            cv.Dilate(grey_image, grey_image, None, 18)  # to get object blobs
            cv.Erode(grey_image, grey_image, None, 10)

            # Find contours
            storage = cv.CreateMemStorage(0)
            contours = cv.FindContours(grey_image, storage,
                                       cv.CV_RETR_EXTERNAL,
                                       cv.CV_CHAIN_APPROX_SIMPLE)

            backcontours = contours  # Save contours

            while contours:  # For all contours compute the area
                cursurface += cv.ContourArea(contours)
                contours = contours.h_next()

            avg = (
                cursurface * 100
            ) / surface  # Percentage of the frame area covered by moving contours
            if avg > self.ceil:
                print("Something is moving !")
                ring = IntrusionAlarm()
                ring.run()

            # print avg,"%"
            cursurface = 0  # Put back the current surface to 0

            # Draw the contours on the image
            _red = (0, 0, 255)  # Red for external contours
            _green = (0, 255, 0)  # Green for internal contours
            levels = 1  # 1 = external contours only, 2 = internal contours as well, 3 ...
            cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                            cv.CV_FILLED)

            cv.ShowImage("Virtual Eye", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
            elif c == 99:
                cv2.destroyWindow('Warning!!!')
Example #20
def detectObject(filename):
    img=cv.LoadImage(filename)
    '''
    #get color histogram
    '''
   
#     im32f=np.zeros((img.shape[:2]),np.uint32)
    hist_range=[[0,256],[0,256],[0,256]]
    im32f=cv.CreateImage(cv.GetSize(img), cv2.IPL_DEPTH_32F, 3)
    cv.ConvertScale(img, im32f)
    
    
    hist=cv.CreateHist([32,32,32],cv.CV_HIST_ARRAY,hist_range,3)
    '''
    #create three histogram'''
    b=cv.CreateImage(cv.GetSize(im32f), cv2.IPL_DEPTH_32F, 1)
    g=cv.CreateImage(cv.GetSize(im32f), cv2.IPL_DEPTH_32F, 1)
    r=cv.CreateImage(cv.GetSize(im32f), cv2.IPL_DEPTH_32F, 1)
    
   
    '''
    #create image backproject 32f, 8u
    '''
    backproject32f=cv.CreateImage(cv.GetSize(img),cv2.IPL_DEPTH_32F,1)
    backproject8u=cv.CreateImage(cv.GetSize(img),cv2.IPL_DEPTH_8U,1)
    '''
    #create binary
    '''
    bw=cv.CreateImage(cv.GetSize(img),cv2.IPL_DEPTH_8U,1)
    '''
    #create kernel image
    '''
    kernel=cv.CreateStructuringElementEx(3, 3, 1, 1, cv2.MORPH_ELLIPSE)
    cv.Split(im32f, b, g, r,None)

    planes=[b,g,r]
    cv.CalcHist(planes, hist)
    '''
    #find min and max histogram bin.
    '''
    minval=maxval=0.0
    min_idx=max_idx=0
    minval, maxval, min_idx, max_idx=cv.GetMinMaxHistValue(hist)
    '''
    # threshold histogram.  this sets the bin values that are below the threshold
    to zero
    '''
    cv.ThreshHist(hist, maxval/32.0)
    '''
    #backproject the thresholded histogram, backprojection should contain higher values for
    #background and lower values for the foreground
    '''
    cv.CalcBackProject(planes, backproject32f, hist)
    '''
    #convert to 8u type
    '''
    val_min=val_max=0.0
    idx_min=idx_max=0
    val_min,val_max,idx_min,idx_max=cv.MinMaxLoc(backproject32f)
    cv.ConvertScale(backproject32f, backproject8u,255.0/val_max)
    '''
    #threshold backprojected image. this gives us the background
    '''
    cv.Threshold(backproject8u, bw, 10, 255, cv2.THRESH_BINARY)
    '''
    #some morphology on background
    '''
    cv.Dilate(bw, bw,kernel,1)
    cv.MorphologyEx(bw, bw, None,kernel, cv2.MORPH_CLOSE, 2)
    '''
    #get the foreground
    '''
    cv.SubRS(bw,cv.Scalar(255,255,255),bw)
    cv.MorphologyEx(bw, bw, None, kernel,cv2.MORPH_OPEN,2)
    cv.Erode(bw, bw, kernel, 1)
    '''
    #find contours of foreground
    #Grabcut
    '''
    size=cv.GetSize(bw)
    color=np.asarray(img[:,:])
    fg=np.asarray(bw[:,:])
#     mask=cv.CreateMat(size[1], size[0], cv2.CV_8UC1)
    '''
    #Make anywhere black in the grey_image (output from MOG) as likely background
    #Make anywhere white in the grey_image (output from MOG) as definite foreground
    '''
    rect = (0,0,0,0)
   
    mat_mask=np.zeros((size[1],size[0]),dtype='uint8')
    mat_mask[:,:]=fg
    
    mat_mask[mat_mask[:,:] == 0] = 2
    mat_mask[mat_mask[:,:] == 255] = 1
    
    '''
    #Make containers 
    '''                               
    bgdModel = np.zeros((1, 13 * 5))
    fgdModel = np.zeros((1, 13 * 5))
    cv2.grabCut(color, mat_mask, rect, bgdModel, fgdModel, 5,
                cv2.GC_INIT_WITH_MASK)  # iterCount comes before the mode flag
    '''
    #Multiple new mask by original image to get cut
    '''
    mask2 = np.where((mat_mask==0)|(mat_mask==2),0,1).astype('uint8')  
    gcfg=np.zeros((size[1],size[0]),np.uint8)
    gcfg=mask2
    
    img_cut = color*mask2[:,:,np.newaxis]

    contours, hierarchy=cv2.findContours(gcfg ,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    
    for cnt in contours:
        print cnt
        rect_box = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect_box)
        box = np.int0(box)
        cv2.drawContours(color,[box], 0,(0,0,255),2)
    cv2.imshow('demo', color)
    cv2.waitKey(0)
Example #21
        #
        #cv.PyrMeanShiftFiltering(plate, zzz, 40, 15)
        foo = anpr.greyscale(plate)
        segmented = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        bar = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        cv.EqualizeHist(foo, segmented)

        cv.AdaptiveThreshold(
            segmented, bar, 255, cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
            cv.CV_THRESH_BINARY_INV,
            plate.height % 2 == 0 and (plate.height + 1) or plate.height,
            plate.height / 2)

        baz = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        el = cv.CreateStructuringElementEx(1, 2, 0, 0, cv.CV_SHAPE_RECT)
        cv.Erode(bar, baz, el)
        # quick_show(plate)
        print 'baz'
        quick_show(baz)
        print 'bar'
        quick_show(bar)
        print 'segmented'
        quick_show(segmented)
        image_path = 'plate.png'
        image_path2 = 'plate2.png'

        cv.SaveImage(image_path, foo)

        for tool in tools:
            print("Will use tool '%s'" % (tool.get_name()))
            # Ex: Will use tool 'tesseract'
Example #22
def Erosion(pos):
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    cv.Erode(src, dest, element, 1)
    cv.ShowImage("Erosion & Dilation", dest)
Example #23
def findBlob(rgbimage, hsvimage, maskimage, blobimage, hsvcolorrange, hsvmin,
             hsvmax):

    cv.CvtColor(rgbimage, hsvimage, cv.CV_BGR2HSV)
    hsvmin = [
        hsvmin[0] - hsvcolorrange[0], hsvmin[1] - hsvcolorrange[1],
        hsvmin[2] - hsvcolorrange[2]
    ]
    hsvmax = [
        hsvmax[0] + hsvcolorrange[0], hsvmax[1] + hsvcolorrange[1],
        hsvmax[2] + hsvcolorrange[2]
    ]
    if hsvmin[0] < 0:
        hsvmin[0] = 0
    if hsvmin[1] < 0:
        hsvmin[1] = 0
    if hsvmin[2] < 0:
        hsvmin[2] = 0

    if hsvmax[0] > 255:
        hsvmax[0] = 255
    if hsvmax[1] > 255:
        hsvmax[1] = 255
    if hsvmax[2] > 255:
        hsvmax[2] = 255

    cv.InRangeS(hsvimage, cv.Scalar(hsvmin[0], hsvmin[1], hsvmin[2]),
                cv.Scalar(hsvmax[0], hsvmax[1], hsvmax[2]), maskimage)

    element = cv.CreateStructuringElementEx(5, 5, 2, 2, cv.CV_SHAPE_RECT)
    cv.Erode(maskimage, maskimage, element, 1)
    cv.Dilate(maskimage, maskimage, element, 1)
    storage = cv.CreateMemStorage(0)

    cv.Copy(maskimage, blobimage)
    contour = cv.FindContours(maskimage, storage, cv.CV_RETR_CCOMP,
                              cv.CV_CHAIN_APPROX_SIMPLE)

    trackedpoint = None
    maxtrackedpoint = None

    maxareasize = 0

    #You can tune these values to improve tracking
    maxarea = 0
    minarea = 1

    areasize = 0

    while contour:
        bound_rect = cv.BoundingRect(list(contour))
        contour = contour.h_next()
        pt1 = (bound_rect[0], bound_rect[1])
        pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
        areasize = fabs(bound_rect[2] * bound_rect[3])
        if (areasize > maxareasize):
            maxareasize = areasize
            maxtrackedpoint = (int(
                (pt1[0] + pt2[0]) / 2), int((pt1[1] + pt2[1]) / 2), 1.0)
            cv.Rectangle(rgbimage, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)

    trackedpoint = maxtrackedpoint
    if (trackedpoint != None):
        cv.Circle(rgbimage, (trackedpoint[0], trackedpoint[1]), 5,
                  cv.CV_RGB(255, 0, 0), 1)
    return trackedpoint
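The "keep only the largest blob" part of findBlob is compact in cv2 form; a sketch that assumes mask is already a binary image (largest_blob_center is a hypothetical helper, not from the original):

import cv2


def largest_blob_center(mask):
    # return the center of the biggest bounding box, or None if nothing was found
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x signature
    best, best_area = None, 0
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        if w * h > best_area:
            best_area = w * h
            best = (x + w // 2, y + h // 2)
    return best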
Example #24
capture = cv.CaptureFromCAM(0)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 1280)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 720)
frame = cv.QueryFrame(capture)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("output")
previous_x = 0
previous_y = 0
while (1):
    frame = cv.QueryFrame(capture)
    cv.Flip(frame, frame, 1)
    imdraw = cv.CreateImage(cv.GetSize(frame), 8,
                            3)  # we make all drawings on imdraw.
    imgyellowthresh = getthresholdedimg(
        frame)  # we get coordinates from imgyellowthresh
    cv.Erode(imgyellowthresh, imgyellowthresh, None,
             1)  # eroding removes small noises
    (leftmost, rightmost, topmost, bottommost) = getpositions(imgyellowthresh)
    if (leftmost - rightmost != 0) or (topmost - bottommost != 0):
        lastx = posx
        lasty = posy
        posx = cv.Round((rightmost + leftmost) / 2)
        posy = cv.Round((bottommost + topmost) / 2)
        if lastx != 0 and lasty != 0:
            win32api.SetCursorPos((posx, posy))

    cv.Add(test, imdraw, test)
    cv.ShowImage("output", test)
    if cv.WaitKey(10) >= 0:
        break
cv.DestroyWindow("output")
Example #25

while(1):
	# captures feed from video in color
	color_image = cv.QueryFrame(capture)
	
	# image buffer used for drawing overlays
	imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
	
	# clear the drawing buffer
	cv.SetZero(imdraw)
	cv.Flip(color_image,color_image, 1)
	cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
	# get a color-thresholded binary image
	imgbluethresh = getthresholdedimg(color_image)
	cv.Erode(imgbluethresh, imgbluethresh, None,  3)
	cv.Dilate(imgbluethresh, imgbluethresh, None, 10)
	# keep a copy, since FindContours modifies its input
	img2 = cv.CloneImage(imgbluethresh)
	# storage for the contour finder
	storage = cv.CreateMemStorage(0)
	contour = cv.FindContours(imgbluethresh, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
	
	# blank list into which points for bounding rectangles around blobs are appended
	points = []	

	# this is the new part here. ie use of cv.BoundingRect()
	while contour:
		
		# Draw bounding rectangles
		bound_rect = cv.BoundingRect(list(contour))
Example #26
def Closing(pos):
    element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                            element_shape)
    cv.Dilate(src, image, element, 1)
    cv.Erode(image, dest, element, 1)
    cv.ShowImage("Opening & Closing", dest)