Example #1
def repeat():
    global capture  #declare as globals since we are assigning to them now
    global camera_index
    global done

    frame = cv.QueryFrame(capture)
    cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 3)

    imgHsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, imgHsv, cv.CV_BGR2HSV)
    #imgHsv2 = GetThresholdedImage(imgHsv)
    #print(numpy.asarray(cv.GetMat(imgHsv)))

    imgRGBA = cv.CreateImage(cv.GetSize(frame), 8, 4)
    cv.CvtColor(frame, imgRGBA, cv.CV_BGR2RGBA)

    cv.Smooth(imgRGBA, imgRGBA, cv.CV_GAUSSIAN, 3, 3)
    (filteredImg, offsetX, offsetY) = parallelSumRed(imgRGBA, 640,
                                                     480)  #3D array

    d = numpy.sqrt(offsetX * offsetX + offsetY * offsetY)

    if d != 0:
        print("Distance = " + str(c1 / d + c2) + "cm")
        print("OffsetX = " + str(offsetX) + "; OffsetY = " + str(offsetY))
        print("")

    imgRGB = cv.CreateImage(cv.GetSize(frame), 8, 3)
    #cv.CvtColor(Image.fromarray(filteredImg), imgRGB, cv.CV_RGBA2RGB)

    imgRGBA = cv.fromarray(numpy.reshape(filteredImg, (480, 640, 4)))
    if offsetX != 0 or offsetY != 0:
        cv.Rectangle(imgRGBA, (320 + offsetX - 6, 240 + offsetY - 6),
                     (320 + offsetX + 6, 240 + offsetY + 6),
                     (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (0, 240 + offsetY), (639, 240 + offsetY),
                (255, 0, 255, 255), 1, 8)
        cv.Line(imgRGBA, (320 + offsetX, 0), (320 + offsetX, 479),
                (255, 0, 255, 255), 1, 8)

    cv.ShowImage(HSVWindow, imgRGBA)
    cv.ShowImage(original, frame)

    cv.SetMouseCallback(original, onMouseMove, [
        cv.CV_EVENT_MOUSEMOVE,
        numpy.asarray(cv.GetMat(imgHsv)),
        numpy.asarray(cv.GetMat(frame))
    ])
    #cv.SetMouseCallback(HSVWindow, onMouseMove, [cv.CV_EVENT_MOUSEMOVE, numpy.asarray(cv.GetMat(imgHsv)), numpy.asarray(cv.GetMat(frame))])

    #cv.ShowImage(filtered, imgHsv2)
    c = cv.WaitKey(10)

    if (str(c) == "27"):  #if ESC is pressed
        print("Thank You!")
        done = True
    if (str(c) == "99"):  #'c' for calibration
        calibration(int(input("How many data points: ")))
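
For comparison, here is a minimal sketch of the same blur-and-convert step written against the modern cv2 API; the camera index, the single-frame read, and the window name are illustrative assumptions rather than part of the example above.

# Hedged cv2 sketch of the Gaussian blur + BGR->HSV/RGBA conversions used above.
import cv2

cap = cv2.VideoCapture(0)                            # any frame source would do
ok, frame = cap.read()
if ok:
    frame = cv2.GaussianBlur(frame, (3, 3), 0)       # cv.Smooth(..., CV_GAUSSIAN, 3, 3)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)     # cv.CvtColor(..., CV_BGR2HSV)
    rgba = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)   # cv.CvtColor(..., CV_BGR2RGBA)
    cv2.imshow("original", frame)
    cv2.waitKey(10)
cap.release()
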
Example #2
File: lrf.py Project: Mnemonic7/lrf
def difference_image(img1, img2):
    print " simg1 = simplify(img1)"
    simg1 = simplify(img1)
    print " simg2 = simplify(img2)"
    simg2 = simplify(img2)

    #dbg_image('simg1',simg1)
    #dbg_image('simg2',simg2)

    #create image buffers
    img3 = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    simg3 = cv.CloneImage(img3)
    bitimage = cv.CreateImage(cv.GetSize(img2), cv.IPL_DEPTH_8U, 1)
    eimg3 = cv.CloneImage(bitimage)

    #process
    print " cv.AbsDiff(simg2,simg1,img3)"
    cv.AbsDiff(simg2, simg1, img3)
    print " cv.Smooth(img3,simg3)"
    cv.Smooth(img3, simg3)
    #dbg_image('simg3',simg3)
    # these threshold values must be calibrated
    #cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_TOZERO_INV)
    print " cv.Threshold(simg3,bitimage,16,255,cv.CV_THRESH_BINARY)"
    cv.Threshold(simg3, bitimage, 50, 255, cv.CV_THRESH_BINARY)
    #dbg_image('bitimage',bitimage)
    print " cv.Erode(bitimage,eimg3)"
    cv.Erode(bitimage, eimg3)
    #dbg_image('eimg3',eimg3)
    return eimg3
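
A rough cv2 equivalent of the frame-differencing pipeline above, assuming img1 and img2 are already single-channel uint8 arrays (the project-specific simplify() step is left out).

# Hedged cv2 sketch: absdiff -> smooth -> threshold -> erode.
import cv2

def difference_image_cv2(img1, img2):
    diff = cv2.absdiff(img2, img1)                    # cv.AbsDiff
    diff = cv2.GaussianBlur(diff, (3, 3), 0)          # cv.Smooth default: 3x3 Gaussian
    _, bitimage = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)
    return cv2.erode(bitimage, None)                  # default 3x3 kernel, 1 iteration
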
Example #3
    def preproc_map_img(self, map_img):
        """ Preprocesses the map image Soft, erode or whtaever it is necessary to improve the input"""
        #Apply threshold to have just black and white
        thresh_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Threshold(map_img, thresh_img, 250, 255, cv.CV_THRESH_BINARY)

        #Blur map's thresholded image
        soft_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Smooth(thresh_img, soft_img, cv.CV_GAUSSIAN, 9, 9)

        #Dilate the blurred map to get its skeleton
        dilated_img = cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Dilate(soft_img, dilated_img, iterations=20)

        #Create inverse image
#        dilated_inverted_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
#        for r in range(0,dilated_img.rows):
#            for c in range(0,dilated_img.cols):
#                dilated_inverted_img[r,c]=255-dilated_img[r,c]

        #Enhance image edges for the Hough transform
        canny_img=cv.CreateMat(map_img.height, map_img.width, cv.CV_8UC1)
        cv.Canny(soft_img, canny_img, 200,220)

        preprocessed_map = dilated_img
        return preprocessed_map
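
The same map preprocessing can be sketched with cv2 as follows, assuming map_img is a single-channel uint8 array; this is an illustrative rewrite, not the project's own code.

import cv2

def preproc_map_img_cv2(map_img):
    _, thresh = cv2.threshold(map_img, 250, 255, cv2.THRESH_BINARY)  # black and white only
    soft = cv2.GaussianBlur(thresh, (9, 9), 0)                       # blur the thresholded map
    return cv2.dilate(soft, None, iterations=20)                     # dilate to get the skeleton
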
Example #4
def laplacian():
    cv.Smooth(src_image, dst_image, cv.CV_GAUSSIAN, 9, 9)
    dst_img = cv.CreateImage((dst_image.width, dst_image.height), 8, 1)
    cv.CvtColor(dst_image, dst_img, cv.CV_RGB2GRAY)
    cv.Laplace(dst_img, dst_img, 5)
    display(dst_img, "Laplacian")
    cv.WaitKey(0)
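
A minimal cv2 sketch of the same Laplacian step, assuming a BGR uint8 input image; the signed intermediate depth is an addition to avoid clipping.

import cv2

def laplacian_cv2(src):
    blurred = cv2.GaussianBlur(src, (9, 9), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    lap = cv2.Laplacian(gray, cv2.CV_16S, ksize=5)   # signed output keeps negative edges
    return cv2.convertScaleAbs(lap)                  # back to a displayable 8-bit image
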
Example #5
    def processImage(self, curframe):
        cv.Smooth(curframe, curframe)  #Remove false positives

        if not self.absdiff_frame:  #For the first time put values in difference, temp and moving_average
            self.absdiff_frame = cv.CloneImage(curframe)
            self.previous_frame = cv.CloneImage(curframe)
            cv.Convert(
                curframe, self.average_frame
            )  #Convert because RunningAvg works with 32F images
        else:
            cv.RunningAvg(curframe, self.average_frame,
                          0.05)  #Compute the average

        cv.Convert(self.average_frame,
                   self.previous_frame)  #Convert back to 8U frame

        cv.AbsDiff(curframe, self.previous_frame,
                   self.absdiff_frame)  # moving_average - curframe

        cv.CvtColor(
            self.absdiff_frame, self.gray_frame,
            cv.CV_RGB2GRAY)  #Convert to gray otherwise can't do threshold
        cv.Threshold(self.gray_frame, self.gray_frame, 50, 255,
                     cv.CV_THRESH_BINARY)

        cv.Dilate(self.gray_frame, self.gray_frame, None,
                  15)  #to get object blobs
        cv.Erode(self.gray_frame, self.gray_frame, None, 10)
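
The running-average motion mask above can be sketched with cv2 roughly as below, assuming BGR uint8 frames; the class wrapper and parameter values mirror the example but are illustrative.

import cv2
import numpy as np

class MotionMask:
    def __init__(self, first_frame):
        self.average = np.float32(first_frame)            # RunningAvg needs a float accumulator

    def process(self, frame):
        frame = cv2.GaussianBlur(frame, (3, 3), 0)        # remove false positives
        cv2.accumulateWeighted(frame, self.average, 0.05) # cv.RunningAvg
        background = cv2.convertScaleAbs(self.average)    # back to 8U
        diff = cv2.absdiff(frame, background)
        gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)
        mask = cv2.dilate(mask, None, iterations=15)      # merge object blobs
        return cv2.erode(mask, None, iterations=10)
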
Example #6
    def post_process_distance_img(self, dist_img):
        inverted_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)

        #Blur image
        soft_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Smooth(dist_img, soft_img, cv.CV_GAUSSIAN, 21, 21)


        #Apply threshold to have just black and white
        thresh_img=cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Threshold(soft_img, thresh_img, 1, 255, cv.CV_THRESH_BINARY)#CV_THRESH_OTSU is an adaptive thresholding method



        #Create inverse image (without this, inverted_img would be eroded while uninitialized)
        for r in range(0,thresh_img.rows):
            for c in range(0,thresh_img.cols):
                inverted_img[r,c]=255-thresh_img[r,c]

        #Erode the inverse map to get its skeleton
        eroded_img = cv.CreateMat(dist_img.height, dist_img.width, cv.CV_8UC1)
        cv.Erode(inverted_img, eroded_img, iterations=10)

        #Create inverse image
        for r in range(0,eroded_img.rows):
            for c in range(0,eroded_img.cols):
                inverted_img[r,c]=255-eroded_img[r,c]

        return inverted_img
Example #7
def smooth(grayscale, gaussian_std=5.0):
    """ 
        Smooths an image with a Gaussian filter.
        
        :param grayscale:  The grayscale image to smooth.
        :type  grayscale:  HxW array float
        
        :param gaussian_std: Std-deviation of the Gaussian filter.
        :type  gaussian_std: float,>0
     
        :return: smoothed: The smoothed image.
        :rtype: array(HxW,float)
        
    """

    check_2d_array(grayscale, "grayscale")
    grayscale = grayscale.astype('float32')

    im = numpy_to_cv(grayscale)
    shape = (im.width, im.height)
    smoothed = cv.CreateImage(shape, cv.IPL_DEPTH_32F, 1)

    cv.Smooth(im, smoothed, cv.CV_GAUSSIAN, 0, 0, gaussian_std)

    result_a = cv_to_numpy(smoothed).squeeze()
    return result_a
Example #8
def smooth(image):
    mat = cv.fromarray(image)
    dest = cv.fromarray(np.zeros(image.shape, np.uint8))
    #GaussianSmooth
    #cv.Smooth(mat,dest)
    cv.Smooth(mat, dest, smoothtype=cv.CV_MEDIAN)
    #cv.Set(dest)

    return np.asarray(dest[:][:], np.uint8)
Example #9
def gaussianFilter(inputImage, filterSize):
    """
        Apply OpenCV's Gaussian filter to the given image
    """
    # Allocate the output image (32-bit float, single channel)
    outputImage = cv.CreateImage(cv.GetSize(inputImage), cv.IPL_DEPTH_32F, 1)

    cv.Smooth(inputImage, outputImage, cv.CV_GAUSSIAN, filterSize, filterSize)

    return outputImage
Example #10
def medianfiltering():
    src = cv.LoadImageM(k, cv.CV_LOAD_IMAGE_COLOR)
    dst = cv.CreateImage((src.width, src.height), 8, src.channels)
    cv.SetZero(dst)
    cv.NamedWindow("Median Filtering", 1)
    cv.NamedWindow("After Filtering", 1)
    cv.Smooth(src, dst, cv.CV_MEDIAN, 9, 9)
    cv.ShowImage("Median Filtering", src)
    cv.ShowImage("After Filtering", dst)
    cv.WaitKey(0)
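
The same median filtering in cv2 might look like the sketch below; the file name stands in for the global k used above.

import cv2

src = cv2.imread("input.jpg")       # placeholder path
dst = cv2.medianBlur(src, 9)        # cv.Smooth(..., CV_MEDIAN, 9, 9)
cv2.imshow("Median Filtering", src)
cv2.imshow("After Filtering", dst)
cv2.waitKey(0)
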
Example #11
    def processImage(self, frame):
        cv.CvtColor(frame, self.frame2gray, cv.CV_RGB2GRAY)

        #Absdiff to get the difference between the two frames
        cv.AbsDiff(self.frame1gray, self.frame2gray, self.res)

        #Remove the noise and do the threshold
        cv.Smooth(self.res, self.res, cv.CV_BLUR, 5, 5)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_OPEN)
        cv.MorphologyEx(self.res, self.res, None, None, cv.CV_MOP_CLOSE)
        cv.Threshold(self.res, self.res, 10, 255, cv.CV_THRESH_BINARY_INV)
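
A hedged cv2 sketch of the same noise-removal chain, assuming two grayscale uint8 frames; the 3x3 structuring element replaces the default element passed as None above.

import cv2
import numpy as np

def motion_mask_cv2(frame1_gray, frame2_gray):
    res = cv2.absdiff(frame1_gray, frame2_gray)
    res = cv2.blur(res, (5, 5))                              # cv.Smooth(..., CV_BLUR, 5, 5)
    kernel = np.ones((3, 3), np.uint8)
    res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)      # remove small specks
    res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)     # fill small holes
    _, res = cv2.threshold(res, 10, 255, cv2.THRESH_BINARY_INV)
    return res
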
Example #12
def findOffsetInOneFrame():
    global capture  #declare as globals since we are assigning to them now
    global camera_index

    frame = cv.QueryFrame(capture)
    cv.Smooth(frame, frame, cv.CV_GAUSSIAN, 3, 3)

    imgHsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
    cv.CvtColor(frame, imgHsv, cv.CV_BGR2HSV)
    #imgHsv2 = GetThresholdedImage(imgHsv)
    #print(numpy.asarray(cv.GetMat(imgHsv)))

    imgRGBA = cv.CreateImage(cv.GetSize(frame), 8, 4)
    cv.CvtColor(frame, imgRGBA, cv.CV_BGR2RGBA)

    cv.Smooth(imgRGBA, imgRGBA, cv.CV_GAUSSIAN, 3, 3)
    (filteredImg, offsetX, offsetY) = parallelSumRed(imgRGBA, 640,
                                                     480)  #3D array

    return numpy.sqrt(offsetX * offsetX + offsetY * offsetY)  #d
Example #13
    def smooth(self, image, param1=5, param2=16):

        smoothed = cv.CreateImage(cv.GetSize(image), image.depth,
                                  image.channels)
        cv.Smooth(image,
                  smoothed,
                  smoothtype=cv.CV_GAUSSIAN_5x5,
                  param1=param1,
                  param2=param2)

        return smoothed
Example #14
def getThresholdImage(im):
    newim = cv.CloneImage(im)
    cv.Smooth(newim, newim, cv.CV_BLUR, 12)  #Remove noise

    hsv = cv.CreateImage(cv.GetSize(im), 8, 3)
    cv.CvtColor(newim, hsv, cv.CV_BGR2HSV)  # Convert image to HSV
    imThreshed = cv.CreateImage(cv.GetSize(im), 8, 1)
    #Do the threshold on the hsv image, with the right range for the yellow color
    cv.InRangeS(hsv, cv.Scalar(20, 100, 100), cv.Scalar(30, 255, 255),
                imThreshed)
    del hsv
    return imThreshed
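
For reference, the yellow-thresholding function above could be written with cv2 as in this sketch; the hue range 20-30 comes from the example, everything else is assumed boilerplate.

import cv2
import numpy as np

def get_threshold_image_cv2(im):
    smoothed = cv2.blur(im, (12, 12))                 # cv.Smooth(..., CV_BLUR, 12)
    hsv = cv2.cvtColor(smoothed, cv2.COLOR_BGR2HSV)   # convert image to HSV
    lower = np.array([20, 100, 100], np.uint8)
    upper = np.array([30, 255, 255], np.uint8)
    return cv2.inRange(hsv, lower, upper)             # cv.InRangeS
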
Example #15
def Color_callibration(capture):
    vals = []
    bgr = []
    mini = [255, 255, 255]
    maxi = [0, 0, 0]
    cv.NamedWindow("BGR", 0)
    print 'Please put your color in the circular area. Press ESC to start calibration:'
    while 1:
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Starting calibration... Analyzing the object...'
    for i in range(0, 100):
        image = cv.QueryFrame(capture)
        cv.Flip(image, image, 1)
        cv.Smooth(image, image, cv.CV_MEDIAN, 3, 0)
        imagehsv = cv.CreateImage(cv.GetSize(image), 8, 3)
        cv.CvtColor(image, imagehsv, cv.CV_BGR2YCrCb)  # note: converts to YCrCb, despite the variable name
        vals = cv.Get2D(imagehsv, 300, 200)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)
        cv.PutText(
            image,
            "  " + str(vals[0]) + "," + str(vals[1]) + "," + str(vals[2]),
            (200, 300), font, (55, 25, 255))
        for j in range(0, 3):
            if (vals[j] < mini[j]): mini[j] = vals[j]
            if (vals[j] > maxi[j]): maxi[j] = vals[j]
        cv.Circle(image, (int(200), int(300)), 10, cv.CV_RGB(255, 255, 255), 4)
        cv.ShowImage("BGR", image)
        c = cv.WaitKey(33)
        if c == 27:
            break
    print 'Analysis completed'
    mini[0] -= 35
    mini[1] -= 15
    mini[2] -= 15
    maxi[0] += 35
    maxi[1] += 15
    maxi[2] += 15
    for i in range(0, 3):
        if (mini[i] < 0):
            mini[i] = 0
        if (maxi[i] > 255):
            maxi[i] = 255
    cv.DestroyWindow("BGR")
    bgr = (mini, maxi)
    return bgr
Example #16
def main():

    # captured image size, change to whatever you want
    width = 320
    height = 240

    capture = cv.CreateCameraCapture(0)

    # Over-write default captured image size
    cv.SetCaptureProperty(capture,cv.CV_CAP_PROP_FRAME_WIDTH,width)
    cv.SetCaptureProperty(capture,cv.CV_CAP_PROP_FRAME_HEIGHT,height)

    cv.NamedWindow( "output", 1 )
    cv.NamedWindow( "processed", 1 )

    while True:

        frame = cv.QueryFrame(capture)
        cv.Smooth(frame, frame, cv.CV_BLUR, 3)

        imgColorProcessed = ColorProcess(frame)
        mat = cv.GetMat(imgColorProcessed)

        # Calculating the moments
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)
        moment10 = cv.GetSpatialMoment(moments, 1, 0)
        moment01 = cv.GetSpatialMoment(moments, 0,1)

        # Finding a big enough blob
        if(area > 60000):

            # Calculating the center position of the blob
            posX = int(moment10 / area)
            posY = int(moment01 / area)

            # check slave status and send coordinates
            state = readData()
            if state == 1:
                sendData(posX)
                sendData(posY)
                print 'x: ' + str(posX) + ' y: ' + str(posY)

        # update video windows
        cv.ShowImage("processed", imgColorProcessed)
        cv.ShowImage("output", frame)

        if cv.WaitKey(10) >= 0:
            break

    return;
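
The moment-based centroid calculation above reduces to a few lines in cv2; this sketch assumes mask is the binary image produced by the project-specific ColorProcess step.

import cv2

def blob_centroid(mask, min_area=60000):
    m = cv2.moments(mask)                 # m00/m10/m01 match the spatial moments above
    area = m["m00"]
    if area > min_area:                   # ignore small blobs
        return int(m["m10"] / area), int(m["m01"] / area)
    return None
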
Example #17
def sobel():
    cv.Smooth(src_image, dst_image, cv.CV_GAUSSIAN, 3, 3)
    src_gray = cv.CreateImage((src_image.width, src_image.height), 8, 1)
    dst_gray1 = cv.CreateImage((src_image.width, src_image.height), 8, 1)
    dst_gray = cv.CreateImage((src_image.width, src_image.height), 8, 1)
    cv.CvtColor(src_image, src_gray, cv.CV_BGR2GRAY)
    cv.Sobel(src_gray, dst_gray1, 0, 1, 3)
    cv.ConvertScaleAbs(dst_gray1, dst_gray1, 1, 0)
    cv.Sobel(src_gray, dst_gray, 1, 0, 3)
    cv.ConvertScaleAbs(dst_gray, dst_gray, 1, 0)
    cv.AddWeighted(dst_gray, 0.5, dst_gray1, 0.5, 0, dst_gray)
    cv.NamedWindow("Destination Image")
    cv.ShowImage("Destination Image", dst_gray)
    cv.WaitKey(0)
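
A minimal cv2 sketch of the Sobel example, assuming a BGR uint8 source image; the signed intermediate depth is added to avoid overflow before taking absolute values.

import cv2

def sobel_cv2(src):
    blurred = cv2.GaussianBlur(src, (3, 3), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    grad_x = cv2.convertScaleAbs(cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3))
    grad_y = cv2.convertScaleAbs(cv2.Sobel(gray, cv2.CV_16S, 0, 1, ksize=3))
    return cv2.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)   # combine both gradient images
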
Example #18
def extractEyeBrows(originalImage, pt1, centerX, centerY, eyeBallParams):

    (eyeBallCenterX, eyeBallCenterY, eyeBallRadius) = eyeBallParams
    # find good features
    #                 eig_image = cv.CreateMat(gray_im.rows, gray_im.cols, cv.CV_32FC1)
    #                 temp_image = cv.CreateMat(gray_im.rows, gray_im.cols, cv.CV_32FC1)
    #                 for (x,y) in cv.GoodFeaturesToTrack(gray_im, eig_image, temp_image, 10, 0.04, 1.0, useHarris = True):
    #                     print "good feature at", x,y
    #                     cv.Rectangle(img, (int(x), int(y)),(int(x) + 20, int(y) + 20), cv.RGB(255, 255, 255))

    #find color of the skin
    #prepare histogram

    eyebrow_Area = cv.GetSubRect(
        originalImage, (int(pt1[0] * 1.1), int(
            pt1[1] * 1.2), centerX - pt1[0], int((centerY - pt1[1]) * 0.6)))
    eyebrow_Area2 = cv.CloneMat(eyebrow_Area)
    cv.Smooth(eyebrow_Area2, eyebrow_Area2, cv.CV_GAUSSIAN, 9, 1)

    hsv_image = cv.CreateMat(eyebrow_Area.height, eyebrow_Area.width,
                             cv.CV_8UC3)
    imageArray = np.asarray(eyebrow_Area2, dtype=np.uint8)

    hsv_image = cv2.cvtColor(imageArray, cv2.COLOR_BGR2HSV)

    #                 histogram2 = hs_histogram(leftEyeArea)
    #                 print(histogram2)
    #                 imageArray2 = np.asarray(histogram2, dtype=np.uint8)
    #                 cv2.imshow("histo " , histogram2)

    #
    #dark = imageArray[...,2] < 32
    #set not frequent to dark
    #imageArray[dark] = 0
    #histogram = cv.CreateHist(2, cv.CV_HIST_ARRAY)
    histogram = cv2.calcHist([hsv_image], [0, 1], None, [180, 256],
                             [0, 180, 0, 256])

    h1 = np.clip(histogram * 0.005 * hist_scale, 0, 1)
    vis = hsv_map * h1[:, :, np.newaxis] / 255.0
    #print type(vis)
    #cv2.imshow('hist', vis)

    #backproj = None
    #cv.CalcBackProject(hsv_image, backproj, histogram)
    ranges = [0, 180, 0, 256]

    backproj = cv2.calcBackProject([hsv_image], [0, 1], histogram, ranges, 10)

    cv2.imshow("back proj ", backproj)
Example #19
	def reduce_image_noise(self, image):
		"""Reduce noise from image, etc.
		
		Parameters:
			
			image
				The image to be processed
				
		"""
		
		cv.Smooth(image, image, cv.CV_BLUR, 5, 5)
		#cv.MorphologyEx(image, image, None, None, cv.CV_MOP_OPEN)
		#cv.MorphologyEx(image, image, None, None, cv.CV_MOP_CLOSE)
		
		return image
Example #20
    def run(self):
        while True:
            img = cv.QueryFrame(self.capture)

            #blur the source image to reduce color noise
            cv.Smooth(img, img, cv.CV_BLUR, 3)

            #convert the image to HSV (Hue, Saturation, Value) so it's
            #easier to determine the color to track (hue)
            hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            #limit all pixels that don't match our criteria; in this case we are
            #looking for purple, but if you want you can adjust the first value in
            #both tuples, which is the hue range (120, 140). OpenCV uses 0-180 as
            #the hue range for the HSV color model
            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255),
                        thresholded_img)

            #determine the objects moments and check that the area is large
            #enough to be our object
            moments = cv.Moments(cv.GetMat(thresholded_img), 0)
            area = cv.GetCentralMoment(moments, 0, 0)

            #there can be noise in the video so ignore objects with small areas
            if (area > 10000):
                #determine the x and y coordinates of the center of the object
                #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
                x = cv.GetSpatialMoment(moments, 1, 0) / area
                y = cv.GetSpatialMoment(moments, 0, 1) / area

                #print 'x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area)

                #create an overlay to mark the center of the tracked object
                overlay = cv.CreateImage(cv.GetSize(img), 8, 3)

                cv.Circle(overlay, (int(x), int(y)), 2, (255, 255, 255), 20)
                #cv.Add(img, overlay, img)
                #add the thresholded image back to the img so we can see what was
                #left after it was applied
                #cv.Merge(thresholded_img, None, None, None, img)

            #display the image
            cv.ShowImage(color_tracker_window, img)

            if cv.WaitKey(10) == 27:
                break
Example #21
def on_trackbar(position):

    cv.Smooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.Not(gray, edge)

    # run the edge detector on the grayscale image
    cv.Canny(gray, edge, position, position * 3, 3)

    # reset
    cv.SetZero(col_edge)

    # copy edge points
    cv.Copy(im, col_edge, edge)

    # show the image
    cv.ShowImage(win_name, col_edge)
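
The trackbar-driven Canny example translates to cv2 roughly as below; the image path and window name are placeholders, and the inversion step is omitted for brevity.

import cv2

gray = cv2.imread("input.jpg", cv2.IMREAD_GRAYSCALE)   # placeholder source image

def on_trackbar_cv2(position):
    blurred = cv2.blur(gray, (3, 3))                   # cv.Smooth(..., CV_BLUR, 3, 3)
    edge = cv2.Canny(blurred, position, position * 3, apertureSize=3)
    cv2.imshow("edge", edge)

cv2.namedWindow("edge")
cv2.createTrackbar("threshold", "edge", 1, 100, on_trackbar_cv2)
on_trackbar_cv2(1)
cv2.waitKey(0)
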
Example #22
def run():
	# Capture image as CvCapture object
	capture = cv.CaptureFromFile('blob.jpeg')

	# get image from capture
	image = cv.QueryFrame(capture)

	# Smooth to get rid of false positives
	cv.Smooth(image,image,cv.CV_GAUSSIAN,19,0)

	# Display smoothed image
	cv.ShowImage('Blobs',image)

	findBlobs(image)

	cv.WaitKey(10000)
Example #23
    def on_trackbar(self, position):

        cv.Smooth(self.source_image, self.edge, cv.CV_BLUR, 3, 3, 0)
        cv.Not(self.source_image, self.edge)

        # run the edge detector on the grayscale image
        cv.Canny(self.source_image, self.edge, position, position * 3, 3)

        # reset
        cv.SetZero(self.col_edge)

        # copy edge points
        cv.Copy(self.source_color, self.col_edge, self.edge)

        # show the image
        cv.ShowImage(win_name, self.col_edge)
        self.process_image(position)
Example #24
    def test_tangent_45plusdegree(self):
        degree45 = cv.LoadImage(
            os.path.abspath(os.environ['BORG'] +
                            '/Brain/data/hog_test/45degree.jpg'))

        smoothed = cv.CreateImage(cv.GetSize(degree45), degree45.depth,
                                  degree45.channels)
        cv.Smooth(degree45, smoothed)
        image = smoothed
        (dx, dy) = self.gradient.sobelGradient(image)
        canny = self.gradient.cannyGradient(image)
        tangent = self.gradient.tangent(dx, dy, canny)

        size = cv.GetSize(tangent)

        for x in range(size[0]):
            for y in range(size[1]):
                if canny[y, x] > 0:
                    tang = math.atan2(dy[y, x], dx[y, x]) * self.constant
                    tang = int(tang)
                    if tang < 0:
                        tang = 360 + tang
                    '''if dx[y,x] < 0 and dy[y,x] >= 0:
                        tang += 180
                    elif dx[y,x] < 0 and dy[y,x] < 0:
                        tang -= 180'''
                else:
                    tang = 0
                if tangent[y, x] != tang:
                    print 'DX ->column: ', x, ' ,Row: ', y, ' ,Value: ', dx[y,
                                                                            x]
                    print 'DY ->column: ', x, ' ,Row: ', y, ' ,Value: ', dy[y,
                                                                            x]
                    print 'Real tangent:', tang
                    print 'The tangent from function', tangent[y, x]
                    self.fail("The tangents do not match")

                if canny[y, x] > 0 and not (
                    (tangent[y, x] >= 180 and tangent[y, x] <= 270)):
                    print 'The tangent from function', tangent[
                        y, x], "calculated here is:", tang
                    self.fail(
                        "The tangent is not in the range of 225 and  270. (45 degrees)"
                    )
Example #25
def findImage(img):
    #Set up storage for images
    frame_size = cv.GetSize(img)
    img2 = cv.CreateImage(frame_size,8,3)
    tmp = cv.CreateImage(frame_size,8,cv.CV_8U)
    h = cv.CreateImage(frame_size,8,1)

    #copy original image to do work on
    cv.Copy(img,img2)

    #altering the image a bit for smoother processing
    cv.Smooth(img2,img2,cv.CV_BLUR,3)
    cv.CvtColor(img2,img2,cv.CV_BGR2HSV)

    #make sure temp is empty
    cv.Zero(tmp)

    #detection based on HSV value
    #30,100,90 lower limit on pic 41,255,255 on pic
    #cv.InRangeS(img2,cv.Scalar(25,100,87),cv.Scalar(50,255,255),tmp)
    #Range for green plate dot in my Living room
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(65,95,90),tmp)
    #classroom
    #cv.InRangeS(img2,cv.Scalar(55,80,60),cv.Scalar(70,110,70),tmp)
    #Kutztowns Gym
    cv.InRangeS(img2,cv.Scalar(65,100,112),cv.Scalar(85,107,143),tmp)

    elmt_shape=cv.CV_SHAPE_ELLIPSE
    pos = 3
    element = cv.CreateStructuringElementEx(pos*2+1, pos*2+1, pos, pos, elmt_shape)
    cv.Dilate(tmp,tmp,element,6)
    cv.Erode(tmp,tmp,element,2)

    cv.Split(tmp,h,None,None,None)
    storage = cv.CreateMemStorage()

    scan = sc.FindContours(h,storage)
    xyImage=drawCircles(scan,img)

    if xyImage != None:
            return (xyImage,tmp)
    else:
            return None
Example #26
    def applyEffect(self, image, width, height):
        ipl_img = cv2.cv.CreateImageHeader((image.shape[1], image.shape[0]),
                                           cv.IPL_DEPTH_8U, 3)
        cv2.cv.SetData(ipl_img, image.tostring(),
                       image.dtype.itemsize * 3 * image.shape[1])

        gray = cv.CreateImage((width, height), 8, 1)  #tuple as the first arg

        dst_img = cv.CreateImage(cv.GetSize(ipl_img), cv.IPL_DEPTH_8U,
                                 3)  #_16S  => cv2.cv.iplimage
        if self.effect == 'dilate':
            cv.Dilate(ipl_img, dst_img, None, 5)
        elif self.effect == 'laplace':
            cv.Laplace(ipl_img, dst_img, 3)
        elif self.effect == 'smooth':
            cv.Smooth(ipl_img, dst_img, cv.CV_GAUSSIAN)
        elif self.effect == 'erode':
            cv.Erode(ipl_img, dst_img, None, 1)

        cv.Convert(dst_img, ipl_img)
        return self.ipl2tk_image(dst_img)
Example #27
    def motionDetect(self, img):
        cv.Smooth(img, img, cv.CV_GAUSSIAN, 3, 0)

        cv.RunningAvg(img, self.movingAvg, 0.020, None)
        cv.ConvertScale(self.movingAvg, self.tmp, 1.0, 0.0)
        cv.AbsDiff(img, self.tmp, self.diff)
        cv.CvtColor(self.diff, self.grayImage, cv.CV_RGB2GRAY)
        cv.Threshold(self.grayImage, self.grayImage, 70,255, cv.CV_THRESH_BINARY)
        cv.Dilate(self.grayImage, self.grayImage, None, 18)#18   
        cv.Erode(self.grayImage, self.grayImage, None, 10)#10
        storage = cv.CreateMemStorage(0)
        contour = cv.FindContours(self.grayImage, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
#        points = []                                                                                      
        while contour:
            boundRect = cv.BoundingRect(list(contour))
            contour = contour.h_next()
            pt1 = (boundRect[0], boundRect[1])
            pt2 = (boundRect[0] + boundRect[2], boundRect[1] + boundRect[3])
            cv.Rectangle(img, pt1, pt2, cv.CV_RGB(255,255,0), 1)

        return img
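
The contour/bounding-box part of the motion detector above can be sketched with cv2 as follows, assuming mask is the binary motion image and img the frame to annotate (OpenCV 4.x findContours signature).

import cv2

def draw_motion_boxes(img, mask):
    contours, _ = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 255), 1)  # yellow in BGR
    return img
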
Example #28
    def processaImagem(self):
        """
        Create a gray image from the current frame so the program runs faster, create an image with the
        difference between the previous frame and the current one, and binarize the gray image to filter out small pixels.
        """
        # Remove false positives.
        cv.Smooth(self.imagem_atual, self.imagem_atual)

        # Accumulate a running average across the frames.
        cv.RunningAvg(self.imagem_atual, self.imagem_auxiliar, 0.05)

        # Convert the running average back so we can work with it.
        cv.Convert(self.imagem_auxiliar, self.imagem_anterior)

        # Create a new image with the difference between the previous frame and the current one.
        cv.AbsDiff(self.imagem_atual, self.imagem_anterior, self.imagem_diferenca)

        # Convert the difference image to grayscale.
        cv.CvtColor(self.imagem_diferenca, self.imagem_cinza, cv.CV_RGB2GRAY)

        # Binarize the image to filter out small pixels.
        cv.Threshold(self.imagem_cinza, self.imagem_cinza, 50, 255, cv.CV_THRESH_BINARY)
Example #29
File: GUI2.py Project: garpan12/Ninjabot
    def ImagePro(self, capture, orig, processed, storage, grid):
        orig = cv.QueryFrame(capture)
        #cv.Normalize(orig)
        # filter for all yellow and blue - everything else is black
        processed = processor.colorFilterCombine(orig, "yellow", "blue", s)

        # Some processing and smoothing for easier circle detection
        cv.Canny(processed, processed, 5, 70, 3)
        cv.Smooth(processed, processed, cv.CV_GAUSSIAN, 7, 7)

        #cv.ShowImage('processed2', processed)

        # Find&Draw circles
        processor.find_circles(processed, storage, 100)

        #if it is in the range of 1 to 9, we can try and recalibrate our filter
        #if 1 <= storage.rows < 10:
        #    s = autocalibrate(orig, storage)

        processor.draw_circles(storage, orig)

        #warp = processor.update_grid(storage, orig, grid)

        # Delete and recreate the storage so it has the correct width
        #del(storage)
        #storage = cv.CreateMat(orig.width, 1, cv.CV_32FC3)

        #cv.ShowImage('output', orig)

        #return processed
        #cv.ShowImage('grid', warp)

        #warp = perspective_transform(orig)
        #cv.ShowImage('warped', warp)
        mask = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 3)
        cv.Resize(orig, mask)
        return mask
Example #30
# blank list to store coordinates of the blue blob
blue   = []


while(1):
	# captures feed from video in color
	color_image = cv.QueryFrame(capture)
	
	# create a blank image to draw on (same size as the captured frame)
	imdraw = cv.CreateImage(cv.GetSize(color_image), 8, 3)
	
	# clear the drawing image, mirror the frame, and smooth it to reduce noise
	cv.SetZero(imdraw)
	cv.Flip(color_image,color_image, 1)
	cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3, 0)
	# threshold the frame so only the blue regions remain
	imgbluethresh = getthresholdedimg(color_image)
	cv.Erode(imgbluethresh, imgbluethresh, None,  3)
	cv.Dilate(imgbluethresh, imgbluethresh, None, 10)
	# keep a copy of the thresholded image
	img2 = cv.CloneImage(imgbluethresh)
	# find the contours of the blue blobs
	storage = cv.CreateMemStorage(0)
	contour = cv.FindContours(imgbluethresh, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
	
	# blank list into which points for bounding rectangles around blobs are appended
	points = []	

	# this is the new part here. ie use of cv.BoundingRect()
	while contour: