Example #1
def update_mhi(img, dst, diff_threshold):
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
    size = cv.GetSize(img) # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        cv.Zero(mhi) # clear MHI at the beginning
        orient = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)

    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
    idx2 = (last + 1) % N # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
    cv.CvtScale(mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100: # reject very small components
            color = cv.CV_RGB(255, 0, 0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)

            count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue

            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst,
                    center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                     cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color,
                    3,
                    cv.CV_AA,
                    0)
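
Example #1 matches the OpenCV motempl.py sample, which defines the globals and constants that update_mhi relies on. A minimal driver sketch along those lines (constant values are taken from that sample; the camera index is an assumption):

import time
from math import cos, sin
import cv2.cv as cv

CLOCKS_PER_SEC = 1.0
MHI_DURATION = 1
MAX_TIME_DELTA = 0.5
MIN_TIME_DELTA = 0.05
N = 4                 # depth of the cyclic frame buffer
buf = [None] * N
last = 0
mhi = orient = mask = segmask = storage = None

if __name__ == "__main__":
    motion = None
    capture = cv.CreateCameraCapture(0)
    cv.NamedWindow("Motion", 1)
    while True:
        img = cv.QueryFrame(capture)
        if img:
            if not motion:
                motion = cv.CreateImage((img.width, img.height), 8, 3)
                cv.Zero(motion)
            update_mhi(img, motion, 30)  # frame-difference threshold of 30
            cv.ShowImage("Motion", motion)
        if cv.WaitKey(10) != -1:
            break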
Example #2
def get_shuffled_channels(self):
    '''Create a list of images with all channel combinations'''
    b, g, r = self.get_separated_channels()
    l = []
    for x, y, z in [(r, g, b), (r, b, g), (g, r, b), (g, b, r), (b, g, r),
                    (b, r, g)]:
        merged = cv.CreateImage(cv.GetSize(self.image), 8, 3)
        cv.Merge(x, y, z, None, merged)
        l.append(merged)
    return l
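
get_separated_channels is not shown; a plausible sketch of it (an assumption inferred from the usage above) splits self.image into its planes with cv.Split:

def get_separated_channels(self):
    # assumed helper: split self.image into its B, G, R planes
    size = cv.GetSize(self.image)
    b = cv.CreateImage(size, 8, 1)
    g = cv.CreateImage(size, 8, 1)
    r = cv.CreateImage(size, 8, 1)
    cv.Split(self.image, b, g, r, None)
    return b, g, r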
Example #3
def clone_color_image(img):
    '''Create a new color image based on a grayscale image'''
    roi = cv.GetImageROI(img)

    color = cv.CreateImage((img.width, img.height), img.depth, 4)
    cv.SetImageROI(color, roi)

    empty = cv.CreateImage((img.width, img.height), img.depth, 1)
    cv.Zero(empty)  # CreateImage does not initialize memory; clear the alpha plane
    cv.SetImageROI(empty, roi)

    cv.Merge(img, img, img, empty, color)
    return color
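
Hypothetical usage (the file name is an assumption): promote a grayscale image to a four-channel image whose B, G and R planes all hold the gray plane:

gray = cv.LoadImage("input.png", cv.CV_LOAD_IMAGE_GRAYSCALE)  # hypothetical file
color = clone_color_image(gray)
cv.ShowImage("color", color)
cv.WaitKey(0)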
Example #4
def run(self): 
    while True: 
        img = cv.QueryFrame( self.capture ) 

        #blur the source image to reduce color noise 
        cv.Smooth(img, img, cv.CV_BLUR, 3)

        #convert the image to hsv(Hue, Saturation, Value) so its  
        #easier to determine the color to track(hue) 
        hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3) 
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV) 

        #limit all pixels that don't match our criteria; here we are
        #looking for purple, but you can adjust the first value in both
        #tuples, which is the hue range (120-140). OpenCV uses 0-180 as
        #the hue range for the HSV color model
        thresholded_img =  cv.CreateImage(cv.GetSize(hsv_img), 8, 1) 
        cv.InRangeS(hsv_img, (120, 80, 80), (140, 255, 255), thresholded_img) 

        #determine the objects moments and check that the area is large  
        #enough to be our object 
        moments = cv.Moments(thresholded_img, 0) 
        area = cv.GetCentralMoment(moments, 0, 0) 

        #there can be noise in the video so ignore objects with small areas 
        if area > 100000:
            #determine the x and y coordinates of the center of the object 
            #we are tracking by dividing the 1, 0 and 0, 1 moments by the area 
            x = cv.GetSpatialMoment(moments, 1, 0)/area 
            y = cv.GetSpatialMoment(moments, 0, 1)/area 

            #print 'x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area) 

            #create an overlay to mark the center of the tracked object 
            overlay = cv.CreateImage(cv.GetSize(img), 8, 3) 

            cv.Circle(overlay, (int(x), int(y)), 2, (255, 255, 255), 20)
            cv.Add(img, overlay, img) 
            #add the thresholded image back to the img so we can see what was  
            #left after it was applied 
            cv.Merge(thresholded_img, None, None, None, img) 

        #display the image  
        cv.ShowImage(color_tracker_window, img) 

        if cv.WaitKey(10) == 27: 
            break 
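
Only the InRangeS bounds need to change to track another color; blue, for instance, sits roughly at hue 100-130 on OpenCV's 0-180 scale. A sketch (the exact bounds are an assumption, not from the original):

cv.InRangeS(hsv_img, (100, 80, 80), (130, 255, 255), thresholded_img)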
Example #5
def on_trackbar(edge_thresh):

    cv.Threshold(gray, edge, float(edge_thresh), float(edge_thresh),
                 cv.CV_THRESH_BINARY)
    #Distance transform
    cv.DistTransform(edge, dist, cv.CV_DIST_L2, cv.CV_DIST_MASK_5)

    # rescale and gamma-correct the distance image for display
    cv.ConvertScale(dist, dist, 5000.0, 0)
    cv.Pow(dist, dist, 0.5)

    # build a false-color image: blue = distance, green/red = inverted distance
    cv.ConvertScale(dist, dist32s, 1.0, 0.5)
    cv.AndS(dist32s, cv.ScalarAll(255), dist32s, None)
    cv.ConvertScale(dist32s, dist8u1, 1, 0)
    cv.ConvertScale(dist32s, dist32s, -1, 0)
    cv.AddS(dist32s, cv.ScalarAll(255), dist32s, None)
    cv.ConvertScale(dist32s, dist8u2, 1, 0)
    cv.Merge(dist8u1, dist8u2, dist8u2, None, dist8u)
    cv.ShowImage(wndname, dist8u)
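
on_trackbar assumes the globals of the OpenCV distrans.py demo it comes from. A minimal setup sketch (the file name and initial threshold are assumptions):

import cv2.cv as cv

wndname = "Distance transform"
gray = cv.LoadImage("stuff.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)  # hypothetical input
size = cv.GetSize(gray)
edge = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
dist = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
dist32s = cv.CreateImage(size, cv.IPL_DEPTH_32S, 1)
dist8u1 = cv.CloneImage(gray)
dist8u2 = cv.CloneImage(gray)
dist8u = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)

cv.NamedWindow(wndname, 1)
cv.CreateTrackbar("Threshold", wndname, 100, 255, on_trackbar)
on_trackbar(100)
cv.WaitKey(0)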
Example #6
    elif len(sys.argv) == 2:
        capture = cv.CreateFileCapture(sys.argv[1])

    if not capture:
        print "Could not initialize capturing..."
        sys.exit(-1)

    cv.NamedWindow("Laplacian", 1)

    while True:
        frame = cv.QueryFrame(capture)
        if frame:
            if not laplace:
                planes = [cv.CreateImage((frame.width, frame.height), 8, 1) for i in range(3)]
                laplace = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_16S, 1)
                colorlaplace = cv.CreateImage((frame.width, frame.height), 8, 3)

            cv.Split(frame, planes[0], planes[1], planes[2], None)
            for plane in planes:
                cv.Laplace(plane, laplace, 3)
                cv.ConvertScaleAbs(laplace, plane, 1, 0)

            cv.Merge(planes[0], planes[1], planes[2], None, colorlaplace)

            cv.ShowImage("Laplacian", colorlaplace)

        if cv.WaitKey(10) != -1:
            break

    cv.DestroyWindow("Laplacian")
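
The excerpt opens in the middle of an if/elif chain; the elif implies a default branch that opens a camera, as in the OpenCV laplace.py sample this matches. A sketch of what would precede the excerpt:

laplace = None
capture = None
if len(sys.argv) == 1:
    capture = cv.CreateCameraCapture(0)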
Example #7
    if len(sys.argv) > 1:
        im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        url = 'https://raw.github.com/opencv/opencv/master/samples/c/baboon.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        im = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)

    realInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 1)
    imaginaryInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 1)
    complexInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 2)

    cv.Scale(im, realInput, 1.0, 0.0)
    cv.Zero(imaginaryInput)
    cv.Merge(realInput, imaginaryInput, None, None, complexInput)

    dft_M = cv.GetOptimalDFTSize( im.height - 1 )
    dft_N = cv.GetOptimalDFTSize( im.width - 1 )

    dft_A = cv.CreateMat( dft_M, dft_N, cv.CV_64FC2 )
    image_Re = cv.CreateImage( (dft_N, dft_M), cv.IPL_DEPTH_64F, 1)
    image_Im = cv.CreateImage( (dft_N, dft_M), cv.IPL_DEPTH_64F, 1)

    # copy A to dft_A and pad dft_A with zeros
    tmp = cv.GetSubRect( dft_A, (0,0, im.width, im.height))
    cv.Copy( complexInput, tmp, None )
    if dft_A.width > im.width:
        tmp = cv.GetSubRect( dft_A, (im.width,0, dft_N - im.width, im.height))
        cv.Zero( tmp )
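
From here the OpenCV dft.py sample, which this excerpt matches, runs the transform and turns the complex result into a log-magnitude display. A compressed sketch of those next steps:

cv.DFT(dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height)
cv.Split(dft_A, image_Re, image_Im, None, None)
# magnitude = sqrt(Re**2 + Im**2), then log(1 + magnitude) for display
cv.Pow(image_Re, image_Re, 2.0)
cv.Pow(image_Im, image_Im, 2.0)
cv.Add(image_Re, image_Im, image_Re, None)
cv.Pow(image_Re, image_Re, 0.5)
cv.AddS(image_Re, cv.ScalarAll(1.0), image_Re, None)
cv.Log(image_Re, image_Re)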
Example #8
import cv2.cv as cv
import numpy as np

orig = cv.LoadImage('D:/test.png')
b = cv.CreateImage(cv.GetSize(orig), orig.depth, 1)
g = cv.CloneImage(b)
r = cv.CloneImage(b)
cv.Split(orig, b, g, r, None)

merged = cv.CreateImage(cv.GetSize(orig), 8, 3)
cv.Merge(g, b, r, None, merged)

cv.ShowImage("Image", orig)
cv.ShowImage("Blue", b)
cv.ShowImage("Green", g)
cv.ShowImage("Red", r)
cv.ShowImage("Merged", merged)

cv.WaitKey(0)
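
For comparison, the same split-and-shuffle is a one-liner each in the newer cv2 API (a sketch; the file path is kept from above):

import cv2

img = cv2.imread('D:/test.png')
b, g, r = cv2.split(img)
shuffled = cv2.merge((g, b, r))
cv2.imshow("Merged", shuffled)
cv2.waitKey(0)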
Example #9
def extractEyeBall(imgMatrix):
    # img = cv2.imread("E:\\python workspace\\Charlie\\eye1.jpg")

    print type(imgMatrix)
    imageArray = np.asarray(imgMatrix)

    # (rows, cols, channels) =  img.shape

    rows = imgMatrix.rows
    cols = imgMatrix.cols

    #     print rows
    #     print cols

    cv2.imshow("input", imageArray)

    originalImage = imgMatrix

    # split them into channels
    b8u = cv.CreateMat(rows, cols, cv.CV_8UC1)  # cv.CloneMat(img)
    g8u = cv.CreateMat(rows, cols, cv.CV_8UC1)  # cv.CloneMat(img)
    r8u = cv.CreateMat(rows, cols, cv.CV_8UC1)  # cv.CloneMat(img)
    cv.Split(originalImage, b8u, g8u, r8u, None)

    # invert colors
    for i in range(rows):
        for j in range(cols):
            r8u[i, j] = 255 - r8u[i, j]
            g8u[i, j] = 255 - g8u[i, j]
            b8u[i, j] = 255 - b8u[i, j]
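    # Note: this per-pixel loop is slow in CPython. The same inversion can be
    # done with one call per channel (an alternative sketch, not in the original):
    #   for ch in (r8u, g8u, b8u):
    #       cv.SubRS(ch, cv.ScalarAll(255), ch)  # ch = 255 - ch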

    cv.Merge(b8u, g8u, r8u, None, originalImage)
    if isShowImages:
        cv.ShowImage("subtraction ", originalImage)

    # convert it to grey scale
    grayScaleImage = cv.CreateMat(rows, cols, cv.CV_8UC1)
    cv.CvtColor(originalImage, grayScaleImage, cv.CV_BGR2GRAY)
    cv.ShowImage("grey ", grayScaleImage)

    greyArray = np.asarray(grayScaleImage, np.uint8)
    # binaryImage = cv.CreateMat(rows, cols, cv.CV_8UC1)
    # #make it binary by making it threshold at 220
    ret, binaryImage = cv2.threshold(greyArray, 220, 255, cv2.THRESH_BINARY)

    print type(binaryImage)

    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # cv2.erode(binaryImage, element, binaryImage)
    # cv.Dilate(binaryImage, binaryImage)

    # cv.Erode(binaryImage, binaryImage)
    # cv.Dilate(binaryImage, binaryImage)

    # show the image
    cv2.imshow("binary image ", binaryImage)

    params = cv2.SimpleBlobDetector_Params()
    #             params.minDistBetweenBlobs = 1.0  # minimum 10 pixels between blobs
    #             params.filterByArea = True        # filter my blobs by area of blob
    #             params.minArea = 5.0             # min 20 pixels squared
    #             params.maxArea = 500.0            # max 500 pixels squared
    params.minDistBetweenBlobs = 50.0
    params.filterByInertia = False
    params.filterByConvexity = False
    params.filterByColor = False
    params.filterByCircularity = False
    params.filterByArea = True
    params.minArea = 2.0
    params.maxArea = 500.0

    myBlobDetector = cv2.SimpleBlobDetector(params)
    keypoints = myBlobDetector.detect(binaryImage)
    print "blobs " + str(keypoints)

    # extract the x y coordinates of the keypoints:

    for i in range(0, len(keypoints) - 1):
        print keypoints[i].pt
        pt1 = (int(keypoints[i].pt[0]), int(keypoints[i].pt[1]))
        pt2 = (int(keypoints[i + 1].pt[0]), int(keypoints[i + 1].pt[1]))
        cv2.line(imageArray, pt1, pt2, (255, 0, 0))
    #                 float X=keypoints[i].pt.x;
    #                 float Y=keypoints[i].pt.y;

    cv2.imshow("eye ", imageArray)
Example #10
float_img = cv.CreateMat(10, 10, cv.CV_32FC1)  # placeholder, replaced by the load below
float_img = cv.Load("interest_grid.xml", cv.CreateMemStorage())
py_img = 1.0 * np.asarray(float_img)
py_img = 255 * (py_img / np.max(py_img))
img = cv.fromarray(py_img.astype(np.uint8))
rgb_img = cv.CreateImage((img.cols, img.rows), 8, 4)
# create an RGBA image from the grayscale grid
img_r = cv.CloneMat(img)
img_g = cv.CreateImage((img.cols, img.rows), 8, 1)
img_b = cv.CreateImage((img.cols, img.rows), 8, 1)
img_a = cv.CreateImage((img.cols, img.rows), 8, 1)
cv.Set(img_g, 10)
cv.Set(img_b, 100)
cv.Set(img_a, 100)

cv.Merge(img_b, img_g, img_r, img_a, rgb_img)
# pre-corner detect
corners = cv.CreateMat(float_img.rows, float_img.cols, float_img.type)
cv.PreCornerDetect(float_img, corners, 3)
# Canny edges
edges = cv.CreateImage((img.cols, img.rows), 8, 1)
print img.rows, img.cols, edges.height
cv.Canny(img, edges, 20.0, 160.0)
disp2 = edges
# good features to track
eig_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
temp_image = cv.CreateMat(img.rows, img.cols, cv.CV_32FC1)
features_x_y_vector = cv.GoodFeaturesToTrack(img,
                                             eig_image,
                                             temp_image,
                                             10,
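
The GoodFeaturesToTrack call above is cut off in the source. In the old cv API the remaining positional arguments are a quality level and a minimum distance, so a complete call might read (the values are assumptions):

features_x_y_vector = cv.GoodFeaturesToTrack(img, eig_image, temp_image,
                                             10, 0.04, 1.0)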
Example #11
# invert each color channel (rows, cols and the r8u/g8u/b8u planes come
# from surrounding code that is not shown)

for i in range(rows):
    for j in range(cols):
        #hsv_planes[2][i, j] = 180 - hsv_planes[2][i, j]
        r8u[i, j] = 255 - r8u[i, j]
        g8u[i, j] = 255 - g8u[i, j]
        b8u[i, j] = 255 - b8u[i, j]
        #cv.FloodFill(onesMatrix, (i, j), 255)

# merge the inverted planes back into the original image
#cv.Merge(hsv_planes, originalImage)
cv.Merge(b8u, g8u, r8u, None, originalImage)
# an HSV variant would convert back afterwards:
#cv.CvtColor(hsvImage, originalImage, cv.CV_HSV2RGB)

#onesMatrix = cv.fromarray(onesArray)
#cv.Sub(onesMatrix, img, originalImage)
#cv.Invert(invertedImage, invertedImage)

cv.ShowImage("subtraction ", originalImage)

Example #12
        #determine the x and y coordinates of the center of the object
        #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
        x = cv.GetSpatialMoment(moments, 1, 0) / area
        y = cv.GetSpatialMoment(moments, 0, 1) / area

        ##        print 'x: ' + str(x) + ' y: ' + str(y) + ' area: ' + str(area)

        #create an overlay to mark the center of the tracked object
        overlay = cv.CreateImage(cv.GetSize(F), 8, 3)

        cv.Circle(overlay, (int(x), int(y)), 2, (255, 255, 255), 20)
        cv.Circle(tr, (int(x), int(y)), 10, (255, 255, 255), -20)
        cv.Add(F, overlay, F)
        #add the thresholded image back to the img so we can see what was
        #left after it was applied
        cv.Merge(tr, None, None, None, F)

    threshold = thresh.copy()
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(thresh, contours, -1, (255, 255, 255), 5)

    img = cv2.cvtColor(f, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 5)  # smooth the gray image before HoughCircles
    circles = cv2.HoughCircles(img,
                               cv2.cv.CV_HOUGH_GRADIENT,
                               1,
                               10,
                               param1=100,
                               param2=30,
                               minRadius=5,
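
The HoughCircles call is likewise truncated; the one remaining keyword in this cv2 signature is maxRadius, so a complete call might read (the value is an assumption):

    circles = cv2.HoughCircles(img,
                               cv2.cv.CV_HOUGH_GRADIENT,
                               1,
                               10,
                               param1=100,
                               param2=30,
                               minRadius=5,
                               maxRadius=50)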