Example #1
def detectFaces():
    global frame_copy, min_size, image_scale, haar_scale, min_neighbors, haar_flags, cap, cam_pan, cam_tilt
    t0 = cv.GetTickCount()
    frame = cv.QueryFrame(cap)
    if not frame:
        cv.WaitKey(0)
        return False
    if not frame_copy:
        frame_copy = cv.CreateImage((frame.width,frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.Flip(frame, frame, -1)
   
    # Our operations on the frame come here
    gray = cv.CreateImage((frame.width,frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(frame.width / image_scale),
                   cv.Round (frame.height / image_scale)), 8, 1)
    small_img2 = cv.CreateImage((cv.Round(frame.width / image_scale),
                   cv.Round (frame.height / image_scale)), 8, 1)
    # convert color input image to grayscale
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
 
    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
 
    cv.EqualizeHist(small_img, small_img)

    #flip the image for more convenient camera mounting
    cv.Flip(small_img,small_img2,-1)

    midFace = None
    t1 = cv.GetTickCount()
 
    if(cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img2, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            #lights(50 if len(faces) == 0 else 0, 50 if len(faces) > 0 else 0,0,50)

            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
 #               cv.Rectangle(frame, pt1, pt2, cv.RGB(100, 220, 255), 1, 8, 0)
                # get the xy corner co-ords, calc the midFace location
                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]

                midFaceX = x1+((x2-x1)/2)
                midFaceY = y1+((y2-y1)/2)
                midFace = (midFaceX, midFaceY)

                offsetX = midFaceX / float(frame.width/2)
                offsetY = midFaceY / float(frame.height/2)
                offsetX -= 1
                offsetY -= 1

                cam_pan -= (offsetX * 5)
                cam_tilt += (offsetY * 5)
                cam_pan = max(0,min(180,cam_pan))
                cam_tilt = max(0,min(180,cam_tilt))

                print(offsetX, offsetY, midFace, cam_pan, cam_tilt, frame.width, frame.height)
                sys.stdout.flush()
 #               pan(int(cam_pan-90))
  #              tilt(int(cam_tilt-90))
                #break
 #   print "e"+str((t1-t0)/1000000)+"-"+str( (cv.GetTickCount()-t1)/1000000)
#    cv.ShowImage('Tracker',frame)
    if cv.WaitKey(1) & 0xFF == ord('q'):
        return False
    return True
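As a quick sanity check of the offset arithmetic above, here is a small worked example (the 640x480 frame size, the 90-degree starting angles and the face position are assumed values; the 5-degree gain and the 0-180 clamp come from the snippet):

# Assume a 640x480 frame with the face centre detected at (480, 120).
frame_w, frame_h = 640, 480
midFaceX, midFaceY = 480, 120

offsetX = midFaceX / float(frame_w / 2) - 1   # 480/320 - 1 = 0.5, face is right of centre
offsetY = midFaceY / float(frame_h / 2) - 1   # 120/240 - 1 = -0.5, face is above centre

cam_pan, cam_tilt = 90.0, 90.0                # assumed starting servo angles
cam_pan -= offsetX * 5                        # 90 - 2.5 = 87.5
cam_tilt += offsetY * 5                       # 90 - 2.5 = 87.5
cam_pan = max(0, min(180, cam_pan))           # clamp to the servo range
cam_tilt = max(0, min(180, cam_tilt))
print(cam_pan, cam_tilt)                      # (87.5, 87.5)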
Example #2
'''
from cv2 import cv
from GUI import *
def adaptivethreshold():
    cv.AdaptiveThreshold(src,dst,255,cv.CV_ADAPTIVE_THRESH_MEAN_C,cv.CV_THRESH_BINARY_INV,3,5)
    display(dst,"Destination Image")
    cv.WaitKey(0)
'''

from cv2 import cv
from GUI import *
from database import *
path = getpath()
src = cv.LoadImage(path, 0)
dst = cv.CreateImage((src.width, src.height), 8, src.channels)


def adaptivethreshold():
    display(src, "Source Image")
    cv.AdaptiveThreshold(src, dst, 255, cv.CV_ADAPTIVE_THRESH_MEAN_C,
                         cv.CV_THRESH_BINARY_INV, 3, 5)
    display(dst, "Destination Image")
    cv.WaitKey(0)


if __name__ == '__main__':
    adaptivethreshold()
Example #3
# send OSC tracking message in the network
client = OSC.OSCClient()
client.connect(('127.0.0.1', 7000))

# capture frame from available camera
capture = cv.CaptureFromCAM(0)

# get image size
testframe = cv.QueryFrame(capture)
size_image = cv.GetSize(testframe)
print "image is size %d x %d" % size_image

# images for base processing

# RGB format
rgb_image = cv.CreateImage(size_image, 8, 3)

# HSV image (better for color processing)
hsv_image = cv.CreateImage(size_image, cv.IPL_DEPTH_8U, 3)

# mask images: will contain pixels identified by color (see color plate
# included)
yellowmask_image = cv.CreateImage(size_image, cv.IPL_DEPTH_8U, 1)
greenmask_image = cv.CreateImage(size_image, cv.IPL_DEPTH_8U, 1)
redmask_image = cv.CreateImage(size_image, cv.IPL_DEPTH_8U, 1)
bluemask_image = cv.CreateImage(size_image, cv.IPL_DEPTH_8U, 1)

# pixels are gathered into "blobs" in these images
greenblob_image = cv.CreateImage(size_image, cv.IPL_DEPTH_8U, 1)
yellowblob_image = cv.CreateImage(size_image, cv.IPL_DEPTH_8U, 1)
redblob_image = cv.CreateImage(size_image, cv.IPL_DEPTH_8U, 1)
Example #4
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        url = 'https://raw.github.com/Itseez/opencv/master/samples/cpp/building.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)


    cv.NamedWindow("Source", 1)
    cv.NamedWindow("Hough", 1)

    while True:
        dst = cv.CreateImage(cv.GetSize(src), 8, 1)
        color_dst = cv.CreateImage(cv.GetSize(src), 8, 3)
        storage = cv.CreateMemStorage(0)
        lines = 0
        cv.Canny(src, dst, 50, 200, 3)
        cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR)

        if USE_STANDARD:
            lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 100, 0, 0)
            for (rho, theta) in lines[:100]:
                a = cos(theta)
                b = sin(theta)
                x0 = a * rho
                y0 = b * rho
                pt1 = (cv.Round(x0 + 1000*(-b)), cv.Round(y0 + 1000*(a)))
                pt2 = (cv.Round(x0 - 1000*(-b)), cv.Round(y0 - 1000*(a)))
Example #5
    if height is None:
        height = int(
            cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    if capture:
        frame_copy = None
        while True:

            frame = cv.QueryFrame(capture)
            if not frame:
                cv.WaitKey(0)
                break
            if not frame_copy:
                frame_copy = cv.CreateImage((frame.width, frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)

            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, frame_copy)
            else:
                cv.Flip(frame, frame_copy, 0)

            detect_and_draw(frame_copy, cascade)

            if cv.WaitKey(10) >= 0:
                break
    else:
        image = cv.LoadImage(input_name, 1)
        detect_and_draw(image, cascade)
        cv.WaitKey(0)
Example #6
def ActivateCamera():
    capture = cv.CaptureFromCAM(0)

    # declare zbar_scanner as a zbar image scanner
    zbar_scanner = zbar.ImageScanner()
    checkScan = False
    while (checkScan == False):
        # grab a frame from the camera
        img = cv.QueryFrame(capture)
        height = int(img.height)
        width = int(img.width)

        SubRect = cv.GetSubRect(img, (1, 1, width - 1, height - 1))

        # cv.Rectangle(img,(0,0),(width,height),(255,0,0))

        # to create the image
        set_image = cv.CreateImage((SubRect.width, SubRect.height),
                                   cv.IPL_DEPTH_8U, 1)

        cv.ConvertImage(SubRect, set_image)

        image = zbar.Image(set_image.width, set_image.height, 'Y800',
                           set_image.tostring())

        zbar_scanner.scan(image)

        for item in image:

            getFromScan = item.data

            splitString = getFromScan.split(":")
            checkProductName = splitString[0]
            checkExpirydate = splitString[2]
            itemCode = splitString[3]
            # data = {"name": getFromScan}
            data = {
                'price': 30,
                'Code': splitString[3],
                'name': splitString[0],
                'expiryDate': splitString[2]
            }
            checkDuplicate = False
            counterOuter = 0
            """"""
            # 1.1
            for i in itemlist:

                # 2.1 same product name
                if i['name'] == checkProductName:
                    print "have same product name"
                    checkDuplicate = True
                    # 3.1 same expiryDate
                    if i['expiryDate'] == checkExpirydate:
                        print "have same expiryDate"
                        """
                        print i['Code'] + ' is the old item code'
                        i['Code'] += ',' + itemCode
                        print i['Code'] + ' is the new item code' """
                        print i['Code']
                        CodeString = json.dumps(i['Code'])
                        cleanCodeString = CodeString.replace('"', '')
                        listOfCode = cleanCodeString.split(',')
                        counterInner = 0
                        for itemID in listOfCode:

                            print 'U r in the new for loop'
                            # 4.1 itemCode(itemID) is duplicate
                            print 'itemID is ' + itemID
                            print 'itemCode is ' + itemCode
                            if itemID == itemCode:

                                # 5.1 array of Code is equal 1
                                if len(listOfCode) == 1:
                                    # remove statement

                                    print 'U are in the code is duplicate condition'

                                    checkDuplicate = True
                                    removeItemKey = itemlist_key[counterOuter]
                                    print removeItemKey
                                    forRemoveDupCode = firebase.ref(
                                        'CustomerInfo/-KqsdeuVyyatxKELuMs4/Item'
                                    )
                                    forRemoveDupCode.child(
                                        removeItemKey).delete()
                                    # ref.child(removeItemKey).delete()
                                    itemlist.pop(counterOuter)
                                    itemlist_key.pop(counterOuter)
                                    #
                                    """   
                                            #
                                    """
                                    print("Successful for removing item" +
                                          getFromScan)
                                    microgear.chat("outdoor/temp",
                                                   json.dumps(itemlist))
                                    time.sleep(5)
                                    return True
                                # 5.2 array of Code is greater than 1
                                else:
                                    # delete item code from dict and update
                                    # update statement

                                    print 'Code  duplicate but length is greater than 1 condition'

                                    checkDuplicate = True
                                    removeItemKey = itemlist_key[counterOuter]
                                    print removeItemKey

                                    # for update duplicate product name and expirydate but itemCode(itemID)

                                    # i['Code'] += ',' + itemCode

                                    del listOfCode[counterInner]
                                    newCode = json.dumps(listOfCode)
                                    newCode1 = newCode.replace('"', '')
                                    newCode2 = newCode1.replace('/', '')
                                    newCode3 = newCode2.replace('[', '')
                                    newCode4 = newCode3.replace(']', '')
                                    newCode5 = newCode4.replace(' ', '')
                                    forRemoveExCode = firebase.ref(
                                        'CustomerInfo/-KqsdeuVyyatxKELuMs4/Item'
                                    )
                                    forRemoveExCode.child(removeItemKey).child(
                                        'Code').set(newCode5)
                                    newListCode = ''
                                    for i in listOfCode:
                                        newListCode += i

                                    i['Code'] = newListCode
                                    """   
                                            #
                                    """
                                    print("Successful for removing item Code")
                                    microgear.chat("outdoor/temp",
                                                   json.dumps(itemlist))
                                    time.sleep(5)
                                    return True
                            counterInner += 1
                        # 4.2 itemCode(itemID) is not duplicate
                        else:
                            # update statement with append new itemCode into existing dict
                            print 'update'
                            checkDuplicate = True
                            removeItemKey = itemlist_key[counterOuter]
                            listOfCode.append(itemCode)
                            newCode = json.dumps(listOfCode)
                            newCode1 = newCode.replace('"', '')
                            newCode2 = newCode1.replace('/', '')
                            newCode3 = newCode2.replace('[', '')
                            newCode4 = newCode3.replace(']', '')
                            newCode5 = newCode4.replace(' ', '')
                            forupdateNewCode = firebase.ref(
                                'CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')
                            forupdateNewCode.child(removeItemKey).child(
                                'Code').set(newCode5)
                            newListCode = ''
                            for i in listOfCode:
                                newListCode += i

                            i['Code'] = newListCode
                            microgear.chat("outdoor/temp",
                                           json.dumps(itemlist))
                            time.sleep(5)
                            return True

                    # 3.2 expiry date is not duplicate
                    else:
                        # add statement
                        checkDuplicate = True
                        foraddnewDate = firebase.ref(
                            'CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')

                        NewItemKey = foraddnewDate.push(data)
                        print("Successful for adding new date")
                        abcd = json.dumps(NewItemKey.values())
                        pkpk = re.sub('[^a-zA-Z_0-9-]+', '', abcd)
                        itemlist.append(data)
                        itemlist_key.append(pkpk)
                        microgear.chat("outdoor/temp", json.dumps(itemlist))
                        time.sleep(5)
                        return True
                counterOuter += 1

            # 1.2 if checkDuplicate == false it will add the new one
            if checkDuplicate == False:
                # add statement
                print 'add new item'
                foraddNewitemRef = firebase.ref(
                    'CustomerInfo/-KqsdeuVyyatxKELuMs4/Item')

                NewItemKey = foraddNewitemRef.push(data)
                print("Successful for adding new item")
                abcd = json.dumps(NewItemKey.values())
                pkpk = re.sub('[^a-zA-Z_0-9-]+', '', abcd)
                itemlist.append(data)
                itemlist_key.append(pkpk)
                microgear.chat("outdoor/temp", json.dumps(itemlist))
                """
                print abcd
                print data
                print pkpk"""
                time.sleep(5)
                return True

        #cv.ShowImage("ISR Scanner", img)

        # less for fast video rendering
        cv.WaitKey(1)
Example #7
    """
    icolor = random.randint(0, 0xFFFFFF)
    return cv.Scalar(icolor & 0xff, (icolor >> 8) & 0xff, (icolor >> 16) & 0xff)

if __name__ == '__main__':

    # some "constants"
    width = 1000
    height = 700
    window_name = "Drawing Demo"
    number = 100
    delay = 5
    line_type = cv.CV_AA  # change it to 8 to see non-antialiased graphics

    # create the source image
    image = cv.CreateImage( (width, height), 8, 3)

    # create window and display the original picture in it
    cv.NamedWindow(window_name, 1)
    cv.SetZero(image)
    cv.ShowImage(window_name, image)

    # create the random number
    random = Random()

    # draw some lines
    for i in range(number):
        pt1 =  (random.randrange(-width, 2 * width),
                          random.randrange(-height, 2 * height))
        pt2 =  (random.randrange(-width, 2 * width),
                          random.randrange(-height, 2 * height))
Example #8
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 1, 0, 2, 8)

    while True:

        frame = cv.QueryFrame(capture)

        if not frame:

            cv.WaitKey(0)
            break

        cv.Smooth(frame, frame, cv.CV_BLUR, 3)

        hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)

        cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)

        thr = cv.CreateImage(cv.GetSize(frame), 8, 1)

        #Change threshold value

        cv.InRangeS(hsv, (0, 140, 10), (170, 180, 60), thr)

        moments = cv.Moments(cv.GetMat(thr, 1), 0)

        area = cv.GetCentralMoment(moments, 0, 0)

        cv.Line(frame, (80, 0), (80, 120), (0, 0, 255), 3, 8, 0)
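The excerpt ends right after the zeroth moment (the blob area) is computed; a minimal sketch of how the blob centroid is typically derived from the same moments object, continuing with the names above (the cv.Circle overlay is just an assumed visualisation):

        if area > 0:
            # centroid = (M10 / M00, M01 / M00)
            posX = cv.GetSpatialMoment(moments, 1, 0) / area
            posY = cv.GetSpatialMoment(moments, 0, 1) / area
            cv.Circle(frame, (int(posX), int(posY)), 4, (0, 255, 0), -1)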
Example #9
        draw_subdiv_facet(img, cv.Subdiv2DRotateEdge(edge, 1))

        # right
        draw_subdiv_facet(img, cv.Subdiv2DRotateEdge(edge, 3))


if __name__ == '__main__':
    win = "source"
    rect = (0, 0, 600, 600)

    active_facet_color = cv.RGB(255, 0, 0)
    delaunay_color = cv.RGB(0, 0, 0)
    voronoi_color = cv.RGB(0, 180, 0)
    bkgnd_color = cv.RGB(255, 255, 255)

    img = cv.CreateImage((rect[2], rect[3]), 8, 3)
    cv.Set(img, bkgnd_color)

    cv.NamedWindow(win, 1)

    storage = cv.CreateMemStorage(0)
    subdiv = cv.CreateSubdivDelaunay2D(rect, storage)

    print "Delaunay triangulation will be build now interactively."
    print "To stop the process, press any key\n"

    for i in range(200):
        fp = (random.random() * (rect[2] - 10) + 5,
              random.random() * (rect[3] - 10) + 5)

        locate_point(subdiv, fp, img, active_facet_color)
Example #10
def closeImage(im, nbiter=0):
    for i in range(nbiter):
        cv.MorphologyEx(im, im, None, None, cv.CV_MOP_CLOSE)

def dilateImage(im, nbiter=0):
    for i in range(nbiter):
        cv.Dilate(im, im)

def erodeImage(im, nbiter=0):
    for i in range(nbiter):
        cv.Erode(im, im)

def thresholdImage(im, value, filter=cv.CV_THRESH_BINARY_INV):
    cv.Threshold(im, im, value, 255, filter)

def resizeImage(im, (width, height)):
    # Resizing the image beforehand can noticeably affect how well the OCR engine detects characters
    res = cv.CreateImage((width,height), im.depth, im.channels)
    cv.Resize(im, res)
    return res

def getContours(im, approx_value=1): #Return contours approximated
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(cv.CloneImage(im), storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
    contourLow=cv.ApproxPoly(contours, storage, cv.CV_POLY_APPROX_DP,approx_value,approx_value)
    return contourLow

def getIndividualContoursRectangles(contours): #Return the bounding rect for every contours
    contourscopy = contours
    rectangleList = []
    while contourscopy:
        x,y,w,h = cv.BoundingRect(contourscopy)
        rectangleList.append((x,y,w,h))
Example #11
ipts = mk_image_points(goodcorners)
opts = mk_object_points(len(goodcorners), .1)
npts = mk_point_counts(len(goodcorners))

intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
cv.SetZero(intrinsics)
cv.SetZero(distortion)
# focal lengths have 1/1 ratio
intrinsics[0, 0] = 1.0
intrinsics[1, 1] = 1.0
cv.CalibrateCamera2(opts,
                    ipts,
                    npts,
                    cv.GetSize(images[0]),
                    intrinsics,
                    distortion,
                    cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
                    cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
                    flags=0)  # cv.CV_CALIB_ZERO_TANGENT_DIST)
print "D =", list(cvmat_iterator(distortion))
print "K =", list(cvmat_iterator(intrinsics))
mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)
for img in images:
    r = cv.CloneMat(img)
    cv.Remap(img, r, mapx, mapy)
    cv.ShowImage("snap", r)
    cv.WaitKey()
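After cv.CalibrateCamera2 returns, the intrinsics matrix holds the focal lengths on its diagonal and the principal point in its last column; a short snippet for reading them out (the fx/fy/cx/cy names are just labels):

fx, fy = intrinsics[0, 0], intrinsics[1, 1]
cx, cy = intrinsics[0, 2], intrinsics[1, 2]
print "fx=%.2f fy=%.2f cx=%.2f cy=%.2f" % (fx, fy, cx, cy)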
Example #12
    rng = cv.RNG(-1)

    print "Hot keys:"
    print "\tESC - quit the program"
    print "\tr - restore the original image"
    print "\tw - run watershed algorithm"
    print "\t  (before that, roughly outline several markers on the image)"

    cv.NamedWindow("image", 1)
    cv.NamedWindow("watershed transform", 1)

    img = cv.CloneImage(img0)
    img_gray = cv.CloneImage(img0)
    wshed = cv.CloneImage(img0)
    marker_mask = cv.CreateImage(cv.GetSize(img), 8, 1)
    markers = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32S, 1)

    cv.CvtColor(img, marker_mask, cv.CV_BGR2GRAY)
    cv.CvtColor(marker_mask, img_gray, cv.CV_GRAY2BGR)

    cv.Zero(marker_mask)
    cv.Zero(wshed)

    cv.ShowImage("image", img)
    cv.ShowImage("watershed transform", wshed)

    sk = Sketcher("image", [img, marker_mask])

    while True:
        c = cv.WaitKey(0) % 0x100
Example #13
import cv2.cv as cv

im = cv.LoadImage("../img/alkaline.jpg")  #get the image

thumb = cv.CreateImage(
    (im.width / 2, im.height / 2), 8,
    3)  # Create an image half the size of the original
cv.Resize(im, thumb)  #resize the original image into thumb
#cv.PyrDown(im, thumb)
cv.SaveImage("thumb.png", thumb)  # save the thumb image
#capture = cv.CaptureFromCAM(1)
capture = cv.CaptureFromCAM(2)

#width = 160 #leave None for auto-detection
#height = 120 #leave None for auto-detection
width = 640  #leave None for auto-detection
height = 480  #leave None for auto-detection
middle_w = 2  # Crosshair-centre width offset (zero calibration; larger values shift the line left)
middle_h = 2  # Crosshair-centre height offset (zero calibration; larger values shift the line up)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

while True:
    img = cv.QueryFrame(capture)
    cv.Smooth(img, img, cv.CV_BLUR, 3)
    hue_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hue_img, cv.CV_BGR2HSV)

    threshold_img = cv.CreateImage(cv.GetSize(hue_img), 8, 1)
    #Python: cv.InRangeS(src, lower, upper, dst) http://www.colorspire.com/
    cv.InRangeS(hue_img, (38, 120, 60), (75, 255, 255),
                threshold_img)  # color code green
    #cv.InRangeS(hue_img, (100,120,60), (200,255,255), threshold_img)          # color code blue
    #cv.InRangeS(hue_img, (0,100,60), (10,255,255), threshold_img)           # color code red

    storage = cv.CreateMemStorage(0)
    contour = cv.FindContours(threshold_img, storage, cv.CV_RETR_CCOMP,
                              cv.CV_CHAIN_APPROX_SIMPLE)

    points = []
    while contour:
def imgResizer1(crop_img_string, width, height):
    image1 = cv.LoadImage(crop_img_string, cv.CV_LOAD_IMAGE_GRAYSCALE)
    dst1 = cv.CreateImage((width, height), 8, 1)
    cv.Resize(image1, dst1, interpolation=cv.CV_INTER_LINEAR)
    cv.SaveImage('Z_Resized_image.png', dst1)
    return dst1
haarEyes = cv.Load('eye.xml')
haarleftEar = cv.Load('left_ear.xml')
haarrightEar = cv.Load('right_ear.xml')


def detect(imcolor):
    eyesList=[]
    faceList=[]
    leftEarList=[]
    rightEarList=[]

    
    image_size = cv.GetSize(imcolor)
    
    # create grayscale version
    grayscale = cv.CreateImage(image_size, 8, 1)
    cv.CvtColor(imcolor, grayscale, cv.CV_BGR2GRAY)

    # create storage
    storage = cv.CreateMemStorage(0)

    # equalize histogram
    cv.EqualizeHist(grayscale, grayscale)

    # detect objects
    detectedFace = cv.HaarDetectObjects(imcolor, haarFace, storage)
    detectedLeftEar = cv.HaarDetectObjects(imcolor, haarleftEar, storage)
    detectedRightEar = cv.HaarDetectObjects(imcolor, haarrightEar, storage)

    detectedEyes = cv.HaarDetectObjects(imcolor, haarEyes, storage)
Example #17
import cv2.cv as cv

img = cv.LoadImage('img/blueRose.jpeg', cv.CV_LOAD_IMAGE_COLOR)
rose = cv.CreateImage(cv.GetSize(img), cv.CV_8UC2, 3)
cv.Convert(img, rose)
cv.ShowImage('converting - press key \'space\' to invoke convert', rose)

k = cv.WaitKey(0)
#assert k == 32 # space
rose2 = cv.CreateImage(cv.GetSize(img), cv.CV_8UC2, 3)
cv.CvtColor(img, rose2, cv.CV_RGB2BGR)
cv.ShowImage('CvtColor', rose2)
k = cv.WaitKey(0)
if k == 27:
    cv.DestroyAllWindows()
Example #18


    def OnPaint(self, evt):
        if not self.timer.IsRunning():
            dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap,
                               wx.BUFFER_VIRTUAL_AREA)
            dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
            return

        # Capture the image
        frame = cv.QueryFrame(CAMERA)
        cv.CvtColor(frame, frame, cv.CV_BGR2RGB)
        Img = wx.EmptyImage(frame.width, frame.height)
        Img.SetData(frame.tostring())
        self.bmp = wx.BitmapFromImage(Img)
        width, height = frame.width, frame.height

        # Face detection
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0

        gray = cv.CreateImage((frame.width, frame.height), 8, 1)
        small_img = cv.CreateImage((cv.Round(
            frame.width / image_scale), cv.Round(frame.height / image_scale)),
                                   8, 1)
        cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
        cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
        cv.EqualizeHist(small_img, small_img)

        listeVisages = cv.HaarDetectObjects(small_img, CASCADE,
                                            cv.CreateMemStorage(0), haar_scale,
                                            min_neighbors, haar_flags,
                                            min_size)

        # Display the image
        x, y = (0, 0)
        try:
            dc = wx.BufferedDC(wx.ClientDC(self), wx.NullBitmap,
                               wx.BUFFER_VIRTUAL_AREA)
            try:
                dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0)))
            except:
                pass
            dc.Clear()
            dc.DrawBitmap(self.bmp, x, y)

            # Draw rectangles around the detected faces
            if listeVisages:
                for ((x, y, w, h), n) in listeVisages:
                    dc.SetBrush(wx.TRANSPARENT_BRUSH)
                    dc.SetPen(wx.Pen(wx.Colour(255, 0, 0), 2))
                    dc.DrawRectangle(x * image_scale, y * image_scale,
                                     w * image_scale, h * image_scale)

            self.listeVisages = listeVisages
            del dc
            del Img

        except TypeError:
            pass
        except wx.PyDeadObjectError:
            pass
Example #19
path_root = '/home/ivan/dev/pydev/lab/labtrans/plotter/data/cam/'
# 20130626_115022_imagemPlaca.jpg
for f in os.listdir(path_root):
    #for f in ['20130626_115022_imagemPlaca.jpg']:
    print f
    image = cv.LoadImageM(path_root + f)
    for plate in anpr.detect_plates(image):
        #quick_show(image)
        #quick_show(plate)
        #zzz = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 3)
        #cv.Smooth(plate, zzz)
        #
        #cv.PyrMeanShiftFiltering(plate, zzz, 40, 15)
        foo = anpr.greyscale(plate)
        segmented = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        bar = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        cv.EqualizeHist(foo, segmented)

        cv.AdaptiveThreshold(
            segmented, bar, 255, cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
            cv.CV_THRESH_BINARY_INV,
            plate.height % 2 == 0 and (plate.height + 1) or plate.height,
            plate.height / 2)

        baz = cv.CreateImage(cv.GetSize(plate), cv.IPL_DEPTH_8U, 1)
        el = cv.CreateStructuringElementEx(1, 2, 0, 0, cv.CV_SHAPE_RECT)
        cv.Erode(bar, baz, el)
        # quick_show(plate)
        print 'baz'
        quick_show(baz)
Example #20
import cv2.cv as cv
import math

im=cv.LoadImage('../img/road.png', cv.CV_LOAD_IMAGE_GRAYSCALE)

pi = math.pi #Pi value

dst = cv.CreateImage(cv.GetSize(im), 8, 1)

cv.Canny(im, dst, 200, 200)
cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)

#---- Standard ----
color_dst_standard = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.CvtColor(im, color_dst_standard, cv.CV_GRAY2BGR)#Create output image in RGB to put red lines

lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD, 1, pi / 180, 100, 0, 0)
for (rho, theta) in lines[:100]:
    a = math.cos(theta) #Calculate orientation in order to print them
    b = math.sin(theta)
    x0 = a * rho 
    y0 = b * rho
    pt1 = (cv.Round(x0 + 1000*(-b)), cv.Round(y0 + 1000*(a)))
    pt2 = (cv.Round(x0 - 1000*(-b)), cv.Round(y0 - 1000*(a)))
    cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2, 4) #Draw the line

        
#---- Probabilistic ----
color_dst_proba = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.CvtColor(im, color_dst_proba, cv.CV_GRAY2BGR) # idem
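The probabilistic branch is cut off here; a sketch of how it typically continues, drawing each detected segment directly (the threshold, minimum-length and maximum-gap values 50/50/10 and the window names are assumptions, not taken from the original):

lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_PROBABILISTIC,
                       1, pi / 180, 50, 50, 10)
for line in lines:
    cv.Line(color_dst_proba, line[0], line[1], cv.CV_RGB(255, 0, 0), 2, 8)

cv.ShowImage('Standard Hough', color_dst_standard)
cv.ShowImage('Probabilistic Hough', color_dst_proba)
cv.WaitKey(0)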
Example #21

if __name__ == "__main__":
    edge_thresh = 100

    if len(sys.argv) > 1:
        gray = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        url = 'http://code.opencv.org/svn/opencv/trunk/opencv/samples/c/stuff.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        gray = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)

    # Create the output image
    dist = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32F, 1)
    dist8u1 = cv.CloneImage(gray)
    dist8u2 = cv.CloneImage(gray)
    dist8u = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_8U, 3)
    dist32s = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32S, 1)

    # Convert to grayscale
    edge = cv.CloneImage(gray)

    # Create a window
    cv.NamedWindow(wndname, 1)

    # create a toolbar
    cv.CreateTrackbar(tbarname, wndname, edge_thresh, 255, on_trackbar)

    # Show the image
Example #22
#coding=utf-8

import cv2.cv as cv

image = cv.LoadImage('meinv.jpg', cv.CV_LOAD_IMAGE_COLOR)

font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
y = image.height / 4
x = image.width / 2

cv.PutText(image, "Hello Meinv!", (x, y), font, cv.RGB(0, 0, 0))

thumb = cv.CreateImage((image.width / 2, image.height / 2), cv.CV_8UC2, 3)
cv.Resize(image, thumb)
#cvt = cv.CreateImage(cv.GetSize(thumb), cv.CV_8UC2, 3)
#cv.CvtColor(thumb, cvt, cv.CV_RGB2BGR)
#cv.NamedWindow('Image', cv.CV_WINDOW_AUTOSIZE)

b = cv.CreateImage(cv.GetSize(thumb), thumb.depth, 1)
g = cv.CloneImage(b)
r = cv.CloneImage(b)

cv.Split(thumb, b, g, r, None)

merged = cv.CreateImage(cv.GetSize(thumb), 8, 3)
cv.Merge(g, b, r, None, merged)

cv.ShowImage('Image', thumb)
cv.ShowImage('Blue', b)
cv.ShowImage('Green', g)
cv.ShowImage('Red', r)
Example #23
def findSquares4(img, storage):
    N = 11
    sz = (img.width & -2, img.height & -2)
    timg = cv.CloneImage(img); # make a copy of input image
    gray = cv.CreateImage(sz, 8, 1)
    pyr = cv.CreateImage((sz.width/2, sz.height/2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.CreateSeq(0, sizeof_CvSeq, sizeof_CvPoint, storage)
    squares = CvSeq_CvPoint.cast(squares)

    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.GetSubRect(timg, cv.Rect(0, 0, sz.width, sz.height))

    # down-scale and upscale the image to filter out the noise
    cv.PyrDown(subimage, pyr, 7)
    cv.PyrUp(pyr, subimage, 7)
    tgray = cv.CreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.Split(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if(l == 0):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.Canny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.Dilate(gray, gray, None, 1)
            else:
                # apply threshold if l!=0:
                #     tgray(x, y) = gray(x, y) < (l+1)*255/N ? 255 : 0
                cv.Threshold(tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY)

            # find contours and store them all as a list
            count, contours = cv.FindContours(gray, storage, sizeof_CvContour,
                cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0, 0))

            if not contours:
                continue

            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.ApproxPoly(contour, sizeof_CvContour, storage,
                    cv.CV_POLY_APPROX_DP, cv.ContourPerimeter(contours)*0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if(result.total == 4 and
                    abs(cv.ContourArea(result)) > 1000 and
                    cv.CheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if(i >= 2):
                            t = abs(angle(result[i], result[i-2], result[i-1]))
                            if s<t:
                                s=t
                    # if cosines of all angles are small
                    # (all angles are ~90 degrees) then write the quadrangle
                    # vertices to the resultant sequence
                    if(s < 0.3):
                        for i in range(4):
                            squares.append(result[i])

    return squares
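findSquares4 calls an angle() helper that is not part of this excerpt; a minimal sketch consistent with the classic OpenCV squares sample, returning the cosine of the angle formed at pt0 (assumed implementation):

import math

def angle(pt1, pt2, pt0):
    # cosine of the angle between the vectors pt0->pt1 and pt0->pt2
    dx1 = pt1[0] - pt0[0]
    dy1 = pt1[1] - pt0[1]
    dx2 = pt2[0] - pt0[0]
    dy2 = pt2[1] - pt0[1]
    return (dx1 * dx2 + dy1 * dy2) / math.sqrt(
        (dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10)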
Example #24
# coding=utf-8
# name=hu_yang_jie
import cv2.cv as cv

im = cv.LoadImage("bili.png", cv.CV_LOAD_IMAGE_GRAYSCALE)

dst_32f = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_32F, 1)

neighbourhood = 3
aperture = 3
k = 0.01
maxStrength = 0.0
threshold = 0.01
nonMaxSize = 3

cv.CornerHarris(im, dst_32f, neighbourhood, aperture, k)

minv, maxv, minl, maxl = cv.MinMaxLoc(dst_32f)

dilated = cv.CloneImage(dst_32f)
cv.Dilate(
    dst_32f, dilated
)  # dilation leaves each local-maximum pixel unchanged and raises every other pixel

localMax = cv.CreateMat(dst_32f.height, dst_32f.width, cv.CV_8U)
cv.Cmp(
    dst_32f, dilated, localMax, cv.CV_CMP_EQ
)  # comparing keeps only the unmodified pixels, i.e. the local maxima, which are the corner candidates

threshold = 0.01 * maxv
cv.Threshold(dst_32f, dst_32f, threshold, 255, cv.CV_THRESH_BINARY)
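The excerpt stops after thresholding the Harris response; a minimal sketch of how the thresholded map and the localMax mask might be combined into a single corner mask (the cornerMap name and the 8-bit conversion are assumptions, not part of the original):

cornerMap = cv.CreateMat(dst_32f.height, dst_32f.width, cv.CV_8U)
cv.Convert(dst_32f, cornerMap)          # thresholded response as an 8-bit mask (0 or 255)
cv.And(cornerMap, localMax, cornerMap)  # keep only strong responses that are also local maxima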
Example #25
# Open the video stream and create a window with a live preview of it.
cap = cv.CreateCameraCapture(0)
cv.NamedWindow("Seguimiento", 1)

if cap:
    frame_copy = None

while (True):
    # Capture the image frame by frame and create the window if a frame exists.
    frame = cv.QueryFrame(cap)
    if not frame:
        cv.WaitKey(0)
        break
    if not frame_copy:
        frame_copy = cv.CreateImage((frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)

    # Rescale the image in both width and height when converting it to grayscale and doing the later downscaling.
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        frame.width / image_scale), cv.Round(frame.height / image_scale)), 8,
                               1)

    # Convert the colour image to grayscale with BGR2GRAY to simplify detection, since colour is not useful here.
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    # Rescale the image to improve performance.
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    # Equalize the histogram of the grayscale image so we work with values from 0 to 255.
    cv.EqualizeHist(small_img, small_img)
Example #26
def detectObject(filename):
    img=cv.LoadImage(filename)
    '''
    #get color histogram
    '''
   
#     im32f=np.zeros((img.shape[:2]),np.uint32)
    hist_range=[[0,256],[0,256],[0,256]]
    im32f=cv.CreateImage(cv.GetSize(img), cv2.IPL_DEPTH_32F, 3)
    cv.ConvertScale(img, im32f)
    
    
    hist=cv.CreateHist([32,32,32],cv.CV_HIST_ARRAY,hist_range,3)
    '''
    #create three histogram'''
    b=cv.CreateImage(cv.GetSize(im32f), cv2.IPL_DEPTH_32F, 1)
    g=cv.CreateImage(cv.GetSize(im32f), cv2.IPL_DEPTH_32F, 1)
    r=cv.CreateImage(cv.GetSize(im32f), cv2.IPL_DEPTH_32F, 1)
    
   
    '''
    #create image backproject 32f, 8u
    '''
    backproject32f=cv.CreateImage(cv.GetSize(img),cv2.IPL_DEPTH_32F,1)
    backproject8u=cv.CreateImage(cv.GetSize(img),cv2.IPL_DEPTH_8U,1)
    '''
    #create binary
    '''
    bw=cv.CreateImage(cv.GetSize(img),cv2.IPL_DEPTH_8U,1)
    '''
    #create kernel image
    '''
    kernel=cv.CreateStructuringElementEx(3, 3, 1, 1, cv2.MORPH_ELLIPSE)
    cv.Split(im32f, b, g, r,None)

    planes=[b,g,r]
    cv.CalcHist(planes, hist)
    '''
    #find min and max histogram bin.
    '''
    minval=maxval=0.0
    min_idx=max_idx=0
    minval, maxval, min_idx, max_idx=cv.GetMinMaxHistValue(hist)
    '''
    # threshold histogram.  this sets the bin values that are below the threshold
    to zero
    '''
    cv.ThreshHist(hist, maxval/32.0)
    '''
    #backproject the thresholded histogram; backprojection should contain higher values for
    #background and lower values for the foreground
    '''
    cv.CalcBackProject(planes, backproject32f, hist)
    '''
    #convert to 8u type
    '''
    val_min=val_max=0.0
    idx_min=idx_max=0
    val_min,val_max,idx_min,idx_max=cv.MinMaxLoc(backproject32f)
    cv.ConvertScale(backproject32f, backproject8u,255.0/maxval)
    '''
    #threshold backprojected image. this gives us the background
    '''
    cv.Threshold(backproject8u, bw, 10, 255, cv2.THRESH_BINARY)
    '''
    #some morphology on background
    '''
    cv.Dilate(bw, bw,kernel,1)
    cv.MorphologyEx(bw, bw, None,kernel, cv2.MORPH_CLOSE, 2)
    '''
    #get the foreground
    '''
    cv.SubRS(bw,cv.Scalar(255,255,255),bw)
    cv.MorphologyEx(bw, bw, None, kernel,cv2.MORPH_OPEN,2)
    cv.Erode(bw, bw, kernel, 1)
    '''
    #find contours of foreground
    #Grabcut
    '''
    size=cv.GetSize(bw)
    color=np.asarray(img[:,:])
    fg=np.asarray(bw[:,:])
#     mask=cv.CreateMat(size[1], size[0], cv2.CV_8UC1)
    '''
    #Make anywhere black in the grey_image (output from MOG) as likely background
    #Make anywhere white in the grey_image (output from MOG) as definite foreground
    '''
    rect = (0,0,0,0)
   
    mat_mask=np.zeros((size[1],size[0]),dtype='uint8')
    mat_mask[:,:]=fg
    
    mat_mask[mat_mask[:,:] == 0] = 2
    mat_mask[mat_mask[:,:] == 255] = 1
    
    '''
    #Make containers 
    '''                               
    bgdModel = np.zeros((1, 13 * 5))
    fgdModel = np.zeros((1, 13 * 5))
    cv2.grabCut(color, mat_mask, rect, bgdModel, fgdModel,cv2.GC_INIT_WITH_MASK)
    '''
    #Multiple new mask by original image to get cut
    '''
    mask2 = np.where((mat_mask==0)|(mat_mask==2),0,1).astype('uint8')  
    gcfg=np.zeros((size[1],size[0]),np.uint8)
    gcfg=mask2
    
    img_cut = color*mask2[:,:,np.newaxis]

    contours, hierarchy=cv2.findContours(gcfg ,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    
    for cnt in contours:
        print cnt
        rect_box = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect_box)
        box = np.int0(box)
        cv2.drawContours(color,[box], 0,(0,0,255),2)
    cv2.imshow('demo', color)
    cv2.waitKey(0)
Example #27
import cv2.cv as cv
image = cv.LoadImage('t.png', cv.CV_LOAD_IMAGE_COLOR)
cv.ShowImage("Original", image)

grey = cv.CreateImage((image.width, image.height), 8, 1)
cv.CvtColor(image, grey, cv.CV_RGBA2GRAY)
cv.ShowImage('Greyed', grey)

smoothed = cv.CloneImage(image)
cv.Smooth(image, smoothed, cv.CV_MEDIAN)
cv.ShowImage("Smoothed", smoothed)

cv.EqualizeHist(grey, grey)
cv.ShowImage("Equalized", grey)

threshold1 = cv.CloneImage(grey)
cv.Threshold(threshold1, threshold1, 100, 255, cv.CV_THRESH_BINARY)
cv.ShowImage("Threshold", threshold1)

threshold2 = cv.CloneImage(grey)
cv.Threshold(threshold2, threshold2, 100, 255, cv.CV_THRESH_OTSU)
cv.ShowImage("Threshold 2", threshold2)

element_shape = cv.CV_SHAPE_RECT
pos = 3
element = cv.CreateStructuringElementEx(pos * 2 + 1, pos * 2 + 1, pos, pos,
                                        element_shape)
cv.Dilate(grey, grey, element, 2)

cv.ShowImage("Dilated", grey)
Example #28
    # show the im
    cv.ShowImage(win_name, col_edge)

if __name__ == '__main__':
    if len(sys.argv) > 1:
        im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'https://raw.github.com/opencv/opencv/master/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    # create the output im
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)

    # convert to grayscale
    gray = cv.CreateImage((im.width, im.height), 8, 1)
    edge = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)

    # create the window
    cv.NamedWindow(win_name, cv.CV_WINDOW_AUTOSIZE)

    # create the trackbar
    cv.CreateTrackbar(trackbar_name, win_name, 1, 100, on_trackbar)

    # show the im
    on_trackbar(0)
Example #29
def runtracking():
    global rgb_image, hsv_image, hsvmouse, pausecam, hsvgreen, hsvyellow, hsvblue, hsvred, homographycomputed
    global hsvyellowtab, hsvrange
    global homography, pose_flag
    global hsvyellowmin, hsvyellowmax, hsvgreenmin, hsvgreenmax, hsvbluemin, hsvbluemax, hsvredmin, hsvredmax
    global cycloppoint, righteyepoint, lefteyepoint
    global capture, pausecam, size_image
    global yellowmask_image, greenmask_image, redmask_image, bluemask_image
    global p_num, modelepoints, blob_centers
    global rx, ry, rz
    global background

    size_thumb = [size_image[0] / 2, size_image[1] / 2]

    thumbgreen = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbred = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbblue = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)
    thumbyellow = cv.CreateImage(size_thumb, cv.IPL_DEPTH_8U, 1)

    cv.NamedWindow("GreenBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("GreenBlobDetection", thumbgreen)

    cv.NamedWindow("YellowBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("YellowBlobDetection", thumbyellow)

    cv.NamedWindow("BlueBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("BlueBlobDetection", thumbblue)

    cv.NamedWindow("RedBlobDetection", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("RedBlobDetection", thumbred)

    rgb_image = cv.QueryFrame(capture)
    cv.NamedWindow("Source", cv.CV_WINDOW_AUTOSIZE)

    cv.SetMouseCallback("Source", getObjectHSV)

    print "Hit ESC key to quit..."

    # infinite loop for processing
    while True:

        time.sleep(0.02)
        blobcentergreen = findBlob(rgb_image, hsv_image, greenmask_image,
                                   greenblob_image, hsvrange, hsvgreenmin,
                                   hsvgreenmax, 'g')
        blobcenteryellow = findBlob(rgb_image, hsv_image, yellowmask_image,
                                    yellowblob_image, hsvrange, hsvyellowmin,
                                    hsvyellowmax, 'y')
        blobcenterblue = findBlob(rgb_image, hsv_image, bluemask_image,
                                  blueblob_image, hsvrange, hsvbluemin,
                                  hsvbluemax, 'b')
        blobcenterred = findBlob(rgb_image, hsv_image, redmask_image,
                                 redblob_image, hsvrange, hsvredmin, hsvredmax,
                                 'r')

        if not pausecam:
            if (blobcentergreen != None):
                cv.Resize(greenblob_image, thumbgreen)
                cv.ShowImage("GreenBlobDetection", thumbgreen)
                # print "green center: %d %d %d" %blobcentergreen
            if (blobcenteryellow != None):
                cv.Resize(yellowblob_image, thumbyellow)
                cv.ShowImage("YellowBlobDetection", thumbyellow)
                # print "yellow center: %d %d %d" %blobcenteryellow
            if (blobcenterblue != None):
                cv.Resize(blueblob_image, thumbblue)
                cv.ShowImage("BlueBlobDetection", thumbblue)
                # print "blue center: %d %d %d" %blobcenterblue
            if (blobcenterred != None):
                cv.Resize(redblob_image, thumbred)
                cv.ShowImage("RedBlobDetection", thumbred)
                # print "red center: %d %d %d" %blobcenterred

        cv.ShowImage("Source", rgb_image)
        c = cv.WaitKey(7) % 0x100
        if c == 27:
            break
        if c == ord('p') or c == ord('P'):
            pausecam = not pausecam

        if c == ord('y'):
            hsvyellowtab.append(hsvmouse)
            hsvyellowmin = mintab(hsvyellowtab)
            hsvyellowmax = maxtab(hsvyellowtab)
            print "minyellow"
            print hsvyellowmin
            print "maxyellow"
            print hsvyellowmax
        if c == ord('Y'):
            if (len(hsvyellowtab) > 0):
                hsvyellowtab.pop(len(hsvyellowtab) - 1)
            if (len(hsvyellowtab) != 0):
                hsvyellowmin = mintab(hsvyellowtab)
                hsvyellowmax = maxtab(hsvyellowtab)
            else:
                hsvyellowmin = [255, 255, 255]
                hsvyellowmax = [0, 0, 0]
        if c == ord('g'):
            hsvgreentab.append(hsvmouse)
            hsvgreenmin = mintab(hsvgreentab)
            hsvgreenmax = maxtab(hsvgreentab)
            print "mingreen"
            print hsvgreenmin
            print "maxgreen"
            print hsvgreenmax
        if c == ord('G'):
            if (len(hsvgreentab) > 0):
                hsvgreentab.pop(len(hsvgreentab) - 1)
            if (len(hsvgreentab) != 0):
                hsvgreenmin = mintab(hsvgreentab)
                hsvgreenmax = maxtab(hsvgreentab)
            else:
                hsvgreenmin = [255, 255, 255]
                hsvgreenmax = [0, 0, 0]
        if c == ord('r'):
            hsvredtab.append(hsvmouse)
            hsvredmin = mintab(hsvredtab)
            hsvredmax = maxtab(hsvredtab)
            print "minred"
            print hsvredmin
            print "maxred"
            print hsvredmax
        if c == ord('R'):
            if (len(hsvredtab) > 0):
                hsvredtab.pop(len(hsvredtab) - 1)
            if (len(hsvredtab) != 0):
                hsvredmin = mintab(hsvredtab)
                hsvredmax = maxtab(hsvredtab)
            else:
                hsvredmin = [255, 255, 255]
                hsvredmax = [0, 0, 0]
            print "RRR"
            print "min red"
            print hsvredmin
            print "max red"
            print hsvredmax
        if c == ord('b'):
            hsvbluetab.append(hsvmouse)
            hsvbluemin = mintab(hsvbluetab)
            hsvbluemax = maxtab(hsvbluetab)
            print "minblue"
            print hsvbluemin
            print "maxblue"
            print hsvbluemax
        if c == ord('B'):
            if (len(hsvbluetab) > 0):
                hsvbluetab.pop(len(hsvbluetab) - 1)
            if (len(hsvbluetab) != 0):
                hsvbluemin = mintab(hsvbluetab)
                hsvbluemax = maxtab(hsvbluetab)
            else:
                hsvbluemin = [255, 255, 255]
                hsvbluemax = [0, 0, 0]
        if c == ord('s'):
            f = open("last_range.txt", 'w')
            for hsv in [
                    hsvredmin, hsvredmax, hsvgreenmin, hsvgreenmax, hsvbluemin,
                    hsvbluemax, hsvyellowmin, hsvyellowmax
            ]:
                map(lambda v: f.write(str(int(v)) + ','), hsv)
                f.write('\n')
            f.close()
            print 'saved ranges'
        if c == ord('l'):
            f = open("last_range.txt", 'r')
            lines = f.readlines()
            [
                hsvredmin, hsvredmax, hsvgreenmin, hsvgreenmax, hsvbluemin,
                hsvbluemax, hsvyellowmin, hsvyellowmax
            ] = map(lambda l: map(lambda v: int(v),
                                  l.split(',')[:-1]), lines)
            print "loaded ranges:\n"
            print lines
        # if c == ord('R') :
    #                step=0
        if not pausecam:
            rgb_image = cv.QueryFrame(capture)
            cv.Flip(rgb_image, rgb_image, 1)  # flip l/r

    # after blob center detection we need to launch pose estimation
        if ((blobcentergreen != None) and (blobcenteryellow != None)
                and (blobcenterblue != None) and (blobcenterred != None)):
            #order is Yellow,blue,red, green
            pose_flag = 1
            blob_centers = []
            blob_centers.append((blobcenteryellow[0] - size_image[0] / 2,
                                 blobcenteryellow[1] - size_image[1] / 2))
            blob_centers.append((blobcenterblue[0] - size_image[0] / 2,
                                 blobcenterblue[1] - size_image[1] / 2))
            blob_centers.append((blobcenterred[0] - size_image[0] / 2,
                                 blobcenterred[1] - size_image[1] / 2))
            blob_centers.append((blobcentergreen[0] - size_image[0] / 2,
                                 blobcentergreen[1] - size_image[1] / 2))

            # get the tracking matrix (orientation and position) result with
            # POSIT method in the tracker (camera) referential
            matrix = find_pose(p_num, blob_centers, modelepoints)

            # We want the tracking result in the world referential, i.e. with the
            # origin 60 cm from the middle of the screen, Y up, and Z pointing behind you.
            # The tracker referential is the camera referential, with the X axis pointing to the
            # left, the Y axis pointing down, the Z axis pointing behind
            # you, and the camera as origin.

            # We therefore pre-multiply so the tracking results are expressed in the world
            # referential rather than in the tracker (camera) referential
            # (pre-multiplication).
            pre_tranform_matrix = WordToTrackerTransform(matrix)

            # We do not want to track the centre of the body referential (the upper-right
            # point of the glasses) but the middle of the two eyes in monoscopic mode
            # (cyclops eye), or the left and right eyes in stereoscopic mode.

            # We thus post-multiply the tracking results in the world
            # referential by the referential of the eye in the body
            # referential (the glasses).
            pre_tranform_matrix_post_cylcope_eye = BodyToCyclopsEyeTransform(
                pre_tranform_matrix)
            poscyclope = [
                pre_tranform_matrix_post_cylcope_eye[3][0],
                pre_tranform_matrix_post_cylcope_eye[3][1],
                pre_tranform_matrix_post_cylcope_eye[3][2]
            ]
            # print "poscylope", poscyclope

            pre_tranform_matrix_post_left_eye = BodyToLeftEyeTransform(
                pre_tranform_matrix)
            posleft = [
                pre_tranform_matrix_post_left_eye[3][0],
                pre_tranform_matrix_post_left_eye[3][1],
                pre_tranform_matrix_post_left_eye[3][2]
            ]
            # print "posleft",posleft

            pre_tranform_matrix_post_right_eye = BodyToRightEyeTransform(
                pre_tranform_matrix)
            posright = [
                pre_tranform_matrix_post_right_eye[3][0],
                pre_tranform_matrix_post_right_eye[3][1],
                pre_tranform_matrix_post_right_eye[3][2]
            ]
            # print "posright",posright

            sendPosition("/tracker/head/pos_xyz/cyclope_eye", poscyclope)
            sendPosition("/tracker/head/pos_xyz/left_eye", posleft)
            sendPosition("/tracker/head/pos_xyz/right_eye", posright)
Example #30
import cv2.cv as cv
image = cv.LoadImage('11.jpg')
grayimg = cv.CreateImage(cv.GetSize(image), image.depth, 1)
for i in range(image.height):
    for j in range(image.width):
        grayimg[i, j] = max(image[i, j][0], image[i, j][1], image[i, j][2])
cv.ShowImage('srcImage', image)
cv.ShowImage('grayImage', grayimg)
cv.WaitKey(0)
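The per-pixel loop above is easy to read but slow in Python; the same maximum-of-channels grayscale can be computed with the legacy API itself by splitting the channels and taking their element-wise maximum. A sketch, assuming the same image and grayimg variables:

b = cv.CreateImage(cv.GetSize(image), image.depth, 1)
g = cv.CreateImage(cv.GetSize(image), image.depth, 1)
r = cv.CreateImage(cv.GetSize(image), image.depth, 1)
cv.Split(image, b, g, r, None)
cv.Max(b, g, grayimg)        # grayimg = max(B, G)
cv.Max(grayimg, r, grayimg)  # grayimg = max(max(B, G), R)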