Example #1
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
            haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
Example #2
def detect_and_draw(img, cascade, jpg_cnt):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 10000))
        if faces:
            for ((x, y, w, h), n) in faces:

                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))

                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

                if jpg_cnt % 50 == 1:
                    print('capture completed')
                    cv.SaveImage('test_' + str(jpg_cnt) + '.jpg', img)
                    print("aaa1")
                    url = 'http://210.94.185.52:8080/upload.php'
                    #files={ 'upfiles' : open('/home/lee/test_'+str(jpg_cnt)+'.jpg','rb')}
                    files = {
                        'upfiles':
                        open('/home/lee/test_' + str(jpg_cnt) + '.jpg', 'rb')
                    }
                    print("aaa2")
                    r = requests.post(url, files=files)
                    print("aaa3")
                    print(r.text)
                    for i in r.text.split():
                        try:
                            op = float(i)
                            break
                        except ValueError:
                            continue
                    print(op)
                    #LED
                    if op >= 0.9:
                        lock_on()
                    else:
                        print('no')

    cv.ShowImage("result", img)
Example #3
def detect_and_draw(img, cascade, c):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    face_flag = False

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            face_flag = True
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))

                # once the face has been detected in enough consecutive frames
                if c > 4:
                    # save the full frame
                    global counter
                    counter = -1
                    d = datetime.today()
                    datestr = d.strftime('%Y-%m-%d_%H-%M-%S')
                    outputname = '/home/pi/fd/fd_' + datestr + '.jpg'
                    cv.SaveImage(outputname, img)
                    print 'Face Detect'

                    # reload the saved frame and crop the face
                    fimg = cv.LoadImage(outputname)
                    fimg_trim = fimg[pt1[1]:pt2[1], pt1[0]:pt2[0]]
                    outputname2 = '/home/pi/fd/face_' + datestr + '.jpg'
                    cv.SaveImage(outputname2, fimg_trim)
                    print 'Face Image Save'

                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)

    return face_flag
Example #4
def readDirectory(fileLocation, cascade):
    for root, dirs, files in os.walk(fileLocation):
        print root, "has:"
        for name in files:
            if name.endswith(".jpg"):
                # sequentially loop, load and detect.
                print "Analysing " + name + ":"
                # measure how long it takes
                t = cv.GetTickCount()
                # load in the image
                image = cv.LoadImage(os.path.join(root, name), 1)
                match = detectFace(image, cascade)
                if match:
                    # save a new image with a box round each face
                    cv.SaveImage(fileLocation + "/face_" + name, match)
                t = cv.GetTickCount() - t
                print "\tTime = %gms" % (t / (cv.GetTickFrequency() * 1000.0))
Example #5
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            facenum = 0
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

                #code copied from https://github.com/mitchtech/py_servo_facetracker/blob/master/facetracker_servo_gpio.py

                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]
                midFaceX = x1 + ((x2 - x1) / 2)
                midFaceY = y1 + ((y2 - y1) / 2)
                facenum = facenum + 1
                client.publish(topic + str(facenum),
                               str(midFaceX) + "," + str(midFaceY), 0)

                print topic + str(facenum), str(midFaceX) + "," + str(midFaceY)

    cv.ShowImage("result", img)
Example #6
def detect_and_draw(img, cascade, mask):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:

                # Compute the position and size of the moustache overlay
                xmoustache = int((x * image_scale) + w * 0.5)
                ymoustache = int((y * image_scale) + h * 1.25)
                wmoustache = int(w * 0.5 * image_scale)
                hmoustache = int(h * 0.19 * image_scale)
                img_mask = cv.CreateImage((wmoustache, hmoustache), mask.depth,
                                          mask.nChannels)
                cv.SetImageROI(
                    img, (xmoustache, ymoustache, wmoustache, hmoustache))
                cv.Resize(mask, img_mask, cv.CV_INTER_LINEAR)

                # Blend the moustache mask into the frame and restore the ROI
                cv.Sub(img, img_mask, img)
                cv.ResetImageROI(img)
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                #cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
Example #7
def detect_and_draw(img,cascade):
    gray=cv.CreateImage((img.width,img.height),8,1)
    small_img=cv.CreateImage((cv.Round(img.width/image_scale),cv.Round(img.height/image_scale)),8,1)
    cv.CvtColor(img,gray,cv.CV_BGR2GRAY)
    cv.Resize(gray,small_img,cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img,small_img)

    if(cascade):
        t=cv.GetTickCount()
        faces=cv.HaarDetectObjects(small_img,cascade,cv.CreateMemStorage(0),haar_scale,min_neighbors,haar_flags,min_size)
        t=cv.GetTickCount()-t
        print "time taken for detection = %gms"%(t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x,y,w,h),n) in faces:
                pt1=(int(x*image_scale),int(y*image_scale))
                pt2=(int((x+w)*image_scale),int((y+h)*image_scale))
                cv.Rectangle(img,pt1,pt2,cv.RGB(255,0,0),3,8,0)

        cv.ShowImage("video",img)
Example #8
def detect_and_draw(img, out_put, cascade):
    img = cv.LoadImage(img, 1)
    res = {'faces': 0, 'data': []}
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        cascade = cv.Load(cascade)
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            i = 0
            for ((x, y, w, h), n) in faces:
                i = i + 1
                res['data'].append({'x': x, 'y': y, 'w': w, 'h': h})
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            res['faces'] = i
        print res
        #with open(sys_path + r'/res.json', 'w') as outfile:
        with open(out_put, 'w') as outfile:
            json.dump(res, outfile)

    cv.SaveImage(sys_path + r'/debug.jpg', img)
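
Example #8 collects each detection into the res dictionary and writes it out with json.dump. For a frame with two detections, the saved file would have roughly the shape sketched below; the numbers are made up for illustration, and, like the raw HaarDetectObjects output they copy, x/y/w/h are in the coordinates of the downscaled image.

# Illustrative shape of the JSON written by Example #8 (values are invented).
res = {
    "faces": 2,
    "data": [
        {"x": 41, "y": 27, "w": 60, "h": 60},
        {"x": 150, "y": 33, "w": 58, "h": 58},
    ],
}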
Example #9
def detect_and_draw(img, cascade):
    # Image conditioning: allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)
    # create the grayscale mask
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    # scale the image down so it is processed uniformly
    # (integral image / histogram equalization)
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)
    # image processing
    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # compute the detection points
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                posx = int(((x + (x + w)) / 2) * image_scale)
                posy = int(((y + (y + h)) / 2) * image_scale)
                # draw one circle at the image center and another at the face center
                cv.Circle(img, (80, 60), 5, cv.RGB(0, 0, 255), 1, 8, 0)
                cv.Circle(img, (posx, posy), 5, cv.RGB(0, 255, 0), 1, 8, 0)
                # if the bus is free, send the position data over I2C
                state = readData()
                time.sleep(0.005)
                if state == 1:
                    sendData(posx)
                    sendData(posy)
                print 'posx: ' + str(posx) + ' posy: ' + str(posy)

    cv.ShowImage("video", img)
Example #10
def detect_and_draw(img,cascade):
    gray=cv.CreateImage((img.width,img.height),8,1)
    small_img=cv.CreateImage((cv.Round(img.width/image_scale),cv.Round(img.height/image_scale)),8,1)
    cv.CvtColor(img,gray,cv.CV_BGR2GRAY)
    cv.Resize(gray,small_img,cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img,small_img)

    if(cascade):
        t=cv.GetTickCount()
        faces=cv.HaarDetectObjects(small_img,cascade,cv.CreateMemStorage(0),haar_scale,min_neighbors,haar_flags,min_size)
        t=cv.GetTickCount()-t
        print "time taken for detection = %gms"%(t/(cv.GetTickFrequency()*1000.))
        scan_image = 1
        if faces:
            for ((x,y,w,h),n) in faces:
                pt1=(int(x*image_scale),int(y*image_scale))
                pt2=(int((x+w)*image_scale),int((y+h)*image_scale))
                sub_face = img[int(y*image_scale):int((y+h)*image_scale),int(x*image_scale):int((x+w)*image_scale)]
                cv.SaveImage(str("/home/pi/Documents/Recognise_test_sample/images/")+".jpg",sub_face)
                cv.Rectangle(img,pt1,pt2,cv.RGB(255,0,0),3,8,0)
                scan_image =scan_image+1

        cv.ShowImage("Input Image",img)
        cv.SaveImage("detected_faces.jpg",img)
    conn.commit()
    conn.close()
    
    
Id=raw_input('Enter the User ID')
Name=raw_input('Enter the User Name')

database="D:\IOT Project for Semester IV\Program\Face Recognition\Attendance.db"
insertOrUpdate(database)
sampleNum=0;
while(True):
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    t = cv.GetTickCount()
    faces = detector.detectMultiScale(gray, 1.4, 5)
    t = cv.GetTickCount() - t
    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        sampleNum=sampleNum+1
        cv2.imwrite("dataset2/User."+Id +'.'+ str(sampleNum) + ".jpg", gray[y:y+h,x:x+w])
        
        cv2.imshow('frame',img)
    if(cv2.waitKey(100) & 0xFF == ord('q')):
        break
    elif(sampleNum>70):
        break
print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
print("Faces Found:",len(faces))
cap.release()
cv2.destroyAllWindows()    
Example #12
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)

    # create an image with a smaller size
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    # if the cascade is present
    if cascade:
        # get the current time
        t = cv.GetTickCount()

        # allocate memory for the detector (CreateMemStorage)
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)

        # elapsed time: current tick count minus the start value
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))

        i = 0

        # if at least one face was detected
        if faces:
            # iterate over the coordinates of each face
            for ((x, y, w, h), n) in faces:
                i = 1

                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))

                # draw rectangle (image, top left, bottom right, color, size)
                cv.Rectangle(img, pt1, pt2, (0, 230, 0), 1)

                # crop the image
                var1 = img[y: y + h, x: x + w]
                cv.SaveImage("face/database/image.png", var1)

                name = "face/database/image.png"
                img = Image.open(name).convert('LA')
                img.save(name)
                break

        cv.DestroyAllWindows()

        if i == 1:
            os.system("python resize.py")

        if i == 0:
            os.remove("face/database/image.png")
Example #13
            #delete old files
            for filename in files:
                if (filename == imagefile):
                    break
                # os.remove(filename)

            if (imagefile == oldimage):
                #no new image from raspistill
                time.sleep(0.1)
            else:
                # uncomment for spare cpu (reduce frame rate)
                # time.sleep (0.1)
                frame = cv.LoadImage(imagefile, cv.CV_LOAD_IMAGE_COLOR)
                oldimage = imagefile
        else:
            frame = cv.QueryFrame(capture)
        cv.ShowImage("result", frame)
        if cv.WaitKey(10) >= 0:
            break
        t = cv.GetTickCount() - t
        print "capture = %gfps" % (1000 / (t /
                                           (cv.GetTickFrequency() * 1000.)))
p.kill()
cv.DestroyWindow("result")

oldtime = 0
for filename in files:
    mtime = os.path.getmtime(filename)
    print mtime - oldtime
    oldtime = mtime
Example #14
                # list most recent images,
                # and get the 2nd most recent image
                # since this is the last complete one

                files = filter(os.path.isfile,
                               glob.glob('/run/shm/' + "image*jpg"))
                files.sort(key=lambda x: os.path.getmtime(x))
                imagefile = (files[-2])

                # print imagefile
                frame = cv.LoadImage(imagefile, cv.CV_LOAD_IMAGE_COLOR)

            else:
                #usb cam
                frame = cv.QueryFrame(capture)

            detected = detect_and_draw(frame, cascade, detected)

            # uncomment if you want some spare cpu - reduced from 7fps to 5fps
            # time.sleep(0.1)

            t = cv.GetTickCount() - t
            fps = int(1000 / (t / (cv.GetTickFrequency() * 1000)))
            # print fps

            #exit when any key pressed
            if cv.WaitKey(10) >= 0:
                break

    cv.DestroyWindow("result")
Example #15
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    ######  If the resolution changes, the crosshair line coordinates below must change accordingly  ##############
    cv.Line(img, (width / middle_w, 0), (width / middle_w, height),
            (0, 10, 255), 3)
    cv.Line(img, ((width / middle_w - 20), (height / middle_h - 10)),
            ((width / middle_w - 20), (height / middle_h + 10)), (0, 10, 255),
            2)
    cv.Line(img, ((width / middle_w + 20), (height / middle_h - 10)),
            ((width / middle_w + 20), (height / middle_h + 10)), (0, 10, 255),
            2)
    cv.Line(img, (0, height / middle_h), (width, height / middle_h),
            (0, 10, 255), 3)
    cv.Line(img, ((width / middle_w - 10), (height / middle_h - 20)),
            ((width / middle_w + 10), (height / middle_h - 20)), (0, 10, 255),
            2)
    cv.Line(img, ((width / middle_w - 10), (height / middle_h + 20)),
            ((width / middle_w + 10), (height / middle_h + 20)), (0, 10, 255),
            2)
    #cv.ShowImage("camera", img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                ##################################################################
                cx = (int(x * image_scale) + int((x + w) * image_scale)) / 2
                cy = (int(y * image_scale) + int((y + h) * image_scale)) / 2
                print cx, cy
                # overlay a status label on the image: (5, 30) is the top-left
                # corner of the text in the window, and the last argument is
                # the text color
                '''
                if cx <= (width*2/3) and cx >= (width*1/3) and cy <= (height*2/3) and cy >= (height*1/3) :
                     TestStr = "Locking"
                     cv.PutText(img, TestStr , (5,30), font, (0,0,255))
                else:
                     TestStr = "searching..."
                     cv.PutText(img, TestStr , (160,30), font, (0,255,0))
                '''

                if cx <= (width * 4 / 7) and cx >= (width * 3 / 7) and cy <= (
                        height * 4 / 7) and cy >= (height * 3 / 7):
                    TestStr = "Locking"
                    cv.PutText(img, TestStr, (5, 30), font, (0, 0, 255))
                else:
                    TestStr = "searching..."
                    cv.PutText(img, TestStr, (160, 30), font, (0, 255, 0))

#################################################################################################################
                if cx < img.width * 3 / 7:
                    arduino.write('4')
                    print '4'
                if cx < img.width * 2 / 7:
                    arduino.write('44')
                    print '4'
                if cx < img.width / 7:
                    arduino.write('4444')
                    print '44'

                if cx > img.width * 4 / 7:
                    arduino.write('6')
                    print '6'
                if cx > img.width * 5 / 7:
                    arduino.write('66')
                    print '6'
                if cx > img.width * 6 / 7:
                    arduino.write('6666')
                    print '66'
                if cy < img.height * 3 / 7:
                    arduino.write('2')
                    print '2'
                if cy < img.height * 2 / 7:
                    arduino.write('22')
                    print '2'
                if cy < img.height / 7:
                    arduino.write('2222')
                    print '222'
                if cy > img.height * 4 / 7:
                    arduino.write('8')
                    print '8'
                if cy > img.height * 5 / 7:
                    arduino.write('88')
                    print '8'
                if cy > img.height * 6 / 7:
                    arduino.write('8888')
                    print '888'
                break


######################################################

    cv.ShowImage("result", img)
Example #16
    def detect_and_draw(self, originalImage):
        # allocate temporary images
        
        print type(originalImage)
        grayScaleFullImage = cv.CreateImage((originalImage.width, originalImage.height), 8, 1)
        smallScaleFullImage = cv.CreateImage((cv.Round(originalImage.width / image_scale),
                       cv.Round (originalImage.height / image_scale)), 8, 1)
    
        # convert color input image to grayscale
        cv.CvtColor(originalImage, grayScaleFullImage, cv.CV_BGR2GRAY)
    
        # scale input image for faster processing
        cv.Resize(grayScaleFullImage, smallScaleFullImage, cv.CV_INTER_LINEAR)
    
        cv.EqualizeHist(smallScaleFullImage, smallScaleFullImage)
    
        if(self.cascade):
            t = cv.GetTickCount()
            # detect faces
            faces = cv.HaarDetectObjects(smallScaleFullImage, self.cascade, cv.CreateMemStorage(0),
                                         haar_scale, min_neighbors, haar_flags, min_size)
            t = cv.GetTickCount() - t
            print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
            if faces:
                print "detected face"
                for ((x, y, w, h), n) in faces:
                    # the input to cv.HaarDetectObjects was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * image_scale), int(y * image_scale))
                    pt11 = (int(x * image_scale) + 10, int(y * image_scale) + 10)
                    pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                    # face 
                    cv.Rectangle(originalImage, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                    
                    if isOpticalFlow:
                        originalArray2 = cv.CloneImage(originalImage)
                        faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
                        faceArea2 = cv.CloneMat(faceArea)
                        cv.ShowImage("face area", faceArea2)
                        self.MotionDetector.iterativeMotionDetector(faceArea2)
                                        
     
                    # get the center of the rectangle
                    centerX = (pt1[0] + pt2[0]) / 2     
                    centerY = (pt1[1] + pt2[1]) / 2 + int(0.1 * w * image_scale)
                      
                    # around nose region
                    cv.Rectangle(originalImage, (centerX, centerY), (centerX + 10, centerY + 10), cv.RGB(255, 0, 255))   
                    
                         
                    # detect left eye
                    # cv.SetZero(sub)  55
                    self.detectLeftEye(originalImage, self.cascade2, pt1, centerX, centerY)
                    
                    # detect right eye
                    rightEyeArea = cv.GetSubRect(originalImage, (centerX, pt1[1], pt2[0] - centerX  , centerY - pt1[1]))
                    # cv.SetZero(rightEyeArea)    
                    self.detectRightEye(originalImage, rightEyeArea, centerX, centerY, pt1, self.cascade2)
                    
#                     self.detectNose(originalImage, cascade4, centerX, centerY)

                    # now apply mask for values in range +/- 10% of index_1
                    # form a map for showing the eyebrows
                    # cloneImageArray = cv.CloneMat(imageArray)
                    # cloneImageArray = np.empty_like (imageArray)
                    # cloneImageArray[:] = imageArray
                    # cv2.imshow("left eye " ,cloneImageArray)
                
                    # res = cv2.bitwise_and(cloneImageArray,cloneImageArray,mask = backproj)
                    # cv2.imshow("res" ,res)
                
                
                    # detect left eyebrow
                    # by doing simple contour detection
    #                 print type(leftEyeArea)
    #                 gray_im = cv.CreateMat(leftEyeArea.height, leftEyeArea.width, cv.CV_8UC1)
    #                 #gray_im = cv.CreateImage((leftEyeArea.rows, leftEyeArea.cols), cv.IPL_DEPTH_8U, 1)
    #                 print type(gray_im)
    #                 cv.CvtColor(leftEyeArea, gray_im, cv.CV_RGB2GRAY)
    #                 imageArray = np.asarray(gray_im, dtype=np.uint8)
    #                 #floatMat.convertTo(ucharMat, CV_8UC1);
    # 
    #                 # scale values from 0..1 to 0..255
    #                 #floatMat.convertTo(ucharMatScaled, CV_8UC1, 255, 0); 
    #                 contours0, hier = cv2.findContours( backproj , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # #               
    # 
    #                 cv_im = cv.CreateMat(img.width, img.height, cv.CV_8UC3)
    #                 cv.SetData(cv_im, img.tostring())
    #                     
    #                 #print type(cv_im)
    #                 
    #                 originalImageArray = np.asarray(cv_im, dtype=np.uint8)
    #                 
    #                 print " length " + str(len(contours0))   
    #                 #print type(contours0)
    #                 
    #                 lines = None
    #                 linesList = list()
    #                 for item in contours0:
    #                     #print "item " + str(item)
    #                        
    #                     #print type(item)
    #                     for i in range(1, len(item)):
    #                         #for j in range(len(item[i][0])):
    #                         #print str(item[i][0][0]) + " " + str(item[i][0][1])
    #                         #lines.append([[item[i][0][0], item[i][0][1]]])
    #                         if lines != None:
    #                             np.append(lines, item[i][0])
    #                         else:
    #                             lines = np.array(item[i][0])
    #                         linesList.append((item[i][0][0] , item[i][0][1]))
    #                         #cv2.circle(backproj, ( item[i][0][0] , item[i][0][1]), 10, (255,255,255), 10)
    #                         #cv.Circle(img, (pt1[0] + item[i][0][0] ,int(pt1[1] * 1.1)+ item[i][0][1]), 5, (255,0,255))
    #                             
    #                             
    #                
    #                 
    #                 #print type(originalImageArray)
    #                 print lines
    #                 #cv2.polylines(originalImageArray, lines, True, cv.RGB(255, 255, 0), 10)
    #                 print type(linesList)
    #                 #cv.PolyLine(cv_im, linesList, False, cv.RGB(255, 255, 0), 10)
    #                 #cv2.drawContours(backproj, contours0, , cv.RGB(55, 55, 55))
                
                    
                    
                    # canny_output = None
                    # canny_output = cv2.Canny(backproj, 700, 1000, canny_output, 7)
                    # cv2.imshow("canny ", canny_output)
                    
                    # cv.Canny(hsv_image, contours0, 10, 60);
                    # contours, hier = cv2.findContours( canny_output , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                    # cv2.drawContours(originalImageArray,lines,-1,(0,255,0),3)

                    # detect mouth
                    mouthArea = cv.GetSubRect(originalImage, (pt1[0], centerY, pt2[0] - pt1[0], pt2[1] - centerY))
                    self.detectMouth(originalImage, mouthArea, pt1, centerY, self.cascade3)

                    # start tracking face
                    if not isOpticalFlow:
                        originalArray2 = cv.CloneImage(originalImage)
                        faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
                        faceArea2 = cv.CloneMat(faceArea)
                        return (True, faceArea2, originalImage, pt1, pt2)
                        
#                         originalImage2 = cv.CloneImage(originalImage)
#                         camshift = Camshift()
#                         camshift.defineRegionOfInterest(originalImage2, pt1, pt2)

#                         originalArray2 = cv.CloneImage(originalImage)
#                         faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
#                         faceArea2 = cv.CloneMat(faceArea)
#                         cv.ShowImage("face area", faceArea2)
#                         faceArray = np.asarray(faceArea2, np.uint8, 3)
#                         faceArray = cv2.cvtColor(faceArray, cv2.COLOR_BGR2GRAY)
#                         self.matcher.defineTargetImage(faceArray)
#                         self.matcher.findInVideoSequence()

        cv.ShowImage("result", originalImage)
        
        return (False, originalImage, None, None, None)
Example #17
x = dimension_x 
y = dimension_y 
cv2.namedWindow('img', cv2.WINDOW_NORMAL)
cv2.resizeWindow('img', x, y)          
#create the grayscale mask
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.namedWindow('gray', cv2.WINDOW_NORMAL)
cv2.resizeWindow('gray', dimension_x, dimension_y)          
gray = cv2.resize(gray, (x,y))    
img = cv2.resize(img,(x,y))
#find the coordinates of the faces (if any)
#and store their positions
t = cv.GetTickCount()
faces = face_cascade.detectMultiScale(gray, 1.2, 5)
t = cv.GetTickCount() - t
print "tiempo de la deteccion = %gms" % (t/(cv.GetTickFrequency()*1000.))
#dibujamos rectangulo en el rostro si existe
for (x,y,w,h) in faces:
    cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)
#show the image and its grayscale mask
cv2.imshow('img',img)
cv2.imshow('gray',gray)
#press 'q' to exit the program
while True:
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()



Example #18
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                   cv.Round (img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

#######  If the resolution changes, the crosshair line coordinates below must change accordingly  ##############
    '''
    cv.Line(img, (210,0),(210,480), (0,255,255),1) 
    cv.Line(img, (420,0),(420,480), (0,255,255),1) 
    cv.Line(img, (0,160),(640,160), (0,255,255),1) 
    cv.Line(img, (0,320),(640,320), (0,255,255),1)
    '''
    cv.Line(img, (width/2,0),(width/2,height), (0,10,255),3) 
    cv.Line(img, ((width/2-20),(height/2-10)),((width/2-20),(height/2+10)), (0,10,255),2)
    cv.Line(img, ((width/2+20),(height/2-10)),((width/2+20),(height/2+10)), (0,10,255),2) 
    cv.Line(img, (0,height/2),(width,height/2), (0,10,255),3) 
    cv.Line(img, ((width/2-10),(height/2-20)),((width/2+10),(height/2-20)), (0,10,255),2)
    cv.Line(img, ((width/2-10),(height/2+20)),((width/2+10),(height/2+20)), (0,10,255),2)
    
    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                ##################################################################
                cx = (int(x * image_scale) + int((x + w) * image_scale)) / 2
                cy = (int(y * image_scale) + int((y + h) * image_scale)) / 2
                print cx, cy
                ##################################################################
                if cx < img.width * 3 / 7:
                    arduino.write('4')
                    print '4'
                if cx < img.width * 2 / 7:
                    arduino.write('44')
                    print '4'
                if cx < img.width / 7:
                    arduino.write('4444')
                    print '44'

                if cx > img.width * 4 / 7:
                    arduino.write('6')
                    print '6'
                if cx > img.width * 5 / 7:
                    arduino.write('66')
                    print '6'
                if cx > img.width * 6 / 7:
                    arduino.write('6666')
                    print '66'
                if cy < img.height * 3 / 7:
                    arduino.write('2')
                    print '2'
                if cy < img.height * 2 / 7:
                    arduino.write('22')
                    print '2'
                if cy < img.height / 7:
                    arduino.write('2222')
                    print '222'
                if cy > img.height * 4 / 7:
                    arduino.write('8')
                    print '8'
                if cy > img.height * 5 / 7:
                    arduino.write('88')
                    print '8'
                if cy > img.height * 6 / 7:
                    arduino.write('8888')
                    print '888'
                break
                ##################################################################

    cv.ShowImage("result", img)
Example #19
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "time taken for detection = %gms" % (
            t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("video", img)


if __name__ == '__main__':

    parser = OptionParser(
        usage="usage: %prog [options] [filename|camera_index]")
    parser.add_option(
        "-c",
        "--cascade",
        action="store",
        dest="cascade",
        type="str",
        help="Haar cascade file, default %default",
        default="../data/haarcascades/haarcascade_frontalface_alt.xml")
    (options, args) = parser.parse_args()

    cascade = cv.Load(options.cascade)

    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    input_name = args[0]
    if input_name.isdigit():
        capture = cv.CreateCameraCapture(int(input_name))
    else:
        capture = None

    cv.NamedWindow("video", 1)

    # size of the video
    width = 160
    height = 120

    if width is None:
        width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)

    if height is None:
        height = int(
            cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    if capture:
        frame_copy = None
        while True:
            frame = cv.QueryFrame(capture)
            if not frame:
                cv.WaitKey(0)
                break
            if not frame_copy:
                frame_copy = cv.CreateImage((frame.width, frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)

            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, frame_copy)
            else:
                cv.Flip(frame, frame_copy, 0)

            detect_and_draw(frame_copy, cascade)

            if cv.WaitKey(10) >= 0:
                break
    else:
        image = cv.LoadImage(input_name, 1)
        detect_and_draw(image, cascade)
        cv.WaitKey(0)

    cv.DestroyWindow("video")