Example No. 1
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
            haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
Example No. 2
def detect_and_draw(img, cascade, jpg_cnt):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 10000))
        if faces:
            for ((x, y, w, h), n) in faces:

                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))

                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

                if jpg_cnt % 50 == 1:
                    print('capture completed')
                    cv.SaveImage('test_' + str(jpg_cnt) + '.jpg', img)
                    print("aaa1")
                    url = 'http://210.94.185.52:8080/upload.php'
                    #files={ 'upfiles' : open('/home/lee/test_'+str(jpg_cnt)+'.jpg','rb')}
                    files = {
                        'upfiles':
                        open('/home/lee/test_' + str(jpg_cnt) + '.jpg', 'rb')
                    }
                    print("aaa2")
                    r = requests.post(url, files=files)
                    print("aaa3")
                    print(r.text)
                    for i in r.text.split():
                        try:
                            op = float(i)
                            break
                        except:
                            continue
                    print(op)
                    #LED
                    if op >= 0.9:
                        lock_on()
                    else:
                        print('no')

    cv.ShowImage("result", img)
Example No. 3
def track(img, threshold=100):
    '''Accepts BGR image and optional object threshold between 0 and 255 (default = 100).
       Returns: (x,y) coordinates of centroid if found
                (-1,-1) if no centroid was found
                None if user hit ESC
    '''
    cascade = cv.Load("haarcascade_frontalface_default.xml")
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    center = (-1, -1)
    faces = []
    original_size_faces = []
    #import ipdb; ipdb.set_trace()
    if (cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                # cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                #cv.Rectangle(img, (x,y), (x+w,y+h), 255)
                # get the xy corner co-ords, calc the center location
                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]
                centerx = x1 + ((x2 - x1) / 2)
                centery = y1 + ((y2 - y1) / 2)
                center = (centerx, centery)

                scaled = ((x1, y1, x2 - x1, y2 - y1), n)
                original_size_faces.append(scaled)
                # print scaled


#    cv.NamedWindow(WINDOW_NAME, 1)
#    cv.ShowImage(WINDOW_NAME, img)

#    if cv.WaitKey(5) == 27:
#        center = None
    return (center, original_size_faces)
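
track() assumes several module-level constants (image_scale, haar_scale, min_neighbors, haar_flags, min_size) that are not shown in the snippet, and it reloads the cascade on every call, which is slow; hoisting cv.Load out of the function avoids that. A minimal driver sketch, using the conventional values from OpenCV's classic facedetect sample for those constants (the values are assumptions, not taken from this example), could look like this:

import cv2.cv as cv

# assumed values for the module-level constants used by track()
image_scale = 2
haar_scale = 1.2
min_neighbors = 2
haar_flags = 0
min_size = (20, 20)

capture = cv.CaptureFromCAM(0)
while True:
    frame = cv.QueryFrame(capture)
    if not frame:
        break
    center, faces = track(frame, threshold=100)
    if center != (-1, -1):
        print "face centroid at", center
    if cv.WaitKey(10) == 27:  # ESC
        break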
Example No. 4
def detect_and_draw(img, cascade, c):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    face_flag = False

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            face_flag = True
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))

                # once the face has been detected enough times
                if c > 4:
                    # save the image
                    global counter
                    counter = -1
                    d = datetime.today()
                    datestr = d.strftime('%Y-%m-%d_%H-%M-%S')
                    outputname = '/home/pi/fd/fd_' + datestr + '.jpg'
                    cv.SaveImage(outputname, img)
                    print 'Face Detect'

                    # reload and crop the face region
                    fimg = cv.LoadImage(outputname)
                    fimg_trim = fimg[pt1[1]:pt2[1], pt1[0]:pt2[0]]
                    outputname2 = '/home/pi/fd/face_' + datestr + '.jpg'
                    cv.SaveImage(outputname2, fimg_trim)
                    print 'Face Image Save'

                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)

    return face_flag
Example No. 5
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            count = 0
            stop = 1
            name = 1
            no = 1
            dict = {}
            for num in range(14):
                dict[name] = no
                name += 1
            print dict
            f = open('no.json','w')
            json.dump(dict,f)
            #for count in range(14):
            #time.sleep(stop)
                #count += 1
                #print(count)
            #time.sleep(stop)
            #cv.PutText(img, "SAMPLE_TEXT", (0, 50), cv.CV_FONT_HERSHEY_PLAIN, cv.RGB(255, 255, 255))
            #cv.PutText(img, "SAMPLE_TEXT", (0, 50), cv.CV_FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 2, cv.CV_AA )
            for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the 
            # bounding box of each face and convert it to two CvPoints
                #for count in range(14):
                count += 1
                print(count)
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                #count = count + 1
                #print(count)
                # cv.putText(img, "SAMPLE_TEXT", (0, 50), FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 2, cv.CV_AA)


    cv.ShowImage("result", img)
Example No. 6
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CloneMat(img)# cv.CreateImage((img.width,img.height)) # (cv.Round(img.width / image_scale),cv.Round (img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        #Scan image and get an array of faces
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the 
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print "X " , x 
                if int(x * image_scale) > (img.width * 0.45):
                    #print "X " , x
                    #print steppera.IsTurning()
                    if (steppera.IsTurning() == False):
                        if (stepperInUse[STEPPERA] == True):
                            sensor_value = "-4"
                            if isNumeric(sensor_value):
                                print "Moving to" , sensor_value
                                steppera.changeSpeed(int(100 * sign(int(float(sensor_value)) - 0)),abs(int(float(sensor_value)) - 0))
                                while (steppera.IsTurning() == True):
                                    cv.WaitKey(100)
                if int((x + w) * image_scale) < (img.width * 0.55):
                    #print "X " , x
                    #print steppera.IsTurning()
                    if (steppera.IsTurning() == False):
                        if (stepperInUse[STEPPERA] == True):
                            sensor_value = "4"
                            if isNumeric(sensor_value):
                                print "Moving to" , sensor_value
                                steppera.changeSpeed(int(100 * sign(int(float(sensor_value)) - 0)),abs(int(float(sensor_value)) - 0))
                                while (steppera.IsTurning() == True):
                                    cv.WaitKey(100)

    cv.ShowImage("result", img)
Example No. 7
def detect_and_draw(img, cascade):

    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)

    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                   cv.Round (img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    #gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    #cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()#to get the time, create memory for calculation(createMemStorage)

        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)

        t = cv.GetTickCount() - t#previous time minus current time

        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))

        roi = list(range(1, 31))
        ROI = [str(n) for n in range(1, 31)]

        i=0

        if faces:

            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))

                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))

                
                #draw rectangle (imagename,topleft,bottomright,color,size)
                cv.Rectangle(img,pt1,pt2,(0,230,0),1)

                roi[i] = img[y: y + h, x: x + w]

                #cv.ShowImage(ROI[i],roi[i])
                cv.SaveImage("face/temp/"+ROI[i]+".png",roi[i])
                i=i+1;
Example No. 8
def readDirectory(fileLocation, cascade):
    for root, dirs, files in os.walk(fileLocation):
        print root, "has:"
        for name in files:
            if name.find(".jpg") >= 1:
                # sequentially loop, load and detect.
                print "Analysing " + name + ":"
                # measure how long it takes
                t = cv.GetTickCount()
                # load in the image
                image = cv.LoadImage(os.path.join(root, name), 1)
                match = detectFace(image, cascade)
                if match:
                    # save a new image with a box round each face
                    cv.SaveImage(fileLocation + "/face_" + name, match)
                t = cv.GetTickCount() - t
                print "\tTime = %gms" % (t / (cv.GetTickFrequency() * 1000.0))
Example No. 9
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            facenum = 0
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

                #code copied from https://github.com/mitchtech/py_servo_facetracker/blob/master/facetracker_servo_gpio.py

                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]
                midFaceX = x1 + ((x2 - x1) / 2)
                midFaceY = y1 + ((y2 - y1) / 2)
                facenum = facenum + 1
                client.publish(topic + str(facenum),
                               str(midFaceX) + "," + str(midFaceY), 0)

                print topic + str(facenum), str(midFaceX) + "," + str(midFaceY)

    cv.ShowImage("result", img)
Example No. 10
def detect_and_draw(img, cascade, mask):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:

                # Display of the search square
                xmoustache = int((x * image_scale) + w * 0.5)
                ymoustache = int((y * image_scale) + h * 1.25)
                wmoustache = int(w * 0.5 * image_scale)
                hmoustache = int(h * 0.19 * image_scale)
                img_mask = cv.CreateImage((wmoustache, hmoustache), mask.depth,
                                          mask.nChannels)
                cv.SetImageROI(
                    img, (xmoustache, ymoustache, wmoustache, hmoustache))
                cv.Resize(mask, img_mask, cv.CV_INTER_LINEAR)

                # Display of the search square
                cv.Sub(img, img_mask, img)
                cv.ResetImageROI(img)
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                #cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)
Example No. 11
def detect_and_draw(img,cascade):
    gray=cv.CreateImage((img.width,img.height),8,1)
    small_img=cv.CreateImage((cv.Round(img.width/image_scale),cv.Round(img.height/image_scale)),8,1)
    cv.CvtColor(img,gray,cv.CV_BGR2GRAY)
    cv.Resize(gray,small_img,cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img,small_img)

    if(cascade):
        t=cv.GetTickCount()
        faces=cv.HaarDetectObjects(small_img,cascade,cv.CreateMemStorage(0),haar_scale,min_neighbors,haar_flags,min_size)
        t=cv.GetTickCount()-t
        print "time taken for detection = %gms"%(t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x,y,w,h),n) in faces:
                pt1=(int(x*image_scale),int(y*image_scale))
                pt2=(int((x+w)*image_scale),int((y+h)*image_scale))
                cv.Rectangle(img,pt1,pt2,cv.RGB(255,0,0),3,8,0)

        cv.ShowImage("video",img)
Example No. 12
def detect_and_draw(img, cascade, detected):
    # allocate temporary images

    gray = cv.CreateImage((img.width, img.height), 8, 1)
    image_scale = img.width / smallwidth
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        # t = cv.GetTickCount() - t
        # print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            if detected == 0:
                # os.system('festival --tts hi &')
                detected = 1
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print "Face at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[
                    1]
                # find amount needed to pan/tilt
                span = (pt1[0] + pt2[0]) / 2
                stlt = (pt1[1] + pt2[1]) / 2
                mid = smallwidth / 2
                if span < mid:
                    print "left", mid - span
                else:
                    print "right", span - mid

#os.system('echo "6="' + str(valTilt) + ' > /dev/pi-blaster')
#os.system('echo "7="' + str(valPan) + ' > /dev/pi-blaster')
        else:
            if detected == 1:
                #print "Last seen at: ", pt1[0], ",", pt2[0], "\t", pt1[1], ",", pt2[1]
                #os.system('festival --tts bye &')
                status = "just disappeared"
            detected = 0

    cv.ShowImage("result", img)
    return detected
Example No. 13
def detect_and_draw(img, cascade):
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    midFace = None

    if (cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                # get the xy corner co-ords, calc the midFace location
                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]
                midFaceX = x1 + ((x2 - x1) / 2)
                midFaceY = y1 + ((y2 - y1) / 2)
                midFace = (midFaceX, midFaceY)

    cv.ShowImage("result", img)
    return midFace
Example No. 14
def detect_and_draw(img, out_put, cascade):
    img = cv.LoadImage(img, 1)
    res = {'faces': 0, 'data': []}
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        cascade = cv.Load(cascade)
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            i = 0
            for ((x, y, w, h), n) in faces:
                i = i + 1
                res['data'].append({'x': x, 'y': y, 'w': w, 'h': h})
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
            res['faces'] = i
        print res
        #with open(sys_path + r'/res.json', 'w') as outfile:
        with open(out_put, 'w') as outfile:
            json.dump(res, outfile)

    cv.SaveImage(sys_path + r'/debug.jpg', img)
Example No. 15
def detect_and_draw(img, cascade):
    # Image conditioning: temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)
    # Create the mask (convert color input image to grayscale)
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    # Scale the image so it is processed uniformly
    # Application of the integral image
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)
    # Processing of the image
    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # compute the detection points
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                posx = int(((x + (x + w)) / 2) * image_scale)
                posy = int(((y + (y + h)) / 2) * image_scale)
                # draw a circle at the center of the image and another at the center of the face
                cv.Circle(img, (80, 60), 5, cv.RGB(0, 0, 255), 1, 8, 0)
                cv.Circle(img, (posx, posy), 5, cv.RGB(0, 255, 0), 1, 8, 0)
                # if the state is free, send the position data over I2C
                state = readData()
                time.sleep(0.005)
                if state == 1:
                    sendData(posx)
                    sendData(posy)
                print 'posx: ' + str(posx) + ' posy: ' + str(posy)

        cv.ShowImage("video", img)
Example No. 16
def detect_and_draw(img,cascade):
    gray=cv.CreateImage((img.width,img.height),8,1)
    small_img=cv.CreateImage((cv.Round(img.width/image_scale),cv.Round(img.height/image_scale)),8,1)
    cv.CvtColor(img,gray,cv.CV_BGR2GRAY)
    cv.Resize(gray,small_img,cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img,small_img)

    if(cascade):
        t=cv.GetTickCount()
        faces=cv.HaarDetectObjects(small_img,cascade,cv.CreateMemStorage(0),haar_scale,min_neighbors,haar_flags,min_size)
        t=cv.GetTickCount()-t
      
        scan_image = 1
        if faces:
            for ((x,y,w,h),n) in faces:
                pt1=(int(x*image_scale),int(y*image_scale))
                pt2=(int((x+w)*image_scale),int((y+h)*image_scale))
                sub_face = img[int(y*image_scale):int((y+h)*image_scale),int(x*image_scale):int((x+w)*image_scale)]
                cv.SaveImage('/home/pi/Documents/Recognise_test_sample/images/testing/'+str(scan_image)+".jpg",sub_face)
                cv.Rectangle(img,pt1,pt2,cv.RGB(255,0,0),3,8,0)
                scan_image = scan_image+1
               
        cv.ShowImage("video",img)
        cv.SaveImage("detected_faces.jpg",img)
Example No. 17
def detect_and_draw(img, cascade, jpg_cnt):
    global count
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        #print "detection time = %gms" % (t/(cv.GetTickFrequency()*10000))
        if faces:
            for ((x, y, w, h), n) in faces:

                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))

                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

                if jpg_cnt % 10 == 1:
                    print('capture completed')
                    cv.SaveImage('test_' + str(jpg_cnt) + '.jpg', img)
                    #print("aaa1")
                    url = 'http://210.94.185.52:8080/d/upload.php'
                    #files={ 'upfiles' : open('/home/lee/test_'+str(jpg_cnt)+'.jpg','rb')}
                    files = {
                        'upfiles':
                        open(
                            '/home/pi/Desktop/ubuntu_backup/test_' +
                            str(jpg_cnt) + '.jpg', 'rb')
                    }
                    r = requests.post(url, files=files)
                    print(r.text)
                    global recog_name
                    global recog_confi

                    for i in r.text.split('/'):
                        if i == 'kim' or i == 'lee' or i == 'you' or i == 'min' or i == 'unknown':
                            recog_name = str(i)
                            #print(recog_name)
                        elif len(i) == 5:
                            recog_confi = float(i)
                        # recog_confi=i
                        #print(recog_confi)

                    #LED
                    if (recog_name == 'kim' or recog_name == 'lee'
                            or recog_name == 'you'
                            or recog_name == 'min') and recog_confi >= 0.90:
                        lock_on()
                        recog_confi = 0
                        count = 0
                    else:
                        for i in range(2):
                            GPIO.output(GPIO_BUZZER, True)
                            time.sleep(0.5)
                            GPIO.output(GPIO_BUZZER, False)
                            time.sleep(0.5)
                        count = count + 1
                        if count >= 3:
                            data = {'warning': '*Intruder Warning*'}
                            r = requests.post(
                                'http://snivy92.cafe24.com/fcm/push_notification.php',
                                data)

    cv.ShowImage("result", img)
Example No. 18
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                   cv.Round (img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

#######   If the resolution changes, the line coordinates drawn below must change accordingly   ##############
    '''
    cv.Line(img, (210,0),(210,480), (0,255,255),1) 
    cv.Line(img, (420,0),(420,480), (0,255,255),1) 
    cv.Line(img, (0,160),(640,160), (0,255,255),1) 
    cv.Line(img, (0,320),(640,320), (0,255,255),1)
    '''
    cv.Line(img, (width/2,0),(width/2,height), (0,10,255),3) 
    cv.Line(img, ((width/2-20),(height/2-10)),((width/2-20),(height/2+10)), (0,10,255),2)
    cv.Line(img, ((width/2+20),(height/2-10)),((width/2+20),(height/2+10)), (0,10,255),2) 
    cv.Line(img, (0,height/2),(width,height/2), (0,10,255),3) 
    cv.Line(img, ((width/2-10),(height/2-20)),((width/2+10),(height/2-20)), (0,10,255),2)
    cv.Line(img, ((width/2-10),(height/2+20)),((width/2+10),(height/2+20)), (0,10,255),2)
    
    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
##################################################################################################3333
                cx = (int(x * image_scale) + int((x + w) * image_scale)) / 2
                cy = (int(y * image_scale) + int((y + h) * image_scale)) / 2
                print cx, cy
####################################################
                if cx < img.width * 3 / 7:
                    arduino.write('4')
                    print '4'
                if cx < img.width * 2 / 7:
                    arduino.write('44')
                    print '4'
                if cx < img.width / 7:
                    arduino.write('4444')
                    print '44'

                if cx > img.width * 4 / 7:
                    arduino.write('6')
                    print '6'
                if cx > img.width * 5 / 7:
                    arduino.write('66')
                    print '6'
                if cx > img.width * 6 / 7:
                    arduino.write('6666')
                    print '66'
                if cy < img.height * 3 / 7:
                    arduino.write('2')
                    print '2'
                if cy < img.height * 2 / 7:
                    arduino.write('22')
                    print '2'
                if cy < img.height / 7:
                    arduino.write('2222')
                    print '222'
                if cy > img.height * 4 / 7:
                    arduino.write('8')
                    print '8'
                if cy > img.height * 5 / 7:
                    arduino.write('88')
                    print '8'
                if cy > img.height * 6 / 7:
                    arduino.write('8888')
                    print '888'
                break
######################################################

    cv.ShowImage("result", img)
Example No. 19
def detect_and_draw(img, cascade):

    # allocate temporary images

    gray = cv.CreateImage((img.width,img.height), 8, 1)

    #create a image with smaller size

    small_img = cv.CreateImage((cv.Round(img.width / image_scale),

                   cv.Round (img.height / image_scale)), 8, 1)

    # convert color input image to grayscale

    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    
    # scale input image for faster processing

    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    #if algorithm is present

    if(cascade):

        #to get the current time

        t = cv.GetTickCount()

        #create memory for calculation(createMemStorage)

        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)

        #previous time minus current time

        t = cv.GetTickCount() - t

        print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))

        i=0

        # if one or more faces were detected
        if faces:

            # get the coordinates of each detected face
            for ((x, y, w, h), n) in faces:

                i=1;

                # the input to cv.HaarDetectObjects was resized, so scale the

                # bounding box of each face and convert it to two CvPoints

                pt1 = (int(x * image_scale), int(y * image_scale))

                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))

                
                #draw rectangle (imagename,topleft,bottomright,color,size)

                cv.Rectangle(img,pt1,pt2,(0,230,0),1)

                #crop the image

                var1 = img[y: y + h, x: x + w]

                cv.SaveImage("face/database/image.png",var1)

                name="face/database/image.png"

                img=Image.open(name).convert('LA')

                img.save(name)

                break;

        cv.DestroyAllWindows()

        if i == 1:

            os.system("python resize.py")


        if i == 0:

            os.remove("face/database/image.png")
Example No. 20
def detectFaces():
    global frame_copy, min_size, image_scale, haar_scale, min_neighbors, haar_flags, cap, cam_pan, cam_tilt
    t0 = cv.GetTickCount()
    frame = cv.QueryFrame(cap)
    if not frame:
        cv.WaitKey(0)
        return False
    if not frame_copy:
        frame_copy = cv.CreateImage((frame.width,frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.Flip(frame, frame, -1)
   
    # Our operations on the frame come here
    gray = cv.CreateImage((frame.width,frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(frame.width / image_scale),
                   cv.Round (frame.height / image_scale)), 8, 1)
    small_img2 = cv.CreateImage((cv.Round(frame.width / image_scale),
                   cv.Round (frame.height / image_scale)), 8, 1)
    # convert color input image to grayscale
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
 
    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
 
    cv.EqualizeHist(small_img, small_img)

    #flip the image for more convenient camera mounting
    cv.Flip(small_img,small_img2,-1)

    midFace = None
    t1 = cv.GetTickCount()
 
    if(cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img2, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        if faces:
            #lights(50 if len(faces) == 0 else 0, 50 if len(faces) > 0 else 0,0,50)

            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
 #               cv.Rectangle(frame, pt1, pt2, cv.RGB(100, 220, 255), 1, 8, 0)
                # get the xy corner co-ords, calc the midFace location
                x1 = pt1[0]
                x2 = pt2[0]
                y1 = pt1[1]
                y2 = pt2[1]

                midFaceX = x1+((x2-x1)/2)
                midFaceY = y1+((y2-y1)/2)
                midFace = (midFaceX, midFaceY)

                offsetX = midFaceX / float(frame.width/2)
                offsetY = midFaceY / float(frame.height/2)
                offsetX -= 1
                offsetY -= 1

                cam_pan -= (offsetX * 5)
                cam_tilt += (offsetY * 5)
                cam_pan = max(0,min(180,cam_pan))
                cam_tilt = max(0,min(180,cam_tilt))

                print(offsetX, offsetY, midFace, cam_pan, cam_tilt, frame.width, frame.height)
                sys.stdout.flush()
 #               pan(int(cam_pan-90))
  #              tilt(int(cam_tilt-90))
                #break
 #   print "e"+str((t1-t0)/1000000)+"-"+str( (cv.GetTickCount()-t1)/1000000)
#    cv.ShowImage('Tracker',frame)
    if cv.WaitKey(1) & 0xFF == ord('q'):
        return False
    return True
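
For a concrete sense of the offset math above: assuming a 320x240 frame for illustration, a face centre at x = 240 gives offsetX = 240 / 160 - 1 = 0.5, so cam_pan is nudged down by 0.5 * 5 = 2.5 degrees and then clamped to the 0-180 range; the same scheme applies to cam_tilt on the y axis.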
Example No. 21
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    ######   If the resolution changes, the line coordinates drawn below must change accordingly   ##############
    cv.Line(img, (width / middle_w, 0), (width / middle_w, height),
            (0, 10, 255), 3)
    cv.Line(img, ((width / middle_w - 20), (height / middle_h - 10)),
            ((width / middle_w - 20), (height / middle_h + 10)), (0, 10, 255),
            2)
    cv.Line(img, ((width / middle_w + 20), (height / middle_h - 10)),
            ((width / middle_w + 20), (height / middle_h + 10)), (0, 10, 255),
            2)
    cv.Line(img, (0, height / middle_h), (width, height / middle_h),
            (0, 10, 255), 3)
    cv.Line(img, ((width / middle_w - 10), (height / middle_h - 20)),
            ((width / middle_w + 10), (height / middle_h - 20)), (0, 10, 255),
            2)
    cv.Line(img, ((width / middle_w - 10), (height / middle_h + 20)),
            ((width / middle_w + 10), (height / middle_h + 20)), (0, 10, 255),
            2)
    #cv.ShowImage("camera", img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                ##################################################################################################3333
                cx = (int(x * image_scale) + int((x + w) * image_scale)) / 2
                cy = (int(y * image_scale) + int((y + h) * image_scale)) / 2
                print cx, cy
                # add the text to the image; (5,30) sets the top-left corner of the text box in the window, and the last parameter sets the text color
                '''
                if cx <= (width*2/3) and cx >= (width*1/3) and cy <= (height*2/3) and cy >= (height*1/3) :
                     TestStr = "Locking"
                     cv.PutText(img, TestStr , (5,30), font, (0,0,255))
                else:
                     TestStr = "serching...."
                     cv.PutText(img, TestStr , (160,30), font, (0,255,0))
                '''

                if cx <= (width * 4 / 7) and cx >= (width * 3 / 7) and cy <= (
                        height * 4 / 7) and cy >= (height * 3 / 7):
                    TestStr = "Locking"
                    cv.PutText(img, TestStr, (5, 30), font, (0, 0, 255))
                else:
                    TestStr = "serching...."
                    cv.PutText(img, TestStr, (160, 30), font, (0, 255, 0))

#################################################################################################################
                if cx < img.width * 3 / 7:
                    arduino.write('4')
                    print '4'
                if cx < img.width * 2 / 7:
                    arduino.write('44')
                    print '4'
                if cx < img.width / 7:
                    arduino.write('4444')
                    print '44'

                if cx > img.width * 4 / 7:
                    arduino.write('6')
                    print '6'
                if cx > img.width * 5 / 7:
                    arduino.write('66')
                    print '6'
                if cx > img.width * 6 / 7:
                    arduino.write('6666')
                    print '66'
                if cy < img.height * 3 / 7:
                    arduino.write('2')
                    print '2'
                if cy < img.height * 2 / 7:
                    arduino.write('22')
                    print '2'
                if cy < img.height / 7:
                    arduino.write('2222')
                    print '222'
                if cy > img.height * 4 / 7:
                    arduino.write('8')
                    print '8'
                if cy > img.height * 5 / 7:
                    arduino.write('88')
                    print '8'
                if cy > img.height * 6 / 7:
                    arduino.write('8888')
                    print '888'
                break


######################################################

    cv.ShowImage("result", img)
Example No. 22
    def detect_and_draw(self, originalImage):
        # allocate temporary images
        
        print type(originalImage)
        grayScaleFullImage = cv.CreateImage((originalImage.width, originalImage.height), 8, 1)
        smallScaleFullImage = cv.CreateImage((cv.Round(originalImage.width / image_scale),
                       cv.Round (originalImage.height / image_scale)), 8, 1)
    
        # convert color input image to grayscale
        cv.CvtColor(originalImage, grayScaleFullImage, cv.CV_BGR2GRAY)
    
        # scale input image for faster processing
        cv.Resize(grayScaleFullImage, smallScaleFullImage, cv.CV_INTER_LINEAR)
    
        cv.EqualizeHist(smallScaleFullImage, smallScaleFullImage)
    
        if(self.cascade):
            t = cv.GetTickCount()
            # detect faces
            faces = cv.HaarDetectObjects(smallScaleFullImage, self.cascade, cv.CreateMemStorage(0),
                                         haar_scale, min_neighbors, haar_flags, min_size)
            t = cv.GetTickCount() - t
            print "detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.))
            if faces:
                print "detected face"
                for ((x, y, w, h), n) in faces:
                    # the input to cv.HaarDetectObjects was resized, so scale the
                    # bounding box of each face and convert it to two CvPoints
                    pt1 = (int(x * image_scale), int(y * image_scale))
                    pt11 = (int(x * image_scale) + 10, int(y * image_scale) + 10)
                    pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                    # face 
                    cv.Rectangle(originalImage, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                    
                    if isOpticalFlow:
                        originalArray2 = cv.CloneImage(originalImage)
                        faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
                        faceArea2 = cv.CloneMat(faceArea)
                        cv.ShowImage("face area", faceArea2)
                        self.MotionDetector.iterativeMotionDetector(faceArea2)
                                        
     
                    # get the center of the rectangle
                    centerX = (pt1[0] + pt2[0]) / 2     
                    centerY = (pt1[1] + pt2[1]) / 2 + int(0.1 * w * image_scale)
                      
                    # around nose region
                    cv.Rectangle(originalImage, (centerX, centerY), (centerX + 10, centerY + 10), cv.RGB(255, 0, 255))   
                    
                         
                    # detect left eye
                    # cv.SetZero(sub)  55
                    self.detectLeftEye(originalImage, self.cascade2, pt1, centerX, centerY)
                    
                    # detect right eye
                    rightEyeArea = cv.GetSubRect(originalImage, (centerX, pt1[1], pt2[0] - centerX  , centerY - pt1[1]))
                    # cv.SetZero(rightEyeArea)    
                    self.detectRightEye(originalImage, rightEyeArea, centerX, centerY, pt1, self.cascade2)
                    
#                     self.detectNose(originalImage, cascade4, centerX, centerY)

                    # now apply mask for values in range +/- 10% of index_1
                    # form a map for showing the eyebrows
                    # cloneImageArray = cv.CloneMat(imageArray)
                    # cloneImageArray = np.empty_like (imageArray)
                    # cloneImageArray[:] = imageArray
                    # cv2.imshow("left eye " ,cloneImageArray)
                
                    # res = cv2.bitwise_and(cloneImageArray,cloneImageArray,mask = backproj)
                    # cv2.imshow("res" ,res)
                
                
                    # detect left eyebrow
                    # by doing simple contour detection
    #                 print type(leftEyeArea)
    #                 gray_im = cv.CreateMat(leftEyeArea.height, leftEyeArea.width, cv.CV_8UC1)
    #                 #gray_im = cv.CreateImage((leftEyeArea.rows, leftEyeArea.cols), cv.IPL_DEPTH_8U, 1)
    #                 print type(gray_im)
    #                 cv.CvtColor(leftEyeArea, gray_im, cv.CV_RGB2GRAY)
    #                 imageArray = np.asarray(gray_im, dtype=np.uint8)
    #                 #floatMat.convertTo(ucharMat, CV_8UC1);
    # 
    #                 # scale values from 0..1 to 0..255
    #                 #floatMat.convertTo(ucharMatScaled, CV_8UC1, 255, 0); 
    #                 contours0, hier = cv2.findContours( backproj , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # #               
    # 
    #                 cv_im = cv.CreateMat(img.width, img.height, cv.CV_8UC3)
    #                 cv.SetData(cv_im, img.tostring())
    #                     
    #                 #print type(cv_im)
    #                 
    #                 originalImageArray = np.asarray(cv_im, dtype=np.uint8)
    #                 
    #                 print " length " + str(len(contours0))   
    #                 #print type(contours0)
    #                 
    #                 lines = None
    #                 linesList = list()
    #                 for item in contours0:
    #                     #print "item " + str(item)
    #                        
    #                     #print type(item)
    #                     for i in range(1, len(item)):
    #                         #for j in range(len(item[i][0])):
    #                         #print str(item[i][0][0]) + " " + str(item[i][0][1])
    #                         #lines.append([[item[i][0][0], item[i][0][1]]])
    #                         if lines != None:
    #                             np.append(lines, item[i][0])
    #                         else:
    #                             lines = np.array(item[i][0])
    #                         linesList.append((item[i][0][0] , item[i][0][1]))
    #                         #cv2.circle(backproj, ( item[i][0][0] , item[i][0][1]), 10, (255,255,255), 10)
    #                         #cv.Circle(img, (pt1[0] + item[i][0][0] ,int(pt1[1] * 1.1)+ item[i][0][1]), 5, (255,0,255))
    #                             
    #                             
    #                
    #                 
    #                 #print type(originalImageArray)
    #                 print lines
    #                 #cv2.polylines(originalImageArray, lines, True, cv.RGB(255, 255, 0), 10)
    #                 print type(linesList)
    #                 #cv.PolyLine(cv_im, linesList, False, cv.RGB(255, 255, 0), 10)
    #                 #cv2.drawContours(backproj, contours0, , cv.RGB(55, 55, 55))
                
                    
                    
                    # canny_output = None
                    # canny_output = cv2.Canny(backproj, 700, 1000, canny_output, 7)
                    # cv2.imshow("canny ", canny_output)
                    
                    # cv.Canny(hsv_image, contours0, 10, 60);
                    # contours, hier = cv2.findContours( canny_output , cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                    # cv2.drawContours(originalImageArray,lines,-1,(0,255,0),3)

                    # detect mouth
                    mouthArea = cv.GetSubRect(originalImage, (pt1[0], centerY, pt2[0] - pt1[0], pt2[1] - centerY))
                    self.detectMouth(originalImage, mouthArea, pt1, centerY, self.cascade3)

                    # start tracking face
                    if not isOpticalFlow:
                        originalArray2 = cv.CloneImage(originalImage)
                        faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
                        faceArea2 = cv.CloneMat(faceArea)
                        return (True, faceArea2, originalImage, pt1, pt2)
                        
#                         originalImage2 = cv.CloneImage(originalImage)
#                         camshift = Camshift()
#                         camshift.defineRegionOfInterest(originalImage2, pt1, pt2)

#                         originalArray2 = cv.CloneImage(originalImage)
#                         faceArea = cv.GetSubRect(originalArray2, (pt1[0], pt1[1], pt2[0] - pt1[0], pt2[1] - pt1[1]))
#                         faceArea2 = cv.CloneMat(faceArea)
#                         cv.ShowImage("face area", faceArea2)
#                         faceArray = np.asarray(faceArea2, np.uint8, 3)
#                         faceArray = cv2.cvtColor(faceArray, cv2.COLOR_BGR2GRAY)
#                         self.matcher.defineTargetImage(faceArray)
#                         self.matcher.findInVideoSequence()

        cv.ShowImage("result", originalImage)
        
        return (False, originalImage, None, None, None)
Example No. 23
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "time taken for detection = %gms" % (
            t / (cv.GetTickFrequency() * 1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

        cv.ShowImage("video", img)


if __name__ == '__main__':

    parser = OptionParser(
        usage="usage: %prog [options] [filename|camera_index]")
    parser.add_option(
        "-c",
        "--cascade",
        action="store",
        dest="cascade",
        type="str",
        help="Haar cascade file, default %default",
        default="../data/haarcascades/haarcascade_frontalface_alt.xml")
    (options, args) = parser.parse_args()

    cascade = cv.Load(options.cascade)

    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    input_name = args[0]
    if input_name.isdigit():
        capture = cv.CreateCameraCapture(int(input_name))
    else:
        capture = None

    cv.NamedWindow("video", 1)

    # desired size of the video (only applied when reading from a camera)
    width = 160
    height = 120

    if capture:
        if width is None:
            width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
        else:
            cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)

        if height is None:
            height = int(
                cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
        else:
            cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

        frame_copy = None
        while True:
            frame = cv.QueryFrame(capture)
            if not frame:
                cv.WaitKey(0)
                break
            if not frame_copy:
                frame_copy = cv.CreateImage((frame.width, frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)

            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, frame_copy)
            else:
                cv.Flip(frame, frame_copy, 0)

            detect_and_draw(frame_copy, cascade)

            if cv.WaitKey(10) >= 0:
                break
    else:
        # input is an image file, not a camera index
        image = cv.LoadImage(input_name, 1)
        detect_and_draw(image, cascade)
        cv.WaitKey(0)

    cv.DestroyWindow("video")
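The legacy cv module used in the example above has been removed from current OpenCV releases. As a hedged sketch only, the same camera loop with the cv2 API might look roughly like this (the cascade path is reused from the example; the tuning values passed to detectMultiScale are assumptions, not taken from it):

# Hedged sketch: the same detect-and-draw loop with the cv2 API (OpenCV >= 3).
import cv2

cascade = cv2.CascadeClassifier(
    "../data/haarcascades/haarcascade_frontalface_alt.xml")
capture = cv2.VideoCapture(0)  # camera index 0; assumption for this sketch

while True:
    ok, frame = capture.read()
    if not ok:
        break

    # grayscale + histogram equalisation, mirroring the cv version
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)

    # detectMultiScale replaces cv.HaarDetectObjects
    faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3,
                                     minSize=(20, 20))
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)

    cv2.imshow("video", frame)
    if cv2.waitKey(10) >= 0:
        break

capture.release()
cv2.destroyAllWindows()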
Exemplo n.º 24
0
    def get_frame(self):
        try:
            frame = cv.QueryFrame(self.camera)
            if not frame:
                print('Camera error')
                return
            if not self.frame_copy:
                self.frame_copy = cv.CreateImage((frame.width, frame.height),
                                                 cv.IPL_DEPTH_8U,
                                                 frame.nChannels)
            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Flip(frame, frame, -1)

            # Our operations on the frame come here
            gray = cv.CreateImage((frame.width, frame.height), 8, 1)
            small_img = cv.CreateImage(
                (cv.Round(frame.width / self.image_scale),
                 cv.Round(frame.height / self.image_scale)), 8, 1)

            # convert color input image to grayscale
            cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

            # Scale input image for faster processing
            cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

            cv.EqualizeHist(small_img, small_img)

            midFace = None

            if (self.cascade):
                t = cv.GetTickCount()
                # Do haar detection
                faces = cv.HaarDetectObjects(small_img, self.cascade,
                                             cv.CreateMemStorage(0),
                                             self.haar_scale,
                                             self.min_neighbors,
                                             self.haar_flags, self.min_size)
                t = cv.GetTickCount() - t
                if faces:
                    if not os.path.isfile('face.jpg'):
                        # Save temporary image if no existing one
                        image = cv2.imencode('.jpeg',
                                             np.asarray(frame[:, :]))[1]
                        image = cv2.imdecode(image, cv2.IMREAD_COLOR)
                        cv2.imwrite('face.jpg', image)

                    for ((x, y, w, h), n) in faces:
                        # the input to cv.HaarDetectObjects was resized, so scale the
                        # bounding box of each face back up and convert it to two CvPoints
                        pt1 = (int(x * self.image_scale),
                               int(y * self.image_scale))
                        pt2 = (int((x + w) * self.image_scale),
                               int((y + h) * self.image_scale))
                        cv.Rectangle(frame, pt1, pt2, cv.RGB(100, 220, 255), 1,
                                     8, 0)

                        # Calculate mid point of the face
                        x1, y1 = pt1
                        x2, y2 = pt2

                        midFaceX = x1 + ((x2 - x1) / 2)
                        midFaceY = y1 + ((y2 - y1) / 2)
                        midFace = (midFaceX, midFaceY)

                        # Calculate offset of camera angle
                        offsetX = midFaceX / float(frame.width / 2)
                        offsetY = midFaceY / float(frame.height / 2)
                        offsetX -= 1
                        offsetY -= 1

                        self.cam_pan -= (offsetX * 5)
                        self.cam_tilt += (offsetY * 5)
                        self.cam_pan = max(0, min(180, self.cam_pan))
                        self.cam_tilt = max(0, min(180, self.cam_tilt))

                        # Pan and tilt to the next position
                        pan(int(self.cam_pan - 90))
                        tilt(int(self.cam_tilt - 90))

            # Push the processed frame, JPEG-encoded, to the Flask stream
            image = cv2.imencode('.jpeg',
                                 np.asarray(frame[:, :]))[1].tostring()
            return image
        except Exception as e:
            print(e)
            return
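get_frame() returns a JPEG-encoded byte string, so it is presumably consumed by a web route that streams frames to the browser. A minimal sketch of that pattern with Flask, assuming a hypothetical Camera class that exposes the get_frame() method above (the class and route names are illustrative):

# Hedged sketch: serving get_frame() output as an MJPEG stream via Flask.
# "Camera" and the route path are assumptions; the multipart/x-mixed-replace
# pattern itself is standard Flask streaming usage.
from flask import Flask, Response

app = Flask(__name__)
camera = Camera()  # hypothetical wrapper exposing the get_frame() above

def generate_frames():
    while True:
        frame = camera.get_frame()
        if not frame:
            continue
        # each part of the multipart response carries one JPEG image
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(generate_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)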
Exemplo n.º 25
0
def insertOrUpdate(database):
    conn = sqlite3.connect(database)  # reconstructed: the opening of this helper was missing in the source
    cmd = "INSERT INTO Members(ID,Name) VALUES(" + str(Id) + ",'" + str(Name) + "')"
    conn.execute(cmd)
    conn.commit()
    conn.close()


Id = raw_input('Enter the User ID')
Name = raw_input('Enter the User Name')

database = r"D:\IOT Project for Semester IV\Program\Face Recognition\Attendance.db"
insertOrUpdate(database)
sampleNum = 0
while(True):
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    t = cv.GetTickCount()
    faces = detector.detectMultiScale(gray, 1.4, 5)
    t = cv.GetTickCount() - t
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        sampleNum = sampleNum + 1
        # save the detected face region (grayscale crop) into the dataset folder
        cv2.imwrite("dataset2/User." + Id + '.' + str(sampleNum) + ".jpg", gray[y:y + h, x:x + w])

    # show the current frame even when no face was detected in it
    cv2.imshow('frame', img)
    if(cv2.waitKey(100) & 0xFF == ord('q')):
        break
    elif(sampleNum>70):
        break
print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
print "Faces Found:", len(faces)
cap.release()
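String-concatenated SQL like the INSERT in insertOrUpdate above breaks on names containing quotes and is unsafe if the values ever come from an untrusted source. A hedged sketch of the same insert using sqlite3 parameter binding (the table name Members comes from the example; the function name is illustrative):

# Hedged sketch: parameterised version of the insert above.
import sqlite3

def insert_member(database, member_id, name):
    conn = sqlite3.connect(database)
    try:
        # "?" placeholders let sqlite3 handle quoting and escaping
        conn.execute("INSERT INTO Members(ID, Name) VALUES (?, ?)",
                     (member_id, name))
        conn.commit()
    finally:
        conn.close()

# usage, mirroring the example:
# insert_member(database, Id, Name)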
Exemplo n.º 26
0
#---------------------------

# Method 1: iterate through the pixels by index
red_sum = 0
green_sum = 0
blue_sum = 0
c = 0
for i in range(im.rows):
    for j in range(im.cols):
        c = c + 1
        red_sum += im[i, j][0]
        green_sum += im[i, j][1]
        blue_sum += im[i, j][2]
print red_sum, green_sum, blue_sum, c

dur = cv.GetTickCount()  # take a tick-count timestamp; the difference between two calls gives elapsed ticks
print cv.GetTickCount() - dur

# Method 2: line iterator; note this visits only the pixels on the line
# from (0, 0) to (im.rows, im.cols), not every pixel in the image
li = cv.InitLineIterator(im, (0, 0), (im.rows, im.cols))
red_sum = 0
green_sum = 0
blue_sum = 0
c = 0
for (r, g, b) in li:
    red_sum += r
    green_sum += g
    blue_sum += b
    c = c + 1
print red_sum, green_sum, blue_sum, c
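Per-pixel Python loops like the two methods above are slow, and easy to get subtly wrong (the line iterator, for instance, only covers one diagonal). With the cv2/NumPy interface the per-channel sums reduce to a single vectorised call; a minimal sketch, assuming the image is read with cv2.imread and noting that OpenCV stores channels in BGR order (the file name is illustrative):

# Hedged sketch: vectorised per-channel sums with NumPy instead of a pixel loop.
import cv2
import numpy as np

im = cv2.imread("image.jpg")  # illustrative path; yields a BGR uint8 array
if im is None:
    raise IOError("could not read image.jpg")

# sum over rows and columns, keeping the channel axis; int64 avoids overflow
blue_sum, green_sum, red_sum = im.sum(axis=(0, 1), dtype=np.int64)
pixel_count = im.shape[0] * im.shape[1]

print(red_sum, green_sum, blue_sum, pixel_count)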