Example 1
    def __init__(self, threshold=8, doRecord=True, showWindows=True):
        self.writer = None
        self.font = None
        self.doRecord = doRecord  # Whether or not to record the moving object
        self.show = showWindows  # Whether or not to show the two windows
        self.frame = None
        self.frame_rate = 5
        self.frame_width = 640
        self.frame_height = 480

        self.capture = cv.CaptureFromCAM(0)
        self.frame = cv.QueryFrame(self.capture)  # Take a frame to init the recorder
        if doRecord:
            self.initRecorder()

        self.frame1gray = cv.CreateMat(self.frame.height, self.frame.width,
                                       cv.CV_8U)  #Gray frame at t-1
        cv.CvtColor(self.frame, self.frame1gray, cv.CV_RGB2GRAY)

        #Will hold the thresholded result
        self.res = cv.CreateMat(self.frame.height, self.frame.width, cv.CV_8U)

        self.frame2gray = cv.CreateMat(self.frame.height, self.frame.width,
                                       cv.CV_8U)  #Gray frame at t

        self.width = self.frame.width
        self.height = self.frame.height
        self.nb_pixels = self.width * self.height
        self.threshold = threshold
        self.isRecording = False
        self.trigger_time = 0  #Hold timestamp of the last detection

        if showWindows:
            cv.NamedWindow("Image")
            cv.CreateTrackbar("Detection threshold: ", "Image", self.threshold,
                              100, self.onChange)
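
The snippet above targets the legacy cv bindings that were removed in OpenCV 3. A minimal sketch of the same initialization against the modern cv2 API, assuming a default webcam at index 0:

import cv2

capture = cv2.VideoCapture(0)  # default webcam; frames come back as NumPy arrays
ok, frame = capture.read()  # take one frame to size the buffers
if not ok:
    raise RuntimeError("could not read from camera")
frame1gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # gray frame at t-1
res = frame1gray.copy()  # will hold the thresholded result
cv2.namedWindow("Image")
cv2.createTrackbar("Detection threshold: ", "Image", 8, 100, lambda v: None)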
Example 2
import cv2
import numpy as np
import face_recognition

imgElon = face_recognition.load_image_file("Database/Elon.jpg")
imgElon = cv2.cvtColor(imgElon, cv2.COLOR_BGR2RGB)
imgTest = face_recognition.load_image_file("test.jpg")
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2RGB)

faceLoc = face_recognition.face_locations(imgElon)[0]
EncodeElon = face_recognition.face_encodings(imgElon)[0]
cv2.rectangle(imgElon, (faceLoc[3], faceLoc[0]), (faceLoc[1], faceLoc[2]),
              (0, 255, 0), 2)

cv2.imshow("Elon Database", imgElon)
cv2.imshow("Elon Selfie", imgTest)
cv2.waitKey(0)
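
The test image is loaded above but never actually compared. A minimal sketch of the missing comparison step, using the face_recognition API (0.6 is the library's default distance tolerance):

encodeTest = face_recognition.face_encodings(imgTest)[0]
results = face_recognition.compare_faces([EncodeElon], encodeTest)  # [True] if within tolerance
faceDis = face_recognition.face_distance([EncodeElon], encodeTest)  # lower = more similar
print(results, faceDis)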
Example 3
    def run(self):
        #initiate font
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
        # instantiate images
        hsv_img = cv.CreateImage(cv.GetSize(cv.QueryFrame(self.capture)), 8, 3)
        threshold_img1 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img1a = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        threshold_img2 = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
        i = 0
        writer = cv.CreateVideoWriter('angle_tracking.avi', cv.CV_FOURCC('M', 'J', 'P', 'G'), 30, cv.GetSize(hsv_img), 1)

        while True:
            # capture the image from the cam
            img = cv.QueryFrame(self.capture)

            # convert the image to HSV
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            # threshold the image to isolate two colors
            cv.InRangeS(hsv_img, (165, 145, 100), (250, 210, 160), threshold_img1)  # red
            cv.InRangeS(hsv_img, (0, 145, 100), (10, 210, 160), threshold_img1a)  # red again
            cv.Add(threshold_img1, threshold_img1a, threshold_img1)  # this is combining the two limits for red
            cv.InRangeS(hsv_img, (105, 180, 40), (120, 260, 100), threshold_img2)  # blue

            # determine the moments of the two objects
            threshold_img1 = cv.GetMat(threshold_img1)
            threshold_img2 = cv.GetMat(threshold_img2)
            moments1 = cv.Moments(threshold_img1, 0)
            moments2 = cv.Moments(threshold_img2, 0)
            area1 = cv.GetCentralMoment(moments1, 0, 0)
            area2 = cv.GetCentralMoment(moments2, 0, 0)

            # initialize x and y
            x1 = y1 = x2 = y2 = 0

            # there can be noise in the video so ignore objects with small areas
            if area1 > 200000:
                # the center of the object is found by dividing the 1,0 and 0,1 spatial moments by the area
                x1 = int(cv.GetSpatialMoment(moments1, 1, 0) / area1)
                y1 = int(cv.GetSpatialMoment(moments1, 0, 1) / area1)

                # draw circle
                cv.Circle(img, (x1, y1), 2, (0, 255, 0), 20)

                # write x and y position
                cv.PutText(img, str(x1) + ', ' + str(y1), (x1, y1 + 20), font, 255)  # Draw the text

            if area2 > 100000:
                # same as above: center = spatial moments divided by the area
                x2 = int(cv.GetSpatialMoment(moments2, 1, 0) / area2)
                y2 = int(cv.GetSpatialMoment(moments2, 0, 1) / area2)

                # draw circle
                cv.Circle(img, (x2, y2), 2, (0, 255, 0), 20)

                cv.PutText(img, str(x2) + ', ' + str(y2), (x2, y2 + 20), font, 255)  # Draw the text
            cv.Line(img, (x1, y1), (x2, y2), (0, 255, 0), 4, cv.CV_AA)
            # draw line and angle
            cv.Line(img, (x1, y1), (cv.GetSize(img)[0], y1), (100, 100, 100, 100), 4, cv.CV_AA)
            x1, y1, x2, y2 = float(x1), float(y1), float(x2), float(y2)
            # atan2 avoids a ZeroDivisionError when the line is vertical
            # (requires "import math" at module level)
            angle = int(math.atan2(y1 - y2, x2 - x1) * 180 / math.pi)
            cv.PutText(img, str(angle), (int(x1) + 50, (int(y2) + int(y1)) / 2), font, 255)

            # cv.WriteFrame(writer,img)

            # display frames to users
            cv.ShowImage('Target', img)
            cv.ShowImage('Threshold1', threshold_img1)
            cv.ShowImage('Threshold2', threshold_img2)
            cv.ShowImage('hsv', hsv_img)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
        cv.DestroyAllWindows()
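
For reference, a minimal cv2 sketch of the same two-range red mask and centroid-from-moments step, assuming img is a BGR frame (the HSV bounds are the ones used above):

import cv2

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# red wraps around the hue axis, so combine both ends of the range
red = cv2.bitwise_or(cv2.inRange(hsv, (165, 145, 100), (250, 210, 160)),
                     cv2.inRange(hsv, (0, 145, 100), (10, 210, 160)))
m = cv2.moments(red, binaryImage=True)
if m["m00"] > 0:  # ignore empty masks
    cx, cy = int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])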
Example 4
# coding:UTF-8

import cv2 as cv
from src import App

image = cv.imread('D:/image/test.png')
cv.imshow("Original", image)

# Convert to grayscale (single channel) so it can act as a filter
grey = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
cv.imshow('Greyed', grey)

# Median smoothing
smoothed = cv.medianBlur(image, 3)  # apply a median filter with a 3x3 aperture
import cv2.cv as cv  # legacy OpenCV 2.x bindings

img = cv.LoadImage("friend1.jpg")

image_size = cv.GetSize(img)  # get the image size
greyscale = cv.CreateImage(image_size, 8, 1)  # create a grayscale image of the same size
cv.CvtColor(img, greyscale, cv.CV_BGR2GRAY)  # convert the color image to grayscale
storage = cv.CreateMemStorage(0)  # memory storage block needed by the face detector

cv.EqualizeHist(greyscale, greyscale)  # equalize the histogram to speed up detection
# detect objects
cascade = cv.Load('haarcascade_frontalface_alt2.xml')  # load the pretrained cascade

# detect the faces in the image; returns an object with one entry per face
faces = cv.HaarDetectObjects(greyscale, cascade, storage, 1.2, 2,
                             cv.CV_HAAR_DO_CANNY_PRUNING, (50, 50))

# extract the position data for each face
j = 0  # face counter
for (x, y, w, h), n in faces:
    j += 1
    cv.SetImageROI(img, (x, y, w, h))  # restrict the image to the face region
    cv.SaveImage("face" + str(j) + ".jpg", img)
    # and save it
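
The same detection pipeline against the modern cv2 API, as a sketch (cascade file and thresholds taken from the snippet above):

import cv2

img = cv2.imread("friend1.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)  # equalize to speed up / stabilize detection

cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
faces = cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=2,
                                 minSize=(50, 50))
for j, (x, y, w, h) in enumerate(faces, start=1):
    cv2.imwrite("face%d.jpg" % j, img[y:y + h, x:x + w])  # crop and save each face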
Example 6
def cvimage_grayscale(cv_image):
    """Converts a cvimage into grayscale (legacy cv bindings)"""
    grayscale = cv.CreateImage(cv.GetSize(cv_image), 8, 1)
    cv.CvtColor(cv_image, grayscale, cv.CV_RGB2GRAY)
    return grayscale
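
With the modern API the same conversion is a single call on a NumPy array; a sketch:

import cv2

def cvimage_grayscale(cv_image):
    """Convert a BGR image (NumPy array) to grayscale."""
    return cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)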
import cv2.cv as cv  # legacy OpenCV 2.x bindings
import math

im = cv.LoadImage('D:/1.jpg', cv.CV_LOAD_IMAGE_GRAYSCALE)

pi = math.pi  #Pi value

dst = cv.CreateImage(cv.GetSize(im), 8, 1)

cv.Canny(im, dst, 200, 200)
cv.Threshold(dst, dst, 100, 255, cv.CV_THRESH_BINARY)

#---- Standard ----
color_dst_standard = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.CvtColor(im, color_dst_standard,
            cv.CV_GRAY2BGR)  #Create output image in RGB to put red lines

lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_STANDARD, 1,
                       pi / 180, 100, 0, 0)
for (rho, theta) in lines[:100]:
    a = math.cos(theta)  #Calculate orientation in order to print them
    b = math.sin(theta)
    x0 = a * rho
    y0 = b * rho
    pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
    pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
    cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2,
            4)  #Draw the line

#---- Probabilistic ----
color_dst_proba = cv.CreateImage(cv.GetSize(im), 8, 3)
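
The probabilistic branch is cut off above. A cv2 sketch of the whole pipeline, Canny included, with illustrative HoughLinesP parameters:

import cv2
import numpy as np

im = cv2.imread('D:/1.jpg', cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(im, 200, 200)
color_dst_proba = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)

lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=100,
                        minLineLength=30, maxLineGap=10)
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        cv2.line(color_dst_proba, (x1, y1), (x2, y2), (0, 0, 255), 2)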
Example 8
def detect_and_draw(img, cascade):
    # assumes legacy cv bindings and module-level globals
    # (gray, small_img, storage, trackedFaces, scale, image_scale, ...)
    t = cv.GetTickCount()  ## start counter
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    #Ages all trackedFaces
    for f in trackedFaces:
        f.updateLife()
    #Remove expired faces (iterate over a copy: removing from a list
    #while iterating over it skips elements)
    for f in trackedFaces[:]:
        if f.isTooOld():
            trackedFaces.remove(f)

    faces = cv.HaarDetectObjects(small_img, cascade, storage, haar_scale,
                                 min_neighbors, haar_flags, min_size)
    drawline = 0
    if faces:
        #found a face
        for ((x, y, w, h), n) in faces:
            matchedFace = False
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            pt3 = (int(x * image_scale) + int(
                ((x + w) * image_scale - x * image_scale) / 3),
                   int(y * image_scale))
            pt4 = (int((x + w) * image_scale) - int(
                ((x + w) * image_scale - x * image_scale) / 3),
                   int((y * image_scale) + int((
                       (y + h) * image_scale) - int(y * image_scale)) / 3))

            #check if there are trackedFaces
            if (len(trackedFaces) > 0):
                #each face being tracked
                for f in trackedFaces:
                    #the face is found (small movement)
                    if ((abs(f.xpt - pt1[0]) < FACE_MAX_MOVEMENT)
                            and (abs(f.ypt - pt1[1]) < FACE_MAX_MOVEMENT)):
                        matchedFace = True
                        f.updateFace(int(w * image_scale),
                                     int(h * image_scale), pt1[0], pt1[1])
                        mf = f
                        break

                #if face not found, add a new face
                if (matchedFace == False):
                    f = Face(0, int(w * image_scale), int(h * image_scale),
                             pt1[0], pt1[1], 0)
                    trackedFaces.append(f)
                    mf = f
            #No tracked faces: adding one
            else:
                f = Face(0, int(w * image_scale), int(h * image_scale), pt1[0],
                         pt1[1], 0)
                trackedFaces.append(f)
                mf = f
            #where to draw face and properties
            if (mf.age > 5):

                #draw attention line
                lnpt1 = (int(mf.xpt * scale), int(mf.ypt * scale - 5) - 5)
                if (mf.age > mf.width):
                    lnpt2 = (int(mf.xpt * scale + mf.width),
                             int(mf.ypt * scale - 5))
                else:
                    lnpt2 = (int(mf.xpt * scale + mf.age),
                             int(mf.ypt * scale - 5))

                cv.Rectangle(img, lnpt1, lnpt2, RED, 4, 8,
                             0)  ## draw bolded attention line

                ### draw eyes
                cv.Rectangle(img, mf.eyeLeft1, mf.eyeLeft2, MAGENTA, 3, 8, 0)
                cv.Rectangle(img, mf.eyeRight1, mf.eyeRight2, MAGENTA, 3, 8,
                             0)
                #
                ### draw mouth
                cv.Rectangle(img, mf.mouthTopLeft, mf.mouthBotRight, ORANGE,
                             3, 8, 0)
                #
                ### draw face
                cv.Rectangle(img, pt1, pt2, getColor(mf), 3, 8, 0)
                #cv.Rectangle( img, pt3, pt4, MAGENTA, 1, 8, 0 ) #forehead
                drawline = mf.age

    if CAPTURING: saveAsJPG(img)
    if osName == "nt": cv.Flip(img, img, 0)
    cv.ShowImage('Camera', img)
    t = cv.GetTickCount() - t  ## counter for FPS
    print("%i fps." % (cv.GetTickFrequency() * 1000000. / t))  ## print FPS
Example 9
def landmarkdetect(IP, PORT, camID):

    camProxy = ALProxy("ALVideoDevice", IP, PORT)

    resolution = 2  # VGA
    colorSpace = 11  # RGB
    videoClient = camProxy.subscribe("python_client", resolution, colorSpace,
                                     5)

    camProxy.setParam(18, camID)

    naoImage = camProxy.getImageRemote(videoClient)
    camProxy.unsubscribe(videoClient)
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]

    array = naoImage[6]

    # wrap the raw image data in a NumPy array
    im_cv = numpy.frombuffer(array, numpy.uint8).reshape(
        (imageHeight, imageWidth, 3))

    #im_cv = cv2.imread("img3.jpg",1)
    b, g, r = cv2.split(im_cv)
    img1 = cv2.merge([r, g, b])
    img3 = cv.fromarray(img1)  # convert the NumPy array to a legacy CvMat
    #img3=cv.LoadImage("img3.jpg",1)
    cv.SaveImage("save1.jpg", img3)
    imgHSV = cv.CreateImage(cv.GetSize(img3), 8, 3)
    cv.CvtColor(img3, imgHSV, cv.CV_RGB2HSV)

    cimg, cimg_c = hsvProceed(imgHSV)
    # cv.ShowImage("imgHSV", imgHSV)
    # cv.ShowImage("cimg", cimg)
    # cv.WaitKey(5)

    #
    # img3 = cv.LoadImage("save1.jpg",1)
    # imgHSV = cv.CreateImage(cv.GetSize(img3), 8, 3)
    #
    # cv.CvtColor(img3, imgHSV, cv.CV_RGB2HSV)
    # cimg, cimg_c=hsvProceed(imgHSV)
    # cv.ShowImage("s",cimg_c)

    storage = cv.CreateMemStorage(0)
    cnts = cv.FindContours(cimg, storage, cv.CV_RETR_LIST,
                           cv.CV_CHAIN_APPROX_SIMPLE)
    x = 0.0
    Area = 0
    left_right = 0
    up_down = 0
    if camID == 0:
        areamax = 6000
        areamin = 350
        value = img3.height / 7
    else:
        areamax = 5000
        areamin = 400
        value = 0

    while cnts:
        rect = cv.BoundingRect(cnts, 0)
        area = rect[2] * rect[3]
        rect_center_x = rect[0] + rect[2] / 2
        rect_center_y = rect[1] + rect[3] / 2
        # if camID == 1:
        #  radio_c = choose0.radio(cimg_c,rect)
        # if camID == 0:
        #  radio_c = choose0.choose02(cimg_c,rect)

        radio = float(rect[2]) / rect[3]
        # if radio_c == 1:  (extra circularity check, disabled)
        if (rect[1] > 10 and areamin < area < areamax
                and 0.1 < radio < 1.0):
            # cv2.cv.DrawContours(img3, cnts, (255, 255, 0), (255, 255, 0), 0, 1)
            # cv2.cv.Rectangle(img3,(rect[0],rect[1]),(rect[0]+rect[2],rect[1]+rect[3]),(0,0,255),1)
            # choose0.radio(cimg_c,rect)

            rect_center_x = rect[0] + rect[2] / 2
            rect_center_y = rect[1] + rect[3] / 2

            Area = rect[2] * rect[3]
            left_right = rect_center_x - cimg.width / 2
            up_down = rect_center_y - cimg.height / 2
            x, y = getloacation(rect[2], left_right)

        cnts = cnts.h_next()

    # cv2.cv.ShowImage("fsfs", img3)
    # cv.WaitKey(1)
    return Area, left_right, x
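
A sketch of the same bounding-box filter with the modern cv2 contour API (OpenCV 4 findContours signature; areamin/areamax as in the camID == 0 branch above):

import cv2

def find_blob(cimg, areamin=350, areamax=6000):
    contours, _ = cv2.findContours(cimg, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        area = w * h
        ratio = float(w) / h
        if y > 10 and areamin < area < areamax and 0.1 < ratio < 1.0:
            # offset of the blob's center from the image center
            return area, (x + w // 2) - cimg.shape[1] // 2
    return 0, 0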
Example 10
    contourscopy = contours
    rectangleList = []
    while contourscopy:
        x, y, w, h = cv.BoundingRect(contourscopy)
        rectangleList.append((x, y, w, h))
        contourscopy = contourscopy.h_next()
    return rectangleList


if __name__ == "__main__":
    orig = cv.LoadImage(
        r"C:\git\Python-Snippets\Image Recognition\images\D2C-Logins - RunDate 2019-10-03 - Part (22).image"
    )
    #Convert in black and white
    res = cv.CreateImage(cv.GetSize(orig), 8, 1)
    cv.CvtColor(orig, res, cv.CV_BGR2GRAY)

    #Operations on the image
    openCloseImage(res)
    dilateImage(res, 2)
    erodeImage(res, 2)
    smoothImage(res, 5)
    thresholdImage(res, 150, cv.CV_THRESH_BINARY_INV)

    #Get contours approximated
    contourLow = getContours(res, 3)

    #Draw them on an empty image
    final = cv.CreateImage(cv.GetSize(res), 8, 1)
    cv.Zero(final)
    cv.DrawContours(final, contourLow, cv.Scalar(255), cv.Scalar(255), 2)

import argparse
import matplotlib.pyplot as plt
import cv2

# Parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("image", help="Path to image to be displayed")
parser.add_argument("--mask",
                    "-m",
                    action="store_true",
                    help="Flag indicating whether this image is a mask")

args = parser.parse_args()

# Load the image using cv2
if args.mask:
    img = cv2.imread(args.image, cv2.IMREAD_GRAYSCALE)
else:
    img = cv2.cvtColor(cv2.imread(args.image), cv2.COLOR_BGR2RGB)

# Process the image such that it is in the right format
#img /= 255
#img = img.astype('float32')

# Display the image using matplotlib
if args.mask:
    plt.imshow(img, cmap='gray')
else:
    plt.imshow(img)
plt.show()
Example 12
def showNaoImage(IP, PORT, camID):  # parameters: IP, PORT, camera ID (top vs. bottom camera)

    # connect to NAO's camera
    camProxy = ALProxy("ALVideoDevice", IP, PORT)


    resolution = 2  # VGA
    colorSpace = 11  # RGB
    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)  # set resolution, frame rate, color space

    t0 = time.time()
    camProxy.setParam(18, camID)  # select the camera

    naoImage = camProxy.getImageRemote(videoClient)  # grab the captured image into naoImage
    t1 = time.time()

    camProxy.unsubscribe(videoClient)
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]  # naoImage[6] is the raw image data


    # wrap the raw image data in a NumPy array
    im_cv = numpy.frombuffer(array, numpy.uint8).reshape(
        (imageHeight, imageWidth, 3))

    # convert the color space from BGR to RGB
    b, g, r = cv2.split(im_cv)
    img1 = cv2.merge([r, g, b])
    # convert mat to cvmat
    img3 = cv2.cv.fromarray(img1)
    cv2.cv.SaveImage("test22.bmp", img3)
    # convert the color space to HSV
    imgHSV = cv2.cv.CreateImage(cv2.cv.GetSize(img3), 8, 3)
    cv2.cv.CvtColor(img3, imgHSV, cv2.cv.CV_RGB2HSV)

    cimg, cimg_c = hsvProceed(imgHSV, camID)  # process the image with hsvProceed; returns binary images
    # find the smallest bounding rectangles
    # initialization
    storage = cv2.cv.CreateMemStorage(0)
    cnts = cv2.cv.FindContours(cimg, storage, cv2.cv.CV_RETR_LIST,
                               cv2.cv.CV_CHAIN_APPROX_SIMPLE)
    Area = 0
    left_right = 0
    up_down = 0
    # different filter conditions for each camera
    if camID == 0:
        areamax = 2500
        areamin = 40
        valuemin = 25
        value_w = 641
        valuemax = 481
    else:
        areamax = 5000
        areamin = 400
        valuemin = 0
        value_w = 500
        valuemax = 400

    while cnts:
        rect = cv2.cv.BoundingRect(cnts, 0)  # bounding box of a connected component
        area = rect[2] * rect[3]  # area of the bounding box
        # center of the bounding box
        rect_center_x = rect[0] + rect[2] / 2
        rect_center_y = rect[1] + rect[3] / 2
        # call radio() from the choose0 module to keep only circular regions
        radio_c = choose0.radio(cimg_c, rect)

        radio = float(rect[2]) / rect[3]  # aspect ratio of the bounding box
        # filter conditions
        if (valuemin <= rect[1] <= valuemax and rect[0] <= value_w
                and areamin < area < areamax and 0.6 < radio < 1.6
                and radio_c == 1):
            cv2.cv.DrawContours(img3, cnts, (255, 255, 0), (255, 255, 0), 0, 1)  # draw the contour
            cv2.cv.Rectangle(img3, (rect[0], rect[1]),
                             (rect[0] + rect[2], rect[1] + rect[3]),
                             (0, 0, 255), 1)  # draw the bounding box

            # area and image position of the accepted box
            Area = rect[2] * rect[3]
            left_right = rect_center_x - cimg.width / 2
            up_down = rect_center_y - cimg.height / 2

        cnts = cnts.h_next()

    return Area, left_right, up_down  # return the ball's area and its position in the image
Example 13
def CamGui():
    capture = cv.CaptureFromCAM(0)
    width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    prev_gray = cv.CreateImage((width, height), 8, 1)
    gray = cv.CreateImage((width, height), 8, 1)

    # Pyramid buffers for cv.CalcOpticalFlowPyrLK at t-1 and t
    prevPyr = cv.CreateImage((height / 3, width + 8), 8, 1)
    currPyr = cv.CreateImage((height / 3, width + 8), 8, 1)

    max_count = 500
    qLevel = 0.01
    minDist = 10
    prev_points = []  # Points at t-1
    curr_points = []  # Points at t
    lines = []  # To keep all the lines overtime

    while True:
        frame = cv.QueryFrame(capture)
        cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)  # Convert to gray
        output = cv.CloneImage(frame)

        prev_points = cv.GoodFeaturesToTrack(
            gray, None, None, max_count, qLevel, minDist)
        curr_points, status, err = cv.CalcOpticalFlowPyrLK(
            prev_gray, gray, prevPyr, currPyr, prev_points, (10, 10), 3, (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

        # If points status are ok and distance not negligible keep the point
        k = 0
        for i in range(len(curr_points)):
            nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + \
                abs(int(prev_points[i][1]) - int(curr_points[i][1]))
            if status[i] and nb > 2:
                prev_points[k] = prev_points[i]
                curr_points[k] = curr_points[i]
                k += 1

        prev_points = prev_points[:k]
        curr_points = curr_points[:k]
        # At the end only interesting points are kept

        # Draw all the previously kept lines otherwise they would be lost the next frame
        for (pt1, pt2) in lines:
            cv.Line(frame, pt1, pt2, (255, 255, 255))

        # Draw the lines between each points at t-1 and t
        for prevpoint, point in zip(prev_points, curr_points):
            prevpoint = (int(prevpoint[0]), int(prevpoint[1]))
            cv.Circle(frame, prevpoint, 15, 0)
            point = (int(point[0]), int(point[1]))
            cv.Circle(frame, point, 3, 255)
            cv.Line(frame, prevpoint, point, (255, 255, 255))
            # Append current lines to the lines list
            lines.append((prevpoint, point))

        cv.Copy(gray, prev_gray)  # Put the current frame prev_gray
        prev_points = curr_points

        cv.ShowImage("The Video", frame)
        #cv.WriteFrame(writer, frame)
        c = cv.WaitKey(1)
        if c == 27:  # Esc on Windows
            break
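
The same tracking loop sketched against the modern cv2 API, where the pyramid buffers are managed internally by calcOpticalFlowPyrLK (assumes goodFeaturesToTrack keeps finding corners):

import cv2

capture = cv2.VideoCapture(0)
ok, frame = capture.read()
prev_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
prev_points = cv2.goodFeaturesToTrack(prev_gray, maxCorners=500,
                                      qualityLevel=0.01, minDistance=10)

while True:
    ok, frame = capture.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    curr_points, status, err = cv2.calcOpticalFlowPyrLK(
        prev_gray, gray, prev_points, None, winSize=(10, 10), maxLevel=3)
    # draw a line for every point that was tracked successfully
    for p, q, ok_pt in zip(prev_points[:, 0], curr_points[:, 0], status[:, 0]):
        if ok_pt:
            cv2.line(frame, tuple(p.astype(int)), tuple(q.astype(int)),
                     (255, 255, 255))
    cv2.imshow("The Video", frame)
    if cv2.waitKey(1) == 27:  # Esc
        break
    prev_gray = gray
    prev_points = cv2.goodFeaturesToTrack(prev_gray, maxCorners=500,
                                          qualityLevel=0.01, minDistance=10)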
Example 14
def cvimage_to_pygame(image):
    """Convert cvimage into a pygame image (legacy cv bindings)"""
    image_rgb = cv.CreateMat(image.height, image.width, cv.CV_8UC3)
    cv.CvtColor(image, image_rgb, cv.CV_BGR2RGB)
    return pygame.image.frombuffer(image_rgb.tostring(),
                                   cv.GetSize(image_rgb), "RGB")
Example 15
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        width = frame.width
        height = frame.height
        surface = width * height  #Surface area of the image
        cursurface = 0  #Holds the surface area that has changed so far

        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        while True:
            color_image = cv.QueryFrame(self.capture)

            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3,
                      0)  #Remove false positives

            if not difference:  #For the first time put values in difference, temp and moving_average
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, moving_average, 0.020,
                              None)  #Compute the average

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Minus the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            #Convert the image so that it can be thresholded
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            cv.Dilate(grey_image, grey_image, None, 18)  #to get object blobs
            cv.Erode(grey_image, grey_image, None, 10)

            # Find contours
            storage = cv.CreateMemStorage(0)
            contours = cv.FindContours(grey_image, storage,
                                       cv.CV_RETR_EXTERNAL,
                                       cv.CV_CHAIN_APPROX_SIMPLE)

            backcontours = contours  #Save contours

            while contours:  #For all contours compute the area
                cursurface += cv.ContourArea(contours)
                contours = contours.h_next()

            avg = (cursurface * 100) / surface  #Percentage of the frame area covered by changed contours
            if avg > self.ceil:
                print("Something is moving!")
            #print avg,"%"
            cursurface = 0  #Reset the current surface to 0

            #Draw the contours on the image
            _red = (0, 0, 255)  #Red for external contours
            _green = (0, 255, 0)  #Green for internal contours
            levels = 1  #1 = external contours only, 2 = internal contours as well, 3 ...
            cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                            cv.CV_FILLED)

            cv.ShowImage("Target", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
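
A sketch of the same running-average motion detector on the cv2 API (OpenCV 4 findContours signature; the 10% trigger stands in for self.ceil):

import cv2
import numpy as np

capture = cv2.VideoCapture(0)
ok, frame = capture.read()
moving_average = np.float32(frame)

while ok:
    frame = cv2.GaussianBlur(frame, (3, 3), 0)  # remove false positives
    cv2.accumulateWeighted(frame, moving_average, 0.020)  # compute the average
    difference = cv2.absdiff(frame, cv2.convertScaleAbs(moving_average))
    gray = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
    _, gray = cv2.threshold(gray, 70, 255, cv2.THRESH_BINARY)
    gray = cv2.dilate(gray, None, iterations=18)  # get object blobs
    gray = cv2.erode(gray, None, iterations=10)
    contours, _ = cv2.findContours(gray, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    moved = sum(cv2.contourArea(c) for c in contours)
    if moved * 100 / (frame.shape[0] * frame.shape[1]) > 10:
        print("Something is moving!")
    cv2.imshow("Target", frame)
    if cv2.waitKey(7) & 0xFF in (27, 10):  # ESC or ENTER
        break
    ok, frame = capture.read()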
Example 16
    def updateVideo(self):
        ret, frame = self.capture.read()  # assumes a cv2.VideoCapture stored on self
        if ret:
            frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
            self.bmp.CopyFromBuffer(frame.tostring())
            self.Refresh()
Example 17
        cv.WaitKey(0)
        break
    if not frame_copy:
        frame_copy = cv.CreateImage((frame.width, frame.height),
                                    cv.IPL_DEPTH_8U, frame.nChannels)
    if frame.origin == cv.IPL_ORIGIN_TL:
        cv.Flip(frame, frame, -1)

    # Our operations on the frame come here
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        frame.width / image_scale), cv.Round(frame.height / image_scale)), 8,
                               1)

    # convert color input image to grayscale
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    midFace = None

    if (cascade):
        t = cv.GetTickCount()
        # HaarDetectObjects takes 0.02s
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
Example 18
    def detectFace(self, cam_img, faceCascade, eyeCascade, mouthCascade):  # cam_img is a cv2.VideoCapture
        min_size = (20, 20)
        image_scale = 2
        haar_scale = 1.2
        min_neighbors = 2
        haar_flags = 0
        image_width = int(cam_img.get(cv.CV_CAP_PROP_FRAME_WIDTH))
        image_height = int(cam_img.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
        # Allocate the temporary images
        gray = cv.CreateImage((image_width, image_height), 8, 1)  # tuple as the first arg
        smallImage = cv.CreateImage((cv.Round(image_width / image_scale), cv.Round(image_height / image_scale)), 8, 1)

        (ok, img) = cam_img.read()
        # print 'gray is of ',type(gray) >>> gray is of  <type 'cv2.cv.iplimage'>
        # print type(smallImage)  >>> <type 'cv2.cv.iplimage'>
        # print type(image) >>> <type 'cv2.VideoCapture'>
        # print type(img) >>> <type 'numpy.ndarray'>

        # convert numpy.ndarray to iplimage
        ipl_img = cv2.cv.CreateImageHeader((img.shape[1], img.shape[0]), cv.IPL_DEPTH_8U, 3)
        cv2.cv.SetData(ipl_img, img.tostring(), img.dtype.itemsize * 3 * img.shape[1])

        # Convert color input image to grayscale
        cv.CvtColor(ipl_img, gray, cv.CV_BGR2GRAY)

        # Scale input image for faster processing
        cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)

        # Equalize the histogram
        cv.EqualizeHist(smallImage, smallImage)

        # Detect the faces
        faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        # => The function returns a list of tuples, (rect, neighbors) , where rect is a CvRect specifying the object’s extents and neighbors is a number of neighbors.
        # => CvRect cvRect(int x, int y, int width, int height)
        # If faces are found
        if faces:
            face = faces[0]
            self.faceX = face[0][0]
            self.faceY = face[0][1]

            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(ipl_img, pt1, pt2, cv.RGB(0, 0, 255), 3, 8, 0)
                # face_region = cv.GetSubRect(ipl_img,(x,int(y + (h/4)),w,int(h/2)))

            cv.SetImageROI(ipl_img, (pt1[0],
                                     pt1[1],
                                     pt2[0] - pt1[0],
                                     int((pt2[1] - pt1[1]) * 0.7)))

            eyes = cv.HaarDetectObjects(ipl_img, eyeCascade,
                                        cv.CreateMemStorage(0),
                                        haar_scale, min_neighbors,
                                        haar_flags, (15, 15))

            if eyes:
                # For each eye found
                for eye in eyes:
                    # Draw a rectangle around the eye
                    cv.Rectangle(ipl_img,  # image
                                 (eye[0][0],  # vertex pt1
                                  eye[0][1]),
                                 (eye[0][0] + eye[0][2],  # vertex pt2 opposite to pt1
                                  eye[0][1] + eye[0][3]),
                                 cv.RGB(255, 0, 0), 1, 4, 0)  # color,thickness,lineType(8,4,cv.CV_AA),shift

        cv.ResetImageROI(ipl_img)

        return ipl_img
def meanshiftUsingYUV(path):
    im = cv2.imread(path)
    im = cv2.cvtColor(im, cv2.COLOR_BGR2YCrCb)
    (segmentedImage, labelsImage, numberRegions) = pms.segmentMeanShift(im)
    print("number of regions", numberRegions)
    return segmentedImage
Example 20
def getthresholdedimg(im):
    imghsv = cv.CreateImage(cv.GetSize(im), 8, 3)
    cv.CvtColor(im, imghsv, cv.CV_BGR2HSV)  # Convert image from BGR to HSV
    imgthreshold = cv.CreateImage(cv.GetSize(im), 8, 1)
    cv.InRangeS(imghsv, cv.Scalar(23, 100, 100), cv.Scalar(25, 255, 255), imgthreshold)  ## catch the orange yellow blob
    return imgthreshold
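
The cv2 equivalent of this thresholding helper is a two-liner; a sketch with the same orange-yellow bounds:

import cv2

def getthresholdedimg_cv2(im):
    imghsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    return cv2.inRange(imghsv, (23, 100, 100), (25, 255, 255))  # catch the orange-yellow blob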
# Digit recognition from a webcam ROI (assumes xTrainScale, yTrain,
# xTestScale and yTest were prepared earlier)
import cv2
import numpy as np
import PIL.ImageOps
from PIL import Image
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

lr = LogisticRegression(solver='saga', multi_class='multinomial')

lr.fit(xTrainScale, yTrain)

yPredict = lr.predict(xTestScale)

accuracy = accuracy_score(yTest, yPredict)
print(accuracy)

video = cv2.VideoCapture(0)

while (True):
    try:
        ret, frame = video.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        height, width = gray.shape
        upperLeft = (int(width / 2 - 60), int(height / 2 - 60))
        bottomRight = (int(width / 2 + 60), int(height / 2 + 60))
        cv2.rectangle(gray, upperLeft, bottomRight, (0, 255, 0), 2)
        roi = gray[upperLeft[1]:bottomRight[1], upperLeft[0]:bottomRight[0]]
        imagePil = Image.fromarray(roi)
        imageBw = imagePil.convert('L')
        imageBwResize = imageBw.resize((28, 28), Image.ANTIALIAS)
        imageBwResizeInverted = PIL.ImageOps.invert(imageBwResize)
        pixelFilter = 20
        minPixel = np.percentile(imageBwResizeInverted, pixelFilter)
        imageBwResizeInvertedScaled = np.clip(imageBwResizeInverted - minPixel,
                                              0, 255)
        maxPixel = np.max(imageBwResizeInverted)
        imageArray = np.asarray(imageBwResizeInvertedScaled) / maxPixel