Example #1
    def found_face(self):
        # global frame_copy
        if (not self.camera_is_on()) or (not self.find_face_is_on()):
            return False

        self.flushCameraBuffer()  # this reduces the frame delay
        frame = cv.QueryFrame(self.capture)
        if frame is None:
            self.close_camera()
            return False

        if not self.frame_copy:
            self.frame_copy = cv.CreateImage((frame.width, frame.height),
                                             cv.IPL_DEPTH_8U, frame.nChannels)

        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(frame, self.frame_copy)
        else:
            cv.Flip(frame, self.frame_copy, 0)

        if self.showVideo:
            result = self.detect_and_draw(self.frame_copy)
        else:
            result = self.detect_no_draw(self.frame_copy)
        cv.WaitKey(10)
        return result
Example #2
def scanner_process(frame, set_zbar):
    set_width = 100.0 / 100   # fraction of the frame width to scan
    set_height = 90.0 / 100   # fraction of the frame height to scan

    coord_x = int(frame.width * (1 - set_width) / 2)
    coord_y = int(frame.height * (1 - set_height) / 2)
    width = int(frame.width * set_width)
    height = int(frame.height * set_height)

    get_sub = cv.GetSubRect(frame,
                            (coord_x + 1, coord_y + 1, width - 1, height - 1))

    cv.Rectangle(frame, (coord_x, coord_y),
                 (coord_x + width, coord_y + height), (255, 0, 0))

    cm_im = cv.CreateImage((get_sub.width, get_sub.height), cv.IPL_DEPTH_8U, 1)
    cv.ConvertImage(get_sub, cm_im)
    image = zbar.Image(cm_im.width, cm_im.height, 'Y800', cm_im.tostring())

    set_zbar.scan(image)
    for symbol in image:
        print '\033[1;32mResult : %s symbol "%s" \033[1;m' % (symbol.type,
                                                              symbol.data)

    cv.ShowImage("webcame", frame)
    #cv.ShowImage("webcame2", get_sub)
    cv.WaitKey(10)
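On OpenCV builds without the legacy cv module, the same scan can be written against numpy frames with the pyzbar package instead of the zbar binding used above; a minimal sketch, with a hypothetical helper name:

# Hypothetical modern equivalent, assuming `opencv-python` and `pyzbar` are installed
import cv2
from pyzbar import pyzbar


def scanner_process_cv2(frame):
    # pyzbar accepts a grayscale numpy array directly
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    for symbol in pyzbar.decode(gray):
        print('Result : %s symbol "%s"' % (symbol.type, symbol.data.decode('utf-8')))
    cv2.imshow("webcam", frame)
    cv2.waitKey(10)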
Example #3
    def run(self):
        started = time.time()
        while True:

            currentframe = cv.QueryFrame(self.capture)
            instant = time.time()  #Get timestamp of the frame

            self.processImage(currentframe)  #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant  #Update the trigger_time
                    if instant > started + 10:  #Wait 10 seconds after the webcam starts (luminosity adjustment, etc.)
                        print "Something is moving !"
                        if self.doRecord:  #set isRecording=True only if we record a video
                            self.isRecording = True
                cv.DrawContours(currentframe, self.currentcontours,
                                (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
            else:
                if instant >= self.trigger_time + 10:  #Record during 10 seconds
                    print "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(currentframe,
                               datetime.now().strftime("%b %d, %H:%M:%S"),
                               (25, 30), self.font, 0)  #Put date on the frame
                    cv.WriteFrame(self.writer, currentframe)  #Write the frame

            if self.show:
                cv.ShowImage("Image", currentframe)

            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:  #Break on Esc or Enter
                break
Example #4
    def pc_and_tape(self):
        # Execute vision code
        cap = cv2.VideoCapture(0)

        # Initialize detectors
        pc = PC()

        # Keep the last known cube coordinate across frames so it
        # survives frames where the cube is lost
        last = 0

        while True:
            ret, frame = cap.read()

            c_names, c_values = pc.find_cubes_with_contours(frame)
            t_names, t_values = pc.find_cubes_with_contours(frame)
            self.send(t_names, t_values)

            # Keep old coordinates when cube is lost
            if c_values[0] == 0:
                self.send(c_names, [last])
            else:
                last = c_values[0]
                self.send(c_names, c_values)

            if cv2.waitKey(1) == 27:
                break
        cap.release()
        cv2.destroyAllWindows()
Example #5
def grab_images(video_file, frame_inc=100, delay=100):
    """
    Walks through the entire video and save image for each increment
    """
    my_video = init_video(video_file)
    if my_video != None:
        # Display the video and save evry increment frames
        cpt = 0
        img = cv2.QueryFrame(my_video)

        if img != None:
            cv2.NamedWindow("Vid", cv2.CV_WINDOW_AUTOSIZE)
        else:
            return None

        nFrames = int(
            cv2.GetCaptureProperty(my_video, cv2.CV_CAP_PROP_FRAME_COUNT))
        while cpt < nFrames:
            for ii in range(frame_inc):
                img = cv2.QueryFrame(my_video)
                cpt += 1

            cv2.ShowImage("Vid", img)
            out_name = "" + str(cpt) + ".jpg"
            cv2.SaveImage(out_name, img)
            print out_name, str(nFrames)
            cv2.WaitKey(delay)
    else:
        return None
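Under the modern cv2 API the same walk can seek directly instead of decoding every frame; a sketch with a hypothetical helper name, assuming video_file is any path cv2.VideoCapture can open:

import cv2


def grab_images_cv2(video_file, frame_inc=100):
    # Hypothetical modern equivalent of grab_images()
    cap = cv2.VideoCapture(video_file)
    if not cap.isOpened():
        return None
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    for pos in range(frame_inc, n_frames, frame_inc):
        cap.set(cv2.CAP_PROP_POS_FRAMES, pos)  # seek instead of decoding every frame
        ret, img = cap.read()
        if not ret:
            break
        cv2.imwrite(str(pos) + ".jpg", img)
    cap.release()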
Example #6
    def run(self):
        started = time.time()
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time()  #Get timestamp of the frame

            self.processImage(curframe)  #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant  #Update the trigger_time
                    if instant > started + 5:  #Wait 5 seconds after the webcam starts (luminosity adjustment, etc.)
                        print("Something is moving !")
                        if self.doRecord:  #set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time + 10:  #Record during 10 seconds
                    print("Stop recording")
                    self.isRecording = False
                else:
                    cv.PutText(curframe,
                               datetime.now().strftime("%b %d, %H:%M:%S"),
                               (25, 30), self.font, 0)  #Put date on the frame
                    cv.WriteFrame(self.writer, curframe)  #Write the frame

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            cv.Copy(self.frame2gray, self.frame1gray)
            c = cv.WaitKey(1)
            if c == 27 or c == 1048603:  #Break on Esc (1048603 is Esc with NumLock)
                break
Example #7
def display_video(my_video, frame_inc=100, delay=100):
    """
    Displays frames of the video in a dumb way.
    Used to see if everything is working fine
    my_video = cv2Capture object
    frame_inc = Nmber of increments between each frame displayed
    delay = time delay between each image 
    """
    cpt = 0
    img = cv2.QueryFrame(my_video)

    if img != None:
        cv2.NamedWindow("Vid", cv2.CV_WINDOW_AUTOSIZE)
    else:
        return None

    nFrames = int(cv2.GetCaptureProperty(my_video,
                                         cv2.CV_CAP_PROP_FRAME_COUNT))
    while cpt < nFrames:
        for ii in range(frame_inc):
            img = cv2.QueryFrame(my_video)
            cpt + 1

        cv2.ShowImage("Vid", img)
        cv2.WaitKey(delay)
Example #8
def display_img(img, delay=1000):
    """
    Displays the given image on screen.
    """
    cv.NamedWindow("Vid", cv.CV_WINDOW_AUTOSIZE)
    cv.ShowImage("Vid", img)
    cv.WaitKey(delay)
Example #9
def show(area):
    # area is assumed to be [(x, y, w, h)]
    cv2.rectangle(img, (area[0][0], area[0][1]),
                  (area[0][0] + area[0][2], area[0][1] + area[0][3]),
                  (255, 0, 0), 2)
    cv2.namedWindow('Face Detection', cv2.WINDOW_NORMAL)
    cv2.imshow('Face Detection', img)
    cv2.waitKey()
Example #10
def draw_model_fitter(f):
    cv.NamedWindow("Model Fitter", cv.CV_WINDOW_AUTOSIZE)
    # Copy the image so the original is not drawn on
    i = cv.CreateImage(cv.GetSize(f.image), f.image.depth, 3)
    cv.Copy(f.image, i)
    for pt in f.shape.pts:
        # Draw the shape points
        cv.Circle(i, (int(pt.x), int(pt.y)), 2, (0, 0, 0), -1)
    cv.ShowImage("Model Fitter", i)
    cv.WaitKey()
Example #11
class cvBridgeDemo():
    def __init__(self):
        self.node_name = "cv_bridge_demo"

        rospy.init_node(self.node_name)

        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)

        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.node_name
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        cv.MoveWindow(self.cv_window_name, 25, 75)

        # And one for the depth image
        cv.NamedWindow("Depth Image", cv.CV_WINDOW_NORMAL)
        cv.MoveWindow("Depth Image", 25, 350)

        # Create the cv_bridge object
        self.bridge = CvBridge()

        # Subscribe to the camera image and depth topics and set
        # the appropriate callbacks
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                          self.image_callback)
        self.depth_sub = rospy.Subscriber("/camera/depth/image_raw", Image,
                                          self.depth_callback)

        rospy.loginfo("Waiting for image topics...")

    def image_callback(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        try:
            frame = self.bridge.imgmsg_to_cv(ros_image, "bgr8")
        except CvBridgeError, e:
            print e
            return

        # Convert the image to a Numpy array since most cv2 functions
        # require Numpy arrays.
        frame = np.array(frame, dtype=np.uint8)

        # Process the frame using the process_image() function
        display_image = self.process_image(frame)

        # Display the image.
        cv2.imshow(self.node_name, display_image)

        # Process any keyboard commands
        self.keystroke = cv.WaitKey(5)
        if 32 <= self.keystroke and self.keystroke < 128:
            cc = chr(self.keystroke).lower()
            if cc == 'q':
                # The user has pressed the q key, so exit
                rospy.signal_shutdown("User hit q key to quit.")
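Newer cv_bridge releases expose imgmsg_to_cv2(), which returns a numpy array directly; a minimal sketch of the same callback under that assumption:

    def image_callback(self, ros_image):
        # imgmsg_to_cv2 returns a numpy array, so no np.array() step is needed
        try:
            frame = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return
        display_image = self.process_image(frame)
        cv2.imshow(self.node_name, display_image)
        if cv2.waitKey(5) & 0xFF == ord('q'):
            rospy.signal_shutdown("User hit q key to quit.")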
Example #12
def display_rgb(dev, data, timestamp):
    global keep_running
    rgb = frame_convert.video_cv(data)
    cv.ShowImage('RGB', rgb)
    for x in range(1, 5):
        name = "img%d" % (x)
        cv.SaveImage(name + '.png', rgb)  # save as img1.png ... img4.png
        time.sleep(1)
    if cv.WaitKey(10) == 27:
        keep_running = False
Example #13
def repeat():
    global capture
    global camera_index
    global count
    frame = cv.GetMat(cv.QueryFrame(capture))
    framegray = cv.CreateMat(480, 640, cv.CV_8UC1)
    cv.CvtColor(frame, framegray, cv.CV_BGR2GRAY)
    sys.stdout.write(framegray.tostring())
    c = cv.WaitKey(1)
    if c == 27:
        print(count)
        sys.exit()
Example #14
def show_images(images):
    """ Shows all images in a window. """
    if images is None:
        logging.error(
            'Cannot Show Images (No image saved). Image-Type: %s (tools.py)' %
            str(type(images).__name__))
    elif type(images).__name__ == 'list':
        for image in images:
            if type(image).__name__ == 'ndarray':
                image = array2cv(image)
            cv.ShowImage("Image", image)
            if cv.WaitKey() == 27:
                cv.DestroyWindow("Image")
    elif type(images).__name__ in ('cvmat', 'iplimage'):
        cv.ShowImage("Image", images)
        if cv.WaitKey() == 27:
            cv.DestroyWindow("Image")
    elif type(images).__name__ == 'ndarray':
        images = array2cv(images)
        cv.ShowImage("Image", images)
        if cv.WaitKey() == 27:
            cv.DestroyWindow("Image")
    elif type(images).__name__ == 'str':
        logging.error(
            'TypeError: Cannot Show Images (No image saved?). Image-Type: %s (tools.py)'
            % str(type(images).__name__))
    else:
        logging.error(
            'TypeError: Cannot Show Images. Image-Type: %s (tools.py)' %
            str(type(images).__name__))
Example #15
    def run(self):
        while True:
            img = cv.QueryFrame(self.capture)

            #blur the source image to reduce color noise
            cv.Smooth(img, img, cv.CV_BLUR, 3)

            #convert the image to HSV (Hue, Saturation, Value) so it is
            #easier to determine the color to track (hue)
            hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            #limit all pixels that don't match our criteria; adjust the
            #first value in both tuples to change the hue range (here
            #20-30).  OpenCV uses 0-180 as the hue range for HSV
            greenLower = (20, 190, 165)
            greenUpper = (30, 225, 220)
            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)
            cv.InRangeS(hsv_img, greenLower, greenUpper, thresholded_img)

            #determine the object's moments and check that the area is large
            #enough to be our object
            moments = cv.Moments(cv.GetMat(thresholded_img), 0)
            area = cv.GetCentralMoment(moments, 0, 0)

            #there can be noise in the video so ignore objects with small areas
            if area > 100000:
                #the center of the object is the 1, 0 and 0, 1 moments
                #divided by the area
                x = cv.GetSpatialMoment(moments, 1, 0) / area
                y = cv.GetSpatialMoment(moments, 0, 1) / area

                #create an overlay to mark the center of the tracked object
                overlay = cv.CreateImage(cv.GetSize(img), 8, 3)

                cv.Circle(overlay, (int(x), int(y)), 2, (255, 255, 255), 20)
                cv.Add(img, overlay, img)
                #merge the thresholded image back into img so we can see what
                #was left after it was applied
                cv.Merge(thresholded_img, None, None, None, img)

            #display the image
            cv.ShowImage(color_tracker_window, img)

            if cv.WaitKey(10) == 27:
                break
Example #16
  def show_modes_of_variation(model, mode):
    # Get the limits of the animation
    start = -2*math.sqrt(model.evals[mode])
    stop = -start
    step = (stop - start) / 100

    b_all = np.zeros(model.modes)
    b = start
    while True:
      b_all[mode] = b
      s = model.generate_example(b_all)
      ShapeViewer.show_shapes([s])
      # Reverse direction when we get to the end to keep it running
      if (b < start and step < 0) or (b > stop and step > 0):
        step = -step
      b += step
      c = cv.WaitKey(10)
      if chr(255&c) == 'q': break
Example #17
def analyze_webcam(width, height):
    print("""
    ' ' : extract colors of detected face
    'b' : toggle onlyBlackCubes
    'd' : toggle dodetection
    'm' : shift right
    'n' : shift left
    'r' : reset everything
    'q' : print hsvs
    'p' : resolve colors
    'u' : toggle didassignments
    's' : save image
""")

    # 0 for laptop camera
    # 1 for usb camera
    capture = cv2.VideoCapture(0)

    # Set the capture resolution
    #cv2.SetCaptureProperty(capture, cv2.CV_CAP_PROP_FRAME_WIDTH, width)
    #cv2.SetCaptureProperty(capture, cv2.CV_CAP_PROP_FRAME_HEIGHT, height)

    # Create the window and set the size to match the capture resolution
    cv2.namedWindow("Fig", cv2.WINDOW_NORMAL)
    #cv2.ResizeWindow("Fig", width, height)

    # Capture frame-by-frame
    ret, frame = capture.read()
    rf = RubiksFinder(width, height)

    while True:
        ret, frame = capture.read()

        if not ret:
            break

        rf.analyze_frame(frame)

        if not rf.process_keyboard_input():
            break

    cv2.destroyWindow("Fig")
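The commented-out SetCaptureProperty and ResizeWindow calls above have direct modern equivalents; a sketch (whether the resolution request is honored depends on the camera driver):

# Modern equivalents of the commented-out legacy calls
capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
cv2.resizeWindow("Fig", width, height)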
Example #18
 def onNextFrame(self,e):
     frPrStart = time.time()
     frameImg=[]
     frameImg.append(cFrame[:,:,0])#0
     #print [tObj.kID for tObj in tObjList.itervalues()]
     curr_frame_num_write = GlProp.vidstream.get(cv.CAP_PROP_POS_FRAMES) 
     ret,frame=GetGSFrame(GlProp.vidstream) #read() method advances the frame index by 1
     frameImg.append(frame)#1
     if len(GlProp.vidfile)>0:
         setpos=GlProp.vidstream.set(cv.CAP_PROP_POS_FRAMES,GlProp.fIdx+GlProp.fDir)
         GlProp.fIdx = GlProp.vidstream.get(cv.CAP_PROP_POS_FRAMES)
     else:
         setpos=True
     if ret & setpos & (GlProp.fIdx+GlProp.fDir > -1) & (GlProp.fIdx+GlProp.fDir < GlProp.vidstream.get(cv.CAP_PROP_FRAME_COUNT)):
         dFrame=CalculateBGDiff(cFrame[:,:,0],frame,GlProp.trckThrsh,GlProp.TrckParameter)
         frameImg.append(dFrame)#2
         CurContours = updateTracking(curr_frame_num_write,dFrame,minSize=GlProp.szThrsh,minDist=GlProp.mDist,currObjs=tObjList,trackData=GlProp.trackData)
         if GlProp.fIdx % GlProp.bgRatio == 0: #only update background if the frame # is a multiple of the bgRatio
             cFrame[:,:,0] = BGUpdateFnc(cFrame[:,:,0],frame,tObjList,CurContours,GlProp.alpha)
         if GlProp.cvCB["TrackWindow"] | (GlProp.trackingON & self.val_CB.GetValue()):
             frameImg.append(draw_tObjs(frame,CurContours,tObjList,GlProp.mDist))#3
         if GlProp.trackingON & self.val_CB.GetValue():
             GlProp.vidWriter.write(frameImg[GlProp.cvFrame["TrackWindow"]])
             cv2.waitKey(20)
         for cvWindow in GlProp.cvCB:
             if GlProp.cvCB[cvWindow]:
                 cv2.imshow(cvWindow,frameImg[GlProp.cvFrame[cvWindow]])
                 cv2.waitKey(1)
     else:
         GlProp.frametimer.Stop()
         GlProp.processratetimer.Stop()
         if GlProp.trackingON:
             self.stopTracking()
     #del frame
     #del frameImg
     GlProp.processRate = GlProp.processRate + np.array([1, time.time()-frPrStart])
Example #19
def getPolar2CartImg(image, rad):
	imgSize = cv.GetSize(image)
	c = (float(imgSize[0]/2.0), float(imgSize[1]/2.0))
	imgRes = cv.CreateImage((rad*3, int(360)), 8, 3)
	#cv.LogPolar(image,imgRes,c,50.0, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS)
	cv.LogPolar(image,imgRes,c,60.0, cv.CV_INTER_LINEAR+cv.CV_WARP_FILL_OUTLIERS)
	return (imgRes)

# Window creation for showing input, output
cv.NamedWindow("input", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("output", cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow("normalized", cv.CV_WINDOW_AUTOSIZE)

eyesList = os.listdir('images/eyes')
key = 0
while True:
	eye = getNewEye(eyesList)
	frame = cv.LoadImage("images/eyes/"+eye)
	iris = cv.CloneImage(frame)
	output = getPupil(frame)
	iris = getIris(output)
	cv.ShowImage("input", frame)
	cv.ShowImage("output", iris)
	normImg = cv.CloneImage(iris)
	normImg = getPolar2CartImg(iris,radius)
	cv.ShowImage("normalized", normImg)
	key = cv.WaitKey(3000)
	# seems like Esc with NumLock equals 1048603
	if (key == 27 or key == 1048603):
		break

cv.DestroyAllWindows()
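cv.LogPolar was removed from modern OpenCV; recent builds expose cv2.warpPolar with the WARP_POLAR_LOG flag for the same unwrap. A sketch with a hypothetical helper name, assuming image is a numpy BGR array (the maxRadius value is an assumption):

import cv2


def getPolar2CartImg_cv2(image, rad):
    # Hypothetical modern equivalent of getPolar2CartImg()
    h, w = image.shape[:2]
    center = (w / 2.0, h / 2.0)
    return cv2.warpPolar(image, (rad * 3, 360), center, rad * 3,
                         cv2.INTER_LINEAR | cv2.WARP_POLAR_LOG)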
Example #20
while True:
    frameCounter += 1
    if cap.get(cv2.CAP_PROP_FRAME_COUNT) == frameCounter:
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
        frameCounter = 0

    _, img = cap.read()
    imgHsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    h_min = cv2.getTrackbarPos("HUE Min", "HSV")
    h_max = cv2.getTrackbarPos("HUE Max", "HSV")
    s_min = cv2.getTrackbarPos("SAT Min", "HSV")
    s_max = cv2.getTrackbarPos("SAT Max", "HSV")
    v_min = cv2.getTrackbarPos("VALUE Min", "HSV")
    v_max = cv2.getTrackbarPos("VALUE Max", "HSV")
    print(h_min)

    lower = np.array([h_min, s_min, v_min])
    upper = np.array([h_max, s_max, v_max])
    mask = cv2.inRange(imgHsv, lower, upper)
    result = cv2.bitwise_and(img, img, mask=mask)

    mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    hStack = np.hstack([img, mask, result])
    cv2.imshow('Horizontal Stacking', hStack)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
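The loop above reads six trackbars from an "HSV" window that must be created beforehand; a minimal setup sketch:

import cv2


def nothing(_):
    # trackbar callbacks are required but can be no-ops
    pass


cv2.namedWindow("HSV")
cv2.createTrackbar("HUE Min", "HSV", 0, 179, nothing)
cv2.createTrackbar("HUE Max", "HSV", 179, 179, nothing)
cv2.createTrackbar("SAT Min", "HSV", 0, 255, nothing)
cv2.createTrackbar("SAT Max", "HSV", 255, 255, nothing)
cv2.createTrackbar("VALUE Min", "HSV", 0, 255, nothing)
cv2.createTrackbar("VALUE Max", "HSV", 255, 255, nothing)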
Example #21
        frame = cv.QueryFrame(capture)
        if frame is None:
            # no image captured... end the processing
            break
        #
        ### check OS
        if (osName == "nt"):
            cv.Flip(frame, frame, 0)
        else:
            cv.Flip(frame, None, 1)
        #
        ### detecting faces here
        detect_and_draw(frame, cascade)
        #
        ### handle key events
        k = cv.WaitKey(5)
        if k % 0x100 == 27:
            # user has pressed the ESC key, so exit
            cv.DestroyWindow('Camera')
            break

import numpy as np
import cv2

# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades

#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml

cap = cv2.VideoCapture(0)
Example #22
from time import time as timer
import tensorflow as tf
import numpy as np
import sys
import cv2.cv as cv  # the legacy API used below lived in cv2.cv (OpenCV 2.4)
import os
vidFile = cv.CaptureFromFile('Test_Avi')

nFrames = int(cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(vidFile, cv.CV_CAP_PROP_FPS)
waitPerFrameInMillisec = int(1 / fps * 1000 / 1)

print('Num. Frames = ', nFrames)
print('Frame Rate = ', fps, ' frames per sec')

for f in xrange(nFrames):
    frameImg = cv.QueryFrame(vidFile)
    cv.ShowImage("My Video Window", frameImg)
    cv.WaitKey(waitPerFrameInMillisec)

# When playing is done, delete the window
#  NOTE: this step is not strictly necessary,
#         when the script terminates it will close all windows it owns anyways
cv.DestroyWindow("My Video Window")
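The CaptureFromFile pipeline above only runs on OpenCV 2.4; a minimal modern sketch of the same playback loop, assuming 'Test_Avi' is a readable video path:

import cv2

cap = cv2.VideoCapture('Test_Avi')
fps = cap.get(cv2.CAP_PROP_FPS)
delay = int(1000 / fps) if fps > 0 else 33  # fall back to ~30 fps

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow("My Video Window", frame)
    if cv2.waitKey(delay) & 0xFF == 27:  # Esc stops playback early
        break

cap.release()
cv2.destroyAllWindows()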
Example #23
import serial  #import the pyserial module
import cv2.cv as cv  #legacy OpenCV API, assumed by the calls below

#Module -1: Image Processing
hc = cv.Load(
    '/home/george/PycharmProjects/Embeded image processing system/haarcascade_frontalface_alt2.xml'
)
img = cv.LoadImage('/home/jayneil/beautiful-faces.jpg', 0)
faces = cv.HaarDetectObjects(img, hc, cv.CreateMemStorage())
a = 1
print(faces)
for (x, y, w, h), n in faces:
    cv.Rectangle(img, (x, y), (x + w, y + h), 255)
cv.SaveImage("faces_detected.jpg", img)
dst = cv.LoadImage('faces_detected.jpg')
cv.NamedWindow('Face Detected', cv.CV_WINDOW_AUTOSIZE)
cv.ShowImage('Face Detected', dst)
cv.WaitKey(5000)
cv.DestroyWindow('Face Detected')

#Module -2: Trigger Pyserial
if faces == []:

    ser = serial.Serial('/dev/ttyUSB0', 9600)
    print(ser)
    ser.write('N')
else:

    ser = serial.Serial('/dev/ttyUSB0', 9600)
    print(ser)
    ser.write('Y')
Example #24
    def run(self):
        # Capture first frame to get size
        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        width = frame.width
        height = frame.height
        surface = width * height  #Surface area of the image
        cursurface = 0  #Holds the total contour area that has changed

        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)
        moving_average = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_32F, 3)
        difference = None

        while True:
            color_image = cv.QueryFrame(self.capture)

            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 3,
                      0)  #Remove false positives

            if not difference:  #On the first frame, initialize difference, temp and moving_average
                difference = cv.CloneImage(color_image)
                temp = cv.CloneImage(color_image)
                cv.ConvertScale(color_image, moving_average, 1.0, 0.0)
            else:
                cv.RunningAvg(color_image, moving_average, 0.020,
                              None)  #Compute the average

            # Convert the scale of the moving average.
            cv.ConvertScale(moving_average, temp, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, temp, difference)

            #Convert the image so that it can be thresholded
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)
            cv.Threshold(grey_image, grey_image, 70, 255, cv.CV_THRESH_BINARY)

            cv.Dilate(grey_image, grey_image, None, 18)  #to get object blobs
            cv.Erode(grey_image, grey_image, None, 10)

            # Find contours
            storage = cv.CreateMemStorage(0)
            contours = cv.FindContours(grey_image, storage,
                                       cv.CV_RETR_EXTERNAL,
                                       cv.CV_CHAIN_APPROX_SIMPLE)

            backcontours = contours  #Save contours

            while contours:  #For all contours compute the area
                cursurface += cv.ContourArea(contours)
                contours = contours.h_next()

            avg = (
                cursurface * 100
            ) / surface  #Percentage of the total image area that changed
            if avg > self.ceil:
                print("Something is moving !")
            #print avg,"%"
            cursurface = 0  #Reset the current surface to 0

            #Draw the contours on the image
            _red = (0, 0, 255)  #Red for external contours
            _green = (0, 255, 0)  #Green for internal contours
            levels = 1  #1 contours drawn, 2 internal contours as well, 3 ...
            cv.DrawContours(color_image, backcontours, _red, _green, levels, 2,
                            cv.CV_FILLED)

            cv.ShowImage("Target", color_image)

            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
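The same running-average detector translates to the modern API; a sketch using the OpenCV 4 findContours signature, with an assumed 5% trigger standing in for self.ceil:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
avg = None
while True:
    ret, frame = cap.read()
    if not ret:
        break
    blurred = cv2.GaussianBlur(frame, (3, 3), 0)  #Remove false positives
    if avg is None:
        avg = np.float32(blurred)
    cv2.accumulateWeighted(blurred, avg, 0.020)  #Compute the average
    diff = cv2.absdiff(blurred, cv2.convertScaleAbs(avg))
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 70, 255, cv2.THRESH_BINARY)
    mask = cv2.dilate(mask, None, iterations=18)  #to get object blobs
    mask = cv2.erode(mask, None, iterations=10)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    moved = sum(cv2.contourArea(c) for c in contours)
    if moved * 100.0 / (frame.shape[0] * frame.shape[1]) > 5:
        print("Something is moving !")
    cv2.drawContours(frame, contours, -1, (0, 0, 255), 2)
    cv2.imshow("Target", frame)
    if cv2.waitKey(7) & 0xFF in (27, 10):  # Esc or Enter
        break

cap.release()
cv2.destroyAllWindows()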
Example #25
def main():

    global current_image
    global current_img_file_name
    global has_roi
    global roi_x0
    global roi_y0
    global roi_x1
    global roi_y1

    iKey = 0

    files = glob.glob(image_file_glob)
    if len(files) == 0:
        print("No files match glob pattern")
        return

    files = [os.path.abspath(f) for f in files]
    files.sort()

    # init GUI
    cv.NamedWindow(window_name, 1)
    cv.SetMouseCallback(window_name, on_mouse, None)

    sys.stderr.write("Opening directory...")
    # init output of rectangles to the info file
    #os.chdir(input_directory)
    sys.stderr.write("done.\n")

    current_file_index = 0

    while True:

        current_img_file_name = files[current_file_index]

        num_of_rec = 0
        sys.stderr.write(
            "Loading current_image (%d/%d) %s...\n" %
            (current_file_index + 1, len(files), current_img_file_name))

        try:
            current_image = cv.LoadImage(current_img_file_name, 1)
        except IOError:
            sys.stderr.write("Failed to load current_image %s.\n" %
                             current_img_file_name)
            return -1

        #  Work on current current_image
        #cv.ShowImage(window_name, current_image)
        redraw()

        # Need to figure out waitkey returns.
        # <Space> =  32     add rectangle to current image
        # <left>  =  81     save & next
        # <right> =  83     save & prev
        # <a>     =  97     add rect to table
        # <b>     =  98     toggle file is background or not
        # <d>     = 100     remove old rect
        # <q>     = 113     exit program
        # <s>     = 115     save rect table
        # <x>     = 136     skip image
        iKey = cv.WaitKey(0) % 255
        # This is ugly, but is actually a simplification of the C++.
        #sys.stderr.write(str(iKey) + '\n')
        if draging:
            continue

        if iKey == 81:
            current_file_index -= 1
            if current_file_index == -1:
                current_file_index = len(files) - 1
            clear_roi()
        elif iKey == 83:
            current_file_index += 1
            if current_file_index == len(files):
                current_file_index = 0
            clear_roi()
        elif iKey == 113:
            cv.DestroyWindow(window_name)
            return 0
        elif iKey == 97:
            rect_table.setdefault(current_img_file_name, set()).add(
                (roi_x0, roi_y0, roi_x1 - roi_x0, roi_y1 - roi_y0))
            clear_roi()
            write_rect_table()
            redraw()
        elif iKey == 98:
            if current_img_file_name in background_files:
                background_files.remove(current_img_file_name)
            else:
                background_files.add(current_img_file_name)
        elif iKey == 100:
            remove_rect(cur_mouse_x, cur_mouse_y)
        elif iKey == 115:
            write_rect_table()
        elif iKey == 136:
            sys.stderr.write("Skipped %s.\n" % current_file_index)
Example #26
    def run(self):
        self.on_segment()
        cv.WaitKey(0)
for (rho, theta) in lines[:100]:
    a = math.cos(theta)  #Calculate orientation in order to draw the lines
    b = math.sin(theta)
    x0 = a * rho
    y0 = b * rho
    pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
    pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
    cv.Line(color_dst_standard, pt1, pt2, cv.CV_RGB(255, 0, 0), 2,
            4)  #Draw the line

#---- Probabilistic ----
color_dst_proba = cv.CreateImage(cv.GetSize(im), 8, 3)
cv.CvtColor(im, color_dst_proba, cv.CV_GRAY2BGR)  # same conversion as above

rho = 1
theta = math.pi / 180
thresh = 50
minLength = 120  # Values can be changed approximately to fit your image edges
maxGap = 20

lines = cv.HoughLines2(dst, cv.CreateMemStorage(0), cv.CV_HOUGH_PROBABILISTIC,
                       rho, theta, thresh, minLength, maxGap)
for line in lines:
    cv.Line(color_dst_proba, line[0], line[1], cv.CV_RGB(255, 0, 0), 2, 8)

cv.ShowImage('Image', im)
cv.ShowImage("Cannied", dst)
cv.ShowImage("Hough Standard", color_dst_standard)
cv.ShowImage("Hough Probabilistic", color_dst_proba)
cv.WaitKey(0)
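With modern OpenCV the probabilistic transform above becomes a single cv2.HoughLinesP call; a sketch assuming dst and color_dst_proba are numpy arrays (the edge map and a BGR canvas):

import cv2
import numpy as np

lines = cv2.HoughLinesP(dst, 1, np.pi / 180, 50,
                        minLineLength=120, maxLineGap=20)
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:
        # (0, 0, 255) is red in BGR order
        cv2.line(color_dst_proba, (x1, y1), (x2, y2), (0, 0, 255), 2)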
while True:
    ret, frame = VideoCapture.read()

    #The above code reads the video frame by frame

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = FaceCascade.detectMultiScale(gray,
                                         scaleFactor=1.1,
                                         minNeighbors=5,
                                         minSize=(30, 30),
                                         flags=cv2.cv.CV_HAAR_SCALE_IMAGE)

    #The above snippet searches for faces in the video

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow("Video", frame)

#The above snippet is used to draw rectangles around the face

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

#Press 'q' to quit (any other key can be chosen instead of 'q')

VideoCapture.release()
cv2.destroyAllWindows()

#The above code is used to terminate the video.
            # Display the received image
            shape = metadata['shape']
            nchannels = metadata['nChannels']
            depth = metadata['depth']
            digest = metadata['md5']
            h = hashlib.md5()
            h.update(binary_data)
            dig = h.hexdigest()
            if dig == digest:
                print "Correct MD5 sum on binary data: %s" % dig
            else:
                print "Incorrect MD5 sum: %s  (should be %s)" % (dig, digest)
            img = cv.CreateImageHeader(shape, depth, nchannels)
            cv.SetData(img, binary_data)
            cv.ShowImage(name, img)
            cv.WaitKey(30)

        if not server:
            # Send an image to the server
            img = random.choice(images)
            metadata = {
                "shape": (img.width, img.height),
                "nChannels": img.nChannels,
                "depth": img.depth
            }
            binary_data = img.tostring()
            h = hashlib.md5()
            h.update(binary_data)
            metadata['md5'] = h.hexdigest()
            print "Sending image with checksum: %s" % metadata['md5']
            client.send(metadata, binary_data)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, 1280)
cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, 720)
frame = cv.QueryFrame(capture)
test = cv.CreateImage(cv.GetSize(frame), 8, 3)
cv.NamedWindow("output")
posx = 0
posy = 0
while True:
    frame = cv.QueryFrame(capture)
    cv.Flip(frame, frame, 1)
    # we make all drawings on imdraw.
    imdraw = cv.CreateImage(cv.GetSize(frame), 8, 3)
    # we get coordinates from imgyellowthresh
    imgyellowthresh = getthresholdedimg(frame)
    # eroding removes small noises
    cv.Erode(imgyellowthresh, imgyellowthresh, None, 1)
    (leftmost, rightmost, topmost, bottommost) = getpositions(imgyellowthresh)
    if (leftmost - rightmost != 0) or (topmost - bottommost != 0):
        lastx = posx
        lasty = posy
        posx = cv.Round((rightmost + leftmost) / 2)
        posy = cv.Round((bottommost + topmost) / 2)
        if lastx != 0 and lasty != 0:
            win32api.SetCursorPos((posx, posy))

    cv.Add(test, imdraw, test)
    cv.ShowImage("output", test)
    if cv.WaitKey(10) >= 0:
        break
cv.DestroyWindow("output")