Code Example #1
def expand_video(capture,
                 writer,
                 final_frames_number,
                 duplicate_step,
                 verbose=False):
    # Copy frames from capture to writer, duplicating one output frame every
    # duplicate_step written frames, until final_frames_number frames have
    # been written or the source runs out.
    current_frame = 0
    added_frames = 0
    while current_frame < final_frames_number:
        if verbose:
            my_string = "%.2f" % (float(current_frame) / final_frames_number *
                                  100)
            print current_frame, '/', final_frames_number, my_string + "%\r",

        frame = cv.QueryFrame(capture)
        if frame is None:
            break

        cv.WriteFrame(writer, frame)
        current_frame += 1

        if current_frame % duplicate_step == 0:
            added_frames += 1
            current_frame += 1
            cv.WriteFrame(writer, frame)

    return added_frames
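A usage note (not from the source): expand_video expects an already-open capture and a writer whose fps and frame size match the source, and final_frames_number counts output frames. A minimal driver sketch, with illustrative file names and step size:

import cv2.cv as cv

capture = cv.CaptureFromFile("input.avi")  # hypothetical input path
fps = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS))
size = (int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)),
        int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT)))
total = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
writer = cv.CreateVideoWriter("expanded.avi",
                              cv.CV_FOURCC('D', 'I', 'V', 'X'), fps, size, 1)

# With duplicate_step=5 roughly every 4th source frame is doubled,
# so ask for about 25% more output frames than the source has.
added = expand_video(capture, writer, total * 5 // 4, 5, verbose=True)
print "Added", added, "frames"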
Code Example #2
def main():
    global degree_value
    global pi
    global s
    global fps
    if getArgs() == 1:
        return
    # n, width, height, img_name, getArgs and drawImage are module-level
    # names defined elsewhere in the file.
    degree_value = 2 * pi / n
    #drawImage(0)
    #return
    writer = cv.CreateVideoWriter("original.avi", 0, 7 * s, (width, height))
    writer2 = cv.CreateVideoWriter("output.avi", 0, fps, (width, height))
    nFrames = int(14 * s)
    addon = pi * 2 / 7
    c = addon
    drawImage(0)
    for i in range(nFrames):
        c = c + addon
        drawImage(c)
        img2 = cv.LoadImage(img_name)
        cv.WriteFrame(writer, img2)

    nFrames = int(2 * fps)
    addon2 = pi * 2 * s / fps
    c2 = addon2
    drawImage(0)
    for j in range(nFrames):
        c2 = c2 + addon2
        drawImage(c2)
        img2 = cv.LoadImage(img_name)
        cv.WriteFrame(writer2, img2)
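An observation (not stated in the source): both writers end up with about two seconds of video, since the first writes int(14 * s) frames at 7 * s fps and the second int(2 * fps) frames at fps. A quick check with hypothetical values:

s, fps = 3, 30
assert int(14 * s) / (7 * s) == 2  # duration of original.avi, in seconds
assert int(2 * fps) / fps == 2     # duration of output.avi, in seconds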
Code Example #3
def reduce_video(capture,
                 writer,
                 final_frames_number,
                 skip_step,
                 verbose=False):
    # Copy frames from capture to writer, discarding one source frame after
    # every skip_step written frames, until final_frames_number frames have
    # been written or the source runs out.
    current_frame = 0
    removed_frames = 0
    while current_frame < final_frames_number:
        if verbose:
            my_string = "%.2f" % (float(current_frame) / final_frames_number *
                                  100)
            print current_frame, '/', final_frames_number, my_string + "%\r",

        frame = cv.QueryFrame(capture)
        if frame is None:
            break

        cv.WriteFrame(writer, frame)
        current_frame += 1

        if current_frame % skip_step == 0:
            removed_frames += 1
            frame = cv.QueryFrame(capture)
            if frame is None:
                break

    return removed_frames
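Because reduce_video discards one source frame per skip_step written frames, the output keeps roughly skip_step / (skip_step + 1) of the input. A small helper for choosing skip_step; this is a sketch, not part of the original project:

def pick_skip_step(src_frames, dst_frames):
    # One source frame is dropped per skip_step written frames, so
    # src_frames ~= dst_frames + dst_frames / skip_step.
    removed = src_frames - dst_frames
    if removed <= 0:
        raise ValueError("reduce_video can only shorten a clip")
    return max(1, dst_frames // removed)

# Example: 120 source frames down to ~100 -> skip_step 5
# (per 5 written frames, 6 are consumed: 120 consumed -> 100 written).
print pick_skip_step(120, 100)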
Code Example #4
    def run(self):
        started = time.time()
        while True:

            currentframe = cv.QueryFrame(self.capture)
            instant = time.time()  # Get the timestamp of the frame

            self.processImage(currentframe)  #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant  # Update the trigger_time
                    if instant > started + 10:  # Wait 10 seconds after the webcam starts (luminosity adjustment etc.)
                        print("Something is moving !")
                        if self.doRecord:  # Set isRecording=True only if we record a video
                            self.isRecording = True
                cv.DrawContours(currentframe, self.currentcontours,
                                (0, 0, 255), (0, 255, 0), 1, 2, cv.CV_FILLED)
            else:
                if instant >= self.trigger_time + 10:  # Record for 10 seconds
                    print("Stop recording")
                    self.isRecording = False
                else:
                    cv.PutText(currentframe,
                               datetime.now().strftime("%b %d, %H:%M:%S"),
                               (25, 30), self.font, 0)  #Put date on the frame
                    cv.WriteFrame(self.writer, currentframe)  #Write the frame

            if self.show:
                cv.ShowImage("Image", currentframe)

            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10:  # Break if user presses 'Esc' or 'Enter'.
                break
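The `% 0x100` applied to cv.WaitKey above is not cosmetic: on some HighGUI backends the raw return value carries toolkit modifier bits, which is why other examples below compare against 1048603 (27 + 0x100000) instead of 27. A minimal sketch of the masked check:

c = cv.WaitKey(1) % 0x100  # strip any high modifier bits from the key code
if c == 27 or c == 10:     # 27 = Esc, 10 = Enter
    print "quit requested"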
Code Example #5
File: motion3.py Project: mateodurante/taller3
    def run(self):
        started = time.time()
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time() # Get the timestamp of the frame

            self.processImage(curframe) #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant # Update the trigger_time
                    if instant > started + 5: # Wait 5 seconds after the webcam starts (luminosity adjustment etc.)
                        print "Something is moving !"
                        if self.doRecord: #set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time + 10: # Record for 10 seconds
                    print "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) #Put date on the frame
                    cv.WriteFrame(self.writer, curframe) #Write the frame

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            cv.Copy(self.frame2gray, self.frame1gray)
            c = cv.WaitKey(1)
            if c == 27 or c == 1048603: # Break on 'Esc' (1048603 is Esc with high modifier bits set).
                break
Code Example #6
File: grabber.py Project: Steadroy/infographics-1
    def initGrabQt(self):

        image_qt = QtGui.QPixmap.grabWidget(self.view).toImage()
        image_qt_size = (image_qt.size().width(), image_qt.size().height())

        cv_im_4chan = cv.CreateImageHeader(image_qt_size, cv.IPL_DEPTH_8U, 4)
        cv_im = cv.CreateImage(image_qt_size, cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_im_4chan, image_qt.bits().asstring(image_qt.numBytes()))
        cv.CvtColor(cv_im_4chan, cv_im, cv.CV_RGBA2RGB)

        fourcc = cv.CV_FOURCC('D','I','V','X')
        fps = 25
        width, height = cv.GetSize(cv_im)
        self.writer = cv.CreateVideoWriter('out3.avi', fourcc, fps, (int(width), int(height)), 1)

        cv.WriteFrame(self.writer, cv_im)

        timer = QtCore.QTimer()
        time_interval = 1000 / 25
        timer.setInterval(time_interval)
        timer.timeout.connect(self.grabFrameQt)
        timer.start()
        self.timer = timer

        self.stopTimer = QtCore.QTimer()
        self.stopTimer.setInterval(self.total_time)
        self.stopTimer.timeout.connect(self.stopCapture)
        self.stopTimer.setSingleShot(True)
        self.stopTimer.start()
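Code example #12 below sidesteps the 4-channel header and the CvtColor pass by letting Qt convert the pixel format first; the core of that alternative looks like this:

pixmap = QtGui.QPixmap.grabWidget(self.view)
i2 = pixmap.toImage().convertToFormat(QtGui.QImage.Format_RGB888)
i3 = i2.rgbSwapped()  # swap RGB -> BGR, the channel order OpenCV expects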
Code Example #7
File: grabber.py Project: Steadroy/infographics-1
    def initGrab(self):

        image = ImageGrab.grab(self.geometry)
        cv_im = cv.CreateImageHeader(image.size, cv.IPL_DEPTH_8U, 3)

        cv.SetData(cv_im, image.tostring())
        cv.CvtColor(cv_im, cv_im, cv.CV_RGB2BGR)

        fourcc = cv.CV_FOURCC('D','I','V','X')
        fps = 25
        width, height = cv.GetSize(cv_im)
        #print width, height
        self.writer = cv.CreateVideoWriter('out3.avi', fourcc, fps, (int(width), int(height)), 1)

        cv.WriteFrame(self.writer, cv_im)

        self.frames_count = 1

        timer = QtCore.QTimer()
        time_interval = 1000 / 25
        timer.setInterval(time_interval)
        timer.timeout.connect(self.grabFrame)
        timer.start()
        self.timer = timer

        self.stopTimer = QtCore.QTimer()
        self.stopTimer.setInterval(self.total_time)
        self.stopTimer.timeout.connect(self.stopCapture)
        self.stopTimer.setSingleShot(True)
        self.stopTimer.start()
Code Example #8
File: grabber.py Project: Steadroy/infographics-1
    def grabFrame(self):

        image = ImageGrab.grab(self.geometry)
        cv_im = cv.CreateImageHeader(image.size, cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_im, image.tostring())

        # Unlike initGrab, there is no RGB -> BGR conversion here, so the
        # written frames will have red and blue swapped.
        cv.WriteFrame(self.writer, cv_im)

        self.frames_count += 1
        print self.frames_count
Code Example #9
File: grabber.py Project: Steadroy/infographics-1
    def grabFrameQt(self):
        image_qt = QtGui.QPixmap.grabWidget(self.view).toImage()
        image_qt_size = (image_qt.size().width(), image_qt.size().height())

        cv_im_4chan = cv.CreateImageHeader(image_qt_size, cv.IPL_DEPTH_8U, 4)
        cv_im = cv.CreateImage(image_qt_size, cv.IPL_DEPTH_8U, 3)
        cv.SetData(cv_im_4chan, image_qt.bits().asstring(image_qt.numBytes()))
        cv.CvtColor(cv_im_4chan, cv_im, cv.CV_RGBA2RGB)

        cv.WriteFrame(self.writer, cv_im)
Code Example #10
    def run(self):
        started = time.time()
        while True:
            
            curframe = cv.QueryFrame(self.capture)
            instant = time.time() # Get the timestamp of the frame
            
            self.processImage(curframe) #Process the image
            
            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant #Update the trigger_time
                    if instant > started + 5: # Wait 5 seconds after the webcam starts (luminosity adjustment etc.)
                        print datetime.now().strftime("%b %d, %H:%M:%S"), "Something is moving !"
                        host = 'localhost'
                        port = 50000
                        size = 1024
                        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                        s.connect((host, port))
                        s.send('stream')
                        # Note: the frames below are written to stdout, not to the socket s.
                        while True:
                            if not cv.GrabFrame(self.capture): break
                            frame = cv.RetrieveFrame(self.capture)
                            sys.stdout.write(frame.tostring())

                        if self.doRecord: # Set isRecording=True only if we record a video
                            self.isRecording = True
            else:
                if instant >= self.trigger_time + 10: # Record for 10 seconds
                    print datetime.now().strftime("%b %d, %H:%M:%S"), "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) #Put date on the frame
                    cv.WriteFrame(self.writer, curframe) #Write the frame
            
            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)
                
            cv.Copy(self.frame2gray, self.frame1gray)
            c = cv.WaitKey(1) % 0x100
            if c == 27 or c == 10: # Break if user presses 'Esc' or 'Enter'.
                break
Code Example #11
File: main.py Project: Steadroy/infographics-1
    def grabFrame(self):

        start = time.clock()
        image_qt = QtGui.QPixmap.grabWidget(self.view).toImage()
        #image = ImageGrab.grab(self.geometry)
        image_qt_size = (image_qt.size().width(), image_qt.size().height())
        cv_im_4chan = cv.CreateImageHeader(image_qt_size, cv.IPL_DEPTH_8U, 4)
        cv_im = cv.CreateImage(image_qt_size, cv.IPL_DEPTH_8U, 3)

        cv.SetData(cv_im_4chan, image_qt.bits().asstring(image_qt.numBytes()))
        cv.CvtColor(cv_im_4chan, cv_im, cv.CV_RGBA2RGB)
        elapsed = time.clock()
        elapsed -= start
        #print "Time spent in (Qt image grab) is: %0.3f ms" % elapsed

        start = time.time()
        cv.WriteFrame(self.writer, cv_im)
        elapsed = time.time()
        elapsed = elapsed - start
        # print "Time spent in (Write Frame) is:%0.3f ms " % elapsed * 1000

        self.frames_count += 1
Code Example #12
File: main.py Project: Steadroy/infographics-1
    def initGrab(self):
        start = time.clock()
        elapsed = time.clock()
        elapsed -= start
        #print "Time spent in (Qt image grab) is: %0.3f ms\n" % (elapsed * 1000)

        image_qt = QtGui.QPixmap.grabWidget(self.view)
        image_qt_i = image_qt.toImage()
        i2 = image_qt_i.convertToFormat(QtGui.QImage.Format_RGB888)
        i3 = i2.rgbSwapped()
        i3_bits = i3.bits()
        image_qt_size = (i3.size().width(), i3.size().height())
        #image = ImageGrab.grab(self.geometry)
        cv_im = cv.CreateImageHeader(image_qt_size, cv.IPL_DEPTH_8U, 3)

        cv.SetData(cv_im, i3_bits.asstring(i3.numBytes()))

        fourcc = cv.CV_FOURCC('D', 'I', 'V', 'X')
        fps = 25
        width, height = cv.GetSize(cv_im)
        #print width, height
        self.writer = cv.CreateVideoWriter('out3.avi', int(fourcc), fps,
                                           (int(width), int(height)), 1)

        start = time.time()
        cv.WriteFrame(self.writer, cv_im)
        elapsed = time.time()
        elapsed -= start
        #print "Time spent in (Write Frame) is:%0.3f ms \n" % (elapsed * 1000)

        self.frames_count = 1

        timer = QtCore.QTimer()
        time_interval = 1000 / 25
        timer.setInterval(time_interval)
        timer.timeout.connect(self.grabFrame)
        timer.start()
        self.timer = timer
Code Example #13
    def run(self):
        time.sleep(5)
        started = 0
        while True:

            curframe = cv.QueryFrame(self.capture)
            instant = time.time() # Get the timestamp of the frame

            self.processImage(curframe) #Process the image

            if not self.isRecording:
                if self.somethingHasMoved():
                    self.trigger_time = instant # Update the trigger_time
                    if time.time() > started + 10:
                        started = time.time()
                        print "Something is moving !"
                        cv.SaveImage('intruder' + str(self.img) + ".jpg", curframe)
                        sendImage(self.img)
                        self.img = self.img + 1
                        if self.doRecord: # Set isRecording=True only if we record a video
                            self.isRecording = True

            else:
                if instant >= self.trigger_time + 10: # Record for 10 seconds
                    print "Stop recording"
                    self.isRecording = False
                else:
                    cv.PutText(curframe,datetime.now().strftime("%b %d, %H:%M:%S"), (25,30),self.font, 0) #Put date on the frame
                    cv.WriteFrame(self.writer, curframe) #Write the frame

            if self.show:
                cv.ShowImage("Image", curframe)
                cv.ShowImage("Res", self.res)

            cv.Copy(self.frame2gray, self.frame1gray)
            c = cv.WaitKey(1)
            if c == 27 or c == 1048603: # Break on 'Esc' (1048603 is Esc with high modifier bits set).
                break
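sendImage is defined elsewhere in this project; a purely hypothetical sketch of what such a helper could look like, assuming SMTP delivery and placeholder addresses:

def sendImage(index):
    # Hypothetical implementation: e-mail the snapshot saved above.
    import smtplib
    from email.mime.image import MIMEImage
    from email.mime.multipart import MIMEMultipart
    msg = MIMEMultipart()
    msg['Subject'] = 'Intruder %d' % index
    msg['From'] = 'camera@example.com'   # placeholder
    msg['To'] = 'owner@example.com'      # placeholder
    with open('intruder' + str(index) + '.jpg', 'rb') as f:
        msg.attach(MIMEImage(f.read()))
    server = smtplib.SMTP('localhost')
    server.sendmail(msg['From'], [msg['To']], msg.as_string())
    server.quit()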
Code Example #14
        (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

    # If a point's status is OK and the distance it moved is not negligible, keep the point
    k = 0
    for i in range(len(curr_points)):
        nb = abs(int(prev_points[i][0]) - int(curr_points[i][0])) + abs(
            int(prev_points[i][1]) - int(curr_points[i][1]))
        if status[i] and nb > 2:
            initial[k] = initial[i]
            curr_points[k] = curr_points[i]
            k += 1

    curr_points = curr_points[:k]
    initial = initial[:k]
    #At the end only interesting points are kept

    #Draw the line between the first position of a point and the
    #last recorded position of the same point
    for i in range(len(curr_points)):
        cv.Line(output, (int(initial[i][0]), int(initial[i][1])),
                (int(curr_points[i][0]), int(curr_points[i][1])),
                (255, 255, 255))
        cv.Circle(output, (int(curr_points[i][0]), int(curr_points[i][1])), 3,
                  (255, 255, 255))

    cv.Copy(gray, prev_gray)
    prev_points = curr_points

    cv.ShowImage("The Video", output)
    cv.WriteFrame(writer, output)
    cv.WaitKey(wait)
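For context, the orphaned first line of this snippet is the tail of an optical-flow call. Judging from the variable names here and the full call in code example #19, a plausible reconstruction (an assumption; the pyramid buffers and win_size come from elsewhere in the file) is:

curr_points, status, track_error = cv.CalcOpticalFlowPyrLK(
    prev_gray, gray, prev_pyramid, pyramid, prev_points,
    (win_size, win_size), 3,
    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)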
Code Example #15
File: img1_3.py Project: Inspiring26/328
import cv2.cv as cv

capture = cv.CaptureFromCAM(0)
temp = cv.QueryFrame(capture)
writer = cv.CreateVideoWriter("output.avi", cv.CV_FOURCC('D', 'I', 'V', 'X'),
                              15, cv.GetSize(temp), 1)
count = 0
while count < 50:
    print count
    image = cv.QueryFrame(capture)
    cv.WriteFrame(writer, image)
    cv.ShowImage("image_windows", image)
    cv.WaitKey(1)
    count += 1
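For reference, a minimal sketch of the same capture-and-record loop in the newer cv2 API (the legacy cv module was removed in OpenCV 3):

import cv2

cap = cv2.VideoCapture(0)
ok, temp = cap.read()
writer = cv2.VideoWriter("output.avi", cv2.VideoWriter_fourcc(*'DIVX'),
                         15, (temp.shape[1], temp.shape[0]))
count = 0
while count < 50:
    print count
    ok, image = cap.read()
    if not ok:
        break
    writer.write(image)
    cv2.imshow("image_windows", image)
    cv2.waitKey(1)
    count += 1
cap.release()
writer.release()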
Code Example #16
    def writeImage(self):
        if self._videoWriter is not None:
            cv.WriteFrame(self._videoWriter, cv.GetImage(self._currentImage))
Code Example #17
File: fun1.py Project: a550461053/GMCM2017
    # print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES) # frame == index

    # 2. Grab the current frame, frame2, and convert it to greyscale frame2gray
    frame2 = cv.QueryFrame(capture)
    cv.CvtColor(frame2, frame2gray, cv.CV_RGB2GRAY) # convert the original frame2 into frame2gray

    # 3. Frame differencing; the result goes into res
    cv.AbsDiff(frame1gray, frame2gray, res) # absolute difference of frame1gray and frame2gray, written to res
    # res = cv2.absdiff(frame1gray, frame2gray) # equivalent call in the newer cv2 API
    cv.ShowImage("After AbsDiff", res)

    # 4. Save res as the foreground target
    # cv.Convert(res, gray)
    # out_foreground.write(np.uint8(res)) # save as the foreground target
    # out_foreground.write(np.asarray(cv.GetMat(res)))
    cv.WriteFrame(out_foreground, cv.GetImage(res)) # res is a CvMat; convert it to an IplImage

    # 5. Smoothing
    # cv.Smooth(res, res, cv.CV_BLUR, 5, 5) # smooth res

    # 6. Morphological opening and closing
    element = cv.CreateStructuringElementEx(5*2+1, 5*2+1, 5, 5, cv.CV_SHAPE_RECT) # CreateStructuringElementEx(cols, rows, anchorX, anchorY, shape, values=None)

    cv.MorphologyEx(res, res, None, element, cv.CV_MOP_OPEN) # cv2 equivalent: cv2.morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]])
    cv.MorphologyEx(res, res, None, element, cv.CV_MOP_CLOSE) # open, then close, using the 11x11 element created above

    # 7. Binary thresholding: threshold the extracted foreground to discard false foreground pixels
    cv.Threshold(res, res, 10, 255, cv.CV_THRESH_BINARY) # binarize; cv.Threshold(src, dst, threshold, maxValue, thresholdType)


    # 8. Blob extraction of the elevator region ------------- unfinished
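The commented-out calls above already hint at the newer API; a compact sketch of steps 3, 6 and 7 with cv2 (numpy-based, frame names assumed from the snippet):

import cv2

res = cv2.absdiff(frame1gray, frame2gray)                # 3. frame difference
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)      # 6. open ...
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)     #    ... then close
_, res = cv2.threshold(res, 10, 255, cv2.THRESH_BINARY)  # 7. binarize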
Code Example #18
File: track.py Project: otilrac/omniVision
    def run(self):
        # Initialize
        #log_file_name = "tracker_output.log"
        #log_file = file( log_file_name, 'a' )

        frame = cv.QueryFrame(self.capture)
        frame_size = cv.GetSize(frame)

        # Capture the first frame from webcam for image properties
        display_image = cv.QueryFrame(self.capture)

        # Greyscale image, thresholded to create the motion mask:
        grey_image = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 1)

        # The RunningAvg() function requires a 32-bit or 64-bit image...
        running_average_image = cv.CreateImage(cv.GetSize(frame),
                                               cv.IPL_DEPTH_32F, 3)
        # ...but the AbsDiff() function requires matching image depths:
        running_average_in_display_color_depth = cv.CloneImage(display_image)

        # RAM used by FindContours():
        mem_storage = cv.CreateMemStorage(0)

        # The difference between the running average and the current frame:
        difference = cv.CloneImage(display_image)

        target_count = 1
        last_target_count = 1
        last_target_change_t = 0.0
        k_or_guess = 1
        codebook = []
        frame_count = 0
        last_frame_entity_list = []

        t0 = time.time()

        # For toggling display:
        image_list = ["camera", "difference", "threshold", "display", "faces"]
        image_index = 0  # Index into image_list

        # Prep for text drawing:
        text_font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX, .5, .5, 0.0, 1,
                                cv.CV_AA)
        text_coord = (5, 15)
        text_color = cv.CV_RGB(255, 255, 255)

        ###############################
        ### Face detection stuff
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_default.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt2.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_mcs_mouth.xml' )
        haar_cascade = cv.Load('haarcascades/haarcascade_eye.xml')
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_frontalface_alt_tree.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_upperbody.xml' )
        #haar_cascade = cv.Load( 'haarcascades/haarcascade_profileface.xml' )

        # Set this to the max number of targets to look for (passed to k-means):
        max_targets = 3

        while True:

            # Capture frame from webcam
            camera_image = cv.QueryFrame(self.capture)

            frame_count += 1
            frame_t0 = time.time()

            # Create an image with interactive feedback:
            display_image = cv.CloneImage(camera_image)

            # Create a working "color image" to modify / blur
            color_image = cv.CloneImage(display_image)

            # Smooth to get rid of false positives
            cv.Smooth(color_image, color_image, cv.CV_GAUSSIAN, 19, 0)

            # Use the Running Average as the static background
            # a = 0.020 leaves artifacts lingering way too long.
            # a = 0.320 works well at 320x240, 15fps.  (1/a is roughly num frames.)
            cv.RunningAvg(color_image, running_average_image, 0.320, None)

            # Convert the scale of the moving average.
            cv.ConvertScale(running_average_image,
                            running_average_in_display_color_depth, 1.0, 0.0)

            # Subtract the current frame from the moving average.
            cv.AbsDiff(color_image, running_average_in_display_color_depth,
                       difference)

            # Convert the image to greyscale.
            cv.CvtColor(difference, grey_image, cv.CV_RGB2GRAY)

            # Threshold the image to a black and white motion mask:
            cv.Threshold(grey_image, grey_image, 2, 255, cv.CV_THRESH_BINARY)
            # Smooth and threshold again to eliminate "sparkles"
            cv.Smooth(grey_image, grey_image, cv.CV_GAUSSIAN, 19, 0)
            cv.Threshold(grey_image, grey_image, 240, 255, cv.CV_THRESH_BINARY)

            grey_image_as_array = np.asarray(cv.GetMat(grey_image))
            non_black_coords_array = np.where(grey_image_as_array > 3)
            # Convert from np.where()'s two separate lists to one list of (x, y) tuples:
            non_black_coords_array = zip(non_black_coords_array[1],
                                         non_black_coords_array[0])

            points = [
            ]  # Was using this to hold either pixel coords or polygon coords.
            bounding_box_list = []

            # Now calculate movements using the white pixels as "motion" data
            contour = cv.FindContours(grey_image, mem_storage,
                                      cv.CV_RETR_CCOMP,
                                      cv.CV_CHAIN_APPROX_SIMPLE)

            while contour:

                bounding_rect = cv.BoundingRect(list(contour))
                point1 = (bounding_rect[0], bounding_rect[1])
                point2 = (bounding_rect[0] + bounding_rect[2],
                          bounding_rect[1] + bounding_rect[3])

                bounding_box_list.append((point1, point2))
                polygon_points = cv.ApproxPoly(list(contour), mem_storage,
                                               cv.CV_POLY_APPROX_DP)

                # To track polygon points only (instead of every pixel):
                #points += list(polygon_points)

                # Draw the contours:
                ###cv.DrawContours(color_image, contour, cv.CV_RGB(255,0,0), cv.CV_RGB(0,255,0), levels, 3, 0, (0,0) )
                cv.FillPoly(grey_image, [
                    list(polygon_points),
                ], cv.CV_RGB(255, 255, 255), 0, 0)
                cv.PolyLine(display_image, [
                    polygon_points,
                ], 0, cv.CV_RGB(255, 255, 255), 1, 0, 0)
                #cv.Rectangle( display_image, point1, point2, cv.CV_RGB(120,120,120), 1)

                contour = contour.h_next()

            # Find the average size of the bbox (targets), then
            # remove any tiny bboxes (which are prolly just noise).
            # "Tiny" is defined as any box with 1/10th the area of the average box.
            # This reduces false positives on tiny "sparkles" noise.
            # Note: left/right/top/bottom are corner indices assumed to be
            # defined elsewhere in the file (left = top = 0, right = bottom = 1),
            # since each box is ((x1, y1), (x2, y2)).
            box_areas = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]
                box_areas.append(box_width * box_height)

                #cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(255,0,0), 1)

            average_box_area = 0.0
            if len(box_areas):
                average_box_area = float(sum(box_areas)) / len(box_areas)

            trimmed_box_list = []
            for box in bounding_box_list:
                box_width = box[right][0] - box[left][0]
                box_height = box[bottom][1] - box[top][1]

                # Only keep the box if it's not a tiny noise box:
                if (box_width * box_height) > average_box_area * 0.1:
                    trimmed_box_list.append(box)

            # Draw the trimmed box list:
            #for box in trimmed_box_list:
            #	cv.Rectangle( display_image, box[0], box[1], cv.CV_RGB(0,255,0), 2 )

            bounding_box_list = merge_collided_bboxes(trimmed_box_list)

            # Draw the merged box list:
            for box in bounding_box_list:
                cv.Rectangle(display_image, box[0], box[1],
                             cv.CV_RGB(0, 255, 0), 1)

            # Here are our estimate points to track, based on merged & trimmed boxes:
            estimated_target_count = len(bounding_box_list)

            # Don't allow target "jumps" from few to many or many to few.
            # Only change the number of targets up to one target per n seconds.
            # This fixes the "exploding number of targets" when something stops moving
            # and the motion erodes to disparate little puddles all over the place.

            if frame_t0 - last_target_change_t < .350:  # 1 change per 0.35 secs
                estimated_target_count = last_target_count
            else:
                if last_target_count - estimated_target_count > 1:
                    estimated_target_count = last_target_count - 1
                if estimated_target_count - last_target_count > 1:
                    estimated_target_count = last_target_count + 1
                last_target_change_t = frame_t0

            # Clip to the user-supplied maximum:
            estimated_target_count = min(estimated_target_count, max_targets)

            # The estimated_target_count at this point is the maximum number of targets
            # we want to look for.  If kmeans decides that one of our candidate
            # bboxes is not actually a target, we remove it from the target list below.

            # Using the numpy values directly (treating all pixels as points):
            points = np.array(non_black_coords_array, dtype='f')
            center_points = []

            if len(points):

                # If we have all the "target_count" targets from last frame,
                # use the previously known targets (for greater accuracy).
                k_or_guess = max(estimated_target_count,
                                 1)  # Need at least one target to look for.
                if len(codebook) == estimated_target_count:
                    k_or_guess = codebook

                #points = vq.whiten(array( points ))  # Don't do this!  Ruins everything.
                codebook, distortion = vq.kmeans(points, k_or_guess)

                # Convert to tuples (and draw it to screen)
                for center_point in codebook:
                    center_point = (int(center_point[0]), int(center_point[1]))
                    center_points.append(center_point)
                    #cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 0, 0), 2)
                    #cv.Circle(display_image, center_point, 5, cv.CV_RGB(255, 0, 0), 3)

            # Now we have targets that are NOT computed from bboxes -- just
            # movement weights (according to kmeans).  If any two targets are
            # within the same "bbox count", average them into a single target.
            #
            # (Any kmeans targets not within a bbox are also kept.)
            trimmed_center_points = []
            removed_center_points = []

            for box in bounding_box_list:
                # Find the centers within this box:
                center_points_in_box = []

                for center_point in center_points:
                    if center_point[0] < box[right][0] and center_point[0] > box[left][0] and \
                     center_point[1] < box[bottom][1] and center_point[1] > box[top][1] :

                        # This point is within the box.
                        center_points_in_box.append(center_point)

                # Now see if there are more than one.  If so, merge them.
                if len(center_points_in_box) > 1:
                    # Merge them (two separate lists, so x and y don't alias):
                    x_list = []
                    y_list = []
                    for point in center_points_in_box:
                        x_list.append(point[0])
                        y_list.append(point[1])

                    average_x = int(float(sum(x_list)) / len(x_list))
                    average_y = int(float(sum(y_list)) / len(y_list))

                    trimmed_center_points.append((average_x, average_y))

                    # Record that they were removed:
                    removed_center_points += center_points_in_box

                if len(center_points_in_box) == 1:
                    trimmed_center_points.append(
                        center_points_in_box[0])  # Just use it.

            # If there are any center_points not within a bbox, just use them.
            # (It's probably a cluster comprised of a bunch of small bboxes.)
            for center_point in center_points:
                if (not center_point in trimmed_center_points) and (
                        not center_point in removed_center_points):
                    trimmed_center_points.append(center_point)

            # Draw what we found:
            #for center_point in trimmed_center_points:
            #	center_point = ( int(center_point[0]), int(center_point[1]) )
            #	cv.Circle(display_image, center_point, 20, cv.CV_RGB(255, 255,255), 1)
            #	cv.Circle(display_image, center_point, 15, cv.CV_RGB(100, 255, 255), 1)
            #	cv.Circle(display_image, center_point, 10, cv.CV_RGB(255, 255, 255), 2)
            #	cv.Circle(display_image, center_point, 5, cv.CV_RGB(100, 255, 255), 3)

            # Determine if there are any new (or lost) targets:
            actual_target_count = len(trimmed_center_points)
            last_target_count = actual_target_count

            # Now build the list of physical entities (objects)
            this_frame_entity_list = []

            # An entity is list: [ name, color, last_time_seen, last_known_coords ]

            for target in trimmed_center_points:

                # Is this a target near a prior entity (same physical entity)?
                entity_found = False
                entity_distance_dict = {}

                for entity in last_frame_entity_list:

                    entity_coords = entity[3]
                    delta_x = entity_coords[0] - target[0]
                    delta_y = entity_coords[1] - target[1]

                    distance = sqrt(pow(delta_x, 2) + pow(delta_y, 2))
                    entity_distance_dict[distance] = entity

                # Did we find any non-claimed entities (nearest to furthest):
                distance_list = entity_distance_dict.keys()
                distance_list.sort()

                for distance in distance_list:

                    # Yes; see if we can claim the nearest one:
                    nearest_possible_entity = entity_distance_dict[distance]

                    # Don't consider entities that are already claimed:
                    if nearest_possible_entity in this_frame_entity_list:
                        #print "Target %s: Skipping the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3], nearest_possible_entity[1] )
                        continue

                    #print "Target %s: USING the one iwth distance: %d at %s, C:%s" % (target, distance, nearest_possible_entity[3] , nearest_possible_entity[1])
                    # Found the nearest entity to claim:
                    entity_found = True
                    nearest_possible_entity[
                        2] = frame_t0  # Update last_time_seen
                    nearest_possible_entity[
                        3] = target  # Update the new location
                    this_frame_entity_list.append(nearest_possible_entity)
                    #log_file.write( "%.3f MOVED %s %d %d\n" % ( frame_t0, nearest_possible_entity[0], nearest_possible_entity[3][0], nearest_possible_entity[3][1]  ) )
                    break

                if not entity_found:
                    # It's a new entity.
                    color = (random.randint(0, 255), random.randint(0, 255),
                             random.randint(0, 255))
                    name = hashlib.md5(str(frame_t0) +
                                       str(color)).hexdigest()[:6]
                    last_time_seen = frame_t0

                    new_entity = [name, color, last_time_seen, target]
                    this_frame_entity_list.append(new_entity)
                    #log_file.write( "%.3f FOUND %s %d %d\n" % ( frame_t0, new_entity[0], new_entity[3][0], new_entity[3][1]  ) )

            # Now "delete" any not-found entities which have expired:
            entity_ttl = 1.0  # 1 sec.

            for entity in last_frame_entity_list:
                last_time_seen = entity[2]
                if frame_t0 - last_time_seen > entity_ttl:
                    # It's gone.
                    #log_file.write( "%.3f STOPD %s %d %d\n" % ( frame_t0, entity[0], entity[3][0], entity[3][1]  ) )
                    pass
                else:
                    # Save it for next time... not expired yet:
                    this_frame_entity_list.append(entity)

            # For next frame:
            last_frame_entity_list = this_frame_entity_list

            # Draw the found entities to screen:
            for entity in this_frame_entity_list:
                center_point = entity[3]
                c = entity[1]  # RGB color tuple
                cv.Circle(display_image, center_point, 20,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 15,
                          cv.CV_RGB(c[0], c[1], c[2]), 1)
                cv.Circle(display_image, center_point, 10,
                          cv.CV_RGB(c[0], c[1], c[2]), 2)
                cv.Circle(display_image, center_point, 5,
                          cv.CV_RGB(c[0], c[1], c[2]), 3)

            #print "min_size is: " + str(min_size)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break

            # Toggle which image to show
            if chr(c) == 'd':
                image_index = (image_index + 1) % len(image_list)

            image_name = image_list[image_index]

            # Display frame to user
            if image_name == "camera":
                image = camera_image
                cv.PutText(image, "Camera (Normal)", text_coord, text_font,
                           text_color)
            elif image_name == "difference":
                image = difference
                cv.PutText(image, "Difference Image", text_coord, text_font,
                           text_color)
            elif image_name == "display":
                image = display_image
                cv.PutText(image, "Targets (w/AABBs and contours)", text_coord,
                           text_font, text_color)
            elif image_name == "threshold":
                # Convert the image to color.
                cv.CvtColor(grey_image, display_image, cv.CV_GRAY2RGB)
                image = display_image  # Re-use display image here
                cv.PutText(image, "Motion Mask", text_coord, text_font,
                           text_color)
            elif image_name == "faces":
                # Do face detection
                detect_faces(camera_image, haar_cascade, mem_storage)
                image = camera_image  # Re-use camera image here
                cv.PutText(image, "Face Detection", text_coord, text_font,
                           text_color)

            size = cv.GetSize(image)
            large = cv.CreateImage(
                (int(size[0] * display_ratio), int(size[1] * display_ratio)),
                image.depth, image.nChannels)
            cv.Resize(image, large, interpolation=cv.CV_INTER_CUBIC)
            cv.ShowImage("Target", large)

            if self.writer:
                cv.WriteFrame(self.writer, image)

            #log_file.flush()

            # If only using a camera, then there is no time.sleep() needed,
            # because the camera clips us to 15 fps.  But if reading from a file,
            # we need this to keep the time-based target clipping correct:
            frame_t1 = time.time()

            # If reading from a file, put in a forced delay:
            if not self.writer:
                delta_t = frame_t1 - frame_t0
                if delta_t < (1.0 / 15.0): time.sleep((1.0 / 15.0) - delta_t)

        t1 = time.time()
        time_delta = t1 - t0
        processed_fps = float(frame_count) / time_delta
        print "Got %d frames. %.1f s. %f fps." % (frame_count, time_delta,
                                                  processed_fps)
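merge_collided_bboxes and detect_faces are defined elsewhere in track.py. A sketch of what the box-merging helper plausibly does, inferred from how it is used above (union overlapping ((x1, y1), (x2, y2)) boxes until none collide); this is an assumption, not the project's code:

def merge_collided_bboxes(bbox_list):
    boxes = list(bbox_list)
    merged = True
    while merged:
        merged = False
        out = []
        while boxes:
            (ax1, ay1), (ax2, ay2) = boxes.pop()
            i = 0
            while i < len(boxes):
                (bx1, by1), (bx2, by2) = boxes[i]
                # Axis-aligned overlap test; union the two boxes if they collide.
                if ax1 <= bx2 and bx1 <= ax2 and ay1 <= by2 and by1 <= ay2:
                    ax1, ay1 = min(ax1, bx1), min(ay1, by1)
                    ax2, ay2 = max(ax2, bx2), max(ay2, by2)
                    boxes.pop(i)
                    merged = True
                else:
                    i += 1
            out.append(((ax1, ay1), (ax2, ay2)))
        boxes = out
    return boxes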
Code Example #19
    def sd_loop(self):
        """
        The main seizure detector loop - call this function to start
        the seizure detector.
        """
        self.timeSeries = []  # array of times that data points were collected.
        self.maxFreq = None
        if (self.X11):
            cv.NamedWindow('Seizure_Detector', cv.CV_WINDOW_AUTOSIZE)
            cv.CreateTrackbar('FeatureTrackbar', 'Seizure_Detector', 0,
                              self.MAX_COUNT, self.onTrackbarChanged)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8)

        # Initialise the video input source
        # ('camera' - may be a file or network stream though).
        #camera = cv.CaptureFromFile("rtsp://192.168.1.18/live_mpeg4.sdp")
        #camera = cv.CaptureFromFile("../testcards/testcard.mpg")
        #camera = cv.CaptureFromFile("/home/graham/laura_sample.mpeg")
        camera = cv.CaptureFromCAM(0)

        # Set the VideoWriter that produces the output video file.
        frameSize = (640, 480)
        videoFormat = cv.CV_FOURCC('p', 'i', 'm', '1')
        # videoFormat = cv.CV_FOURCC('l', 'm', 'p', '4')
        vw = cv.CreateVideoWriter(self.videoOut, videoFormat, self.outputfps,
                                  frameSize, 1)
        if vw is None:
            print "ERROR - Failed to create VideoWriter...."

        # Get the first frame.
        last_analysis_time = datetime.datetime.now()
        last_feature_search_time = datetime.datetime.now()
        last_frame_time = datetime.datetime.now()
        frame = cv.QueryFrame(camera)

        print "frame="
        print frame

        # Main loop - repeat forever
        while 1:
            # Carry out initialisation, memory allocation etc. if necessary
            if self.image is None:
                self.image = cv.CreateImage(cv.GetSize(frame), 8, 3)
                self.image.origin = frame.origin
                grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
                prev_grey = cv.CreateImage(cv.GetSize(frame), 8, 1)
                pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
                prev_pyramid = cv.CreateImage(cv.GetSize(frame), 8, 1)
                # self.features = []

            # copy the captured frame to our self.image object.
            cv.Copy(frame, self.image)

            # create a grey version of the image
            cv.CvtColor(self.image, grey, cv.CV_BGR2GRAY)

            # Look for features to track.
            if self.need_to_init:
                #cv.ShowImage ('loop_grey',grey)
                self.initFeatures(grey)
                self.timeSeries = []
                self.maxFreq = None
                last_analysis_time = datetime.datetime.now()
                self.need_to_init = False

            # Now track the features, if we have some.
            if self.features != []:
                # we have points to track, so track them and add them to
                # our time series of positions.
                self.features, status, track_error = cv.CalcOpticalFlowPyrLK(
                    prev_grey, grey, prev_pyramid, pyramid, self.features,
                    (self.win_size, self.win_size), 3,
                    (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03),
                    self.flags)
                self.timeSeries.append((last_frame_time, self.features))
                print "Features..."
                for featNo in range(len(self.features)):
                    if (status[featNo] == 0):
                        self.features[featNo] = (-1, -1)
                    print status[featNo], self.features[featNo]
                # and plot them.
                for featNo in range(len(self.features)):
                    pointPos = self.features[featNo]
                    cv.Circle(self.image, (int(pointPos[0]), int(pointPos[1])),
                              3, (0, 255, 0, 0), -1, 8, 0)
                    if (self.alarmActive[featNo] == 2):
                        cv.Circle(self.image,
                                  (int(pointPos[0]), int(pointPos[1])), 10,
                                  (0, 0, 255, 0), 5, 8, 0)
                    if (self.alarmActive[featNo] == 1):
                        cv.Circle(self.image,
                                  (int(pointPos[0]), int(pointPos[1])), 10,
                                  (0, 0, 255, 0), 2, 8, 0)

                    # there will be no maxFreq data until we have
                    # run doAnalysis for the first time.
                    if self.maxFreq is not None:
                        msg = "%d-%3.1f" % (featNo, self.maxFreq[featNo])
                        cv.PutText(
                            self.image, msg,
                            (int(pointPos[0] + 5), int(pointPos[1] + 5)), font,
                            (255, 255, 255))
                # end of for loop over features
            else:
                #print "Oh no, no features to track, and you haven't told me to look for more."
                # no features, so better look for some more...
                self.need_to_init = True

            # Is it time to analyse the captured time series.
            if ((datetime.datetime.now() - last_analysis_time).total_seconds()
                    > self.Analysis_Period):
                if (len(self.timeSeries) > 0):
                    self.doAnalysis()
                    self.doAlarmCheck()
                    last_analysis_time = datetime.datetime.now()
                else:
                    # print "Not doing analysis - no time series data..."
                    a = True  # no-op placeholder

            # Is it time to re-acquire the features to track.
            if ((datetime.datetime.now() -
                 last_feature_search_time).total_seconds() >
                    self.Feature_Search_Period):
                print "resetting..."
                last_feature_search_time = datetime.datetime.now()
                self.need_to_init = True

            # save current data for use next time around.
            prev_grey, grey = grey, prev_grey
            prev_pyramid, pyramid = pyramid, prev_pyramid

            # we can now display the image
            if (self.X11): cv.ShowImage('Seizure_Detector', self.image)
            cv.WriteFrame(vw, self.image)

            # handle events
            c = cv.WaitKey(10)
            if c == 27:
                # user has pressed the ESC key, so exit
                break

            # Control frame rate by pausing if we are going too fast.
            frameTime = (datetime.datetime.now() - last_frame_time)\
                .total_seconds()
            actFps = 1.0 / frameTime
            if frameTime < 1.0 / self.inputfps:
                cv.WaitKey(1 + int(1000. * (1. / self.inputfps - frameTime)))

            # Grab the next frame
            last_frame_time = datetime.datetime.now()
            frame = cv.QueryFrame(camera)
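For reference, the feature-tracking step above maps onto the newer API as cv2.calcOpticalFlowPyrLK; a minimal sketch (array names and window size assumed):

import cv2
import numpy as np

# prev_grey and grey are greyscale numpy arrays; features is a list of (x, y).
points = np.float32(features).reshape(-1, 1, 2)
new_points, status, err = cv2.calcOpticalFlowPyrLK(
    prev_grey, grey, points, None,
    winSize=(10, 10), maxLevel=3,
    criteria=(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 20, 0.03))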