예제 #1
0
def demo_videoMontage():
    """Display two bundled sample videos side by side in a 2x1 montage."""
    import os

    # Locate the demo clips shipped inside the pyvision package.
    toy_car_path = os.path.join(pv.__path__[0], 'data', 'test', 'toy_car.m4v')
    taz_path = os.path.join(pv.__path__[0], 'data', 'test', 'TazSample.m4v')

    sources = {
        "V1": pv.Video(toy_car_path),
        "V2": pv.Video(taz_path),
    }

    # Iterate the montage frame-by-frame and show each composited image.
    montage = VideoMontage(sources, layout=(2,1), tileSize=(256,192))
    for composite in montage:
        composite.show("Video Montage", delay=60, pos=(10,10))
예제 #2
0
def demo_videoMontage():
    """Play two bundled sample videos as a 2x1 montage via VideoMontage.play."""
    import os

    # Demo clips shipped with the pyvision package.
    toy_car_path = os.path.join(pv.__path__[0], 'data', 'test', 'toy_car.m4v')
    taz_path = os.path.join(pv.__path__[0], 'data', 'test', 'TazSample.m4v')

    # Additional streams can be added to the dict to fill a larger layout.
    sources = {"V1": pv.Video(toy_car_path), "V2": pv.Video(taz_path)}

    montage = VideoMontage(sources, layout=(2, 1), tile_size=(256, 192))
    # Let the montage object drive playback itself.
    montage.play("Video Montage", delay=60, pos=(10, 10))
예제 #3
0
    def testMotionDetectFD(self):
        """Exercise frame-difference background subtraction on the bugs video.

        Runs the detector over the first 21 frames and touches every
        rectangle/polygon accessor so regressions surface as exceptions.
        Set ``ilog`` to a ``pv.ImageLog()`` to inspect annotated key frames.
        """
        ilog = None  # pv.ImageLog()

        md = pv.MotionDetector(method=pv.BG_SUBTRACT_FD, minArea=200)
        video = pv.Video(BUGS_VIDEO)

        for i, frame in enumerate(video):
            _ = md.detect(frame)

            # Call the accessors so any breakage in them is detected.
            _ = md.getStandardizedRects()
            _ = md.getBoundingRects()
            _ = md.getPolygons(return_all=True)

            if ilog is not None:  # identity check per PEP 8, not `!= None`
                print("Processing Frame:", i)

                key_frame = md.getKeyFrame()
                md.annotateFrame(key_frame)

                if key_frame is not None:
                    ilog(key_frame, format='jpg')

            # Limit the test to the first 21 frames (indices 0..20).
            if i >= 20:
                break

        if ilog is not None:
            ilog.show()
예제 #4
0
    def testHomographies(self):
        """Check optical-flow homography warps between consecutive frames.

        Only the first 11 frames are processed.  Set ``ilog`` to a
        ``pv.ImageLog()`` to record the backward/forward warped images
        for visual inspection.
        """
        ilog = None  # pv.ImageLog()

        flow = pv.OpticalFlow()
        video = pv.Video(TAZ_VIDEO)

        prev_frame = None
        for i, frame in enumerate(video):
            flow.update(frame)
            flow.annotateFrame(frame)

            if ilog is not None:  # identity check per PEP 8, not `!= None`
                print("Processing Frame:", i)
                # to_prev/to_next are attached to frames by OpticalFlow.update
                # -- presumably warps toward the previous/next frame; confirm.
                if hasattr(frame, 'to_prev'):
                    prev_im = frame.to_prev(frame)
                    ilog(prev_im, 'back', format='jpg')

                if prev_frame is not None:
                    forward_im = prev_frame.to_next(prev_frame)
                    ilog(forward_im, 'forward', format='jpg')

                ilog(frame, "current", format='jpg')

            # Stop after 11 frames (indices 0..10).
            if i >= 10:
                break

            prev_frame = frame

        if ilog is not None:
            ilog.show()
예제 #5
0
    def testMotionDetectMCFD(self):
        """Run motion-compensated frame-difference detection over the toy-car video.

        Optical flow is encapsulated inside the MCFD detector, so no
        separate ``pv.OpticalFlow`` object is needed here.  Set ``ilog``
        to a ``pv.ImageLog()`` to log the annotated key frames.
        """
        ilog = None  # pv.ImageLog()

        md = pv.MotionDetector(method=pv.BG_SUBTRACT_MCFD,
                               minArea=200,
                               rect_type=pv.STANDARDIZED_RECTS)
        video = pv.Video(TOYCAR_VIDEO)

        for i, frame in enumerate(video):
            # Flow updates happen inside detect(); no explicit flow.update().
            md.detect(frame)

            if ilog is not None:  # identity check per PEP 8, not `!= None`
                print("Processing Frame:", i)

                key_frame = md.getKeyFrame()
                md.annotateFrame(key_frame)

                if key_frame is not None:
                    ilog(key_frame, format='jpg')

        if ilog is not None:
            ilog.show()
예제 #6
0
    def testSync(self):
        """Video Sync Test

        Tests a kludge that makes sure the first frame of video is read
        properly: each frame decoded by OpenCV must closely match the
        reference frames extracted with ffmpeg (SYNC_FRAMES).
        """
        # Uncomment next line to show image diagnostics
        ilog = None  # pv.ImageLog()
        video_path = os.path.join(DATA_DIR, SYNC_VIDEO)
        video = pv.Video(video_path)

        for frame_num, frame_name in enumerate(SYNC_FRAMES):
            frame_path = os.path.join(DATA_DIR, frame_name)
            ffmpeg_frame = pv.Image(frame_path)
            opencv_frame = video.next()
            diff = ffmpeg_frame.asMatrix3D() - opencv_frame.asMatrix3D()
            diff_max = max(abs(diff.max()), abs(diff.min()))
            # Test on MacOS never exceeds 25; 30 leaves slack for codec
            # differences.  assertTrue replaces the deprecated assert_ alias.
            self.assertTrue(diff_max < 30.0)
            diff = pv.Image(diff)
            if ilog is not None:
                ilog(ffmpeg_frame, "ffmpeg_%04d" % frame_num)
                ilog(opencv_frame, "opencv_%04d" % frame_num)
                ilog(diff, "diff_%04d" % frame_num)

        # Make sure that this is the last frame of the video
        self.assertRaises(StopIteration, video.next)

        if ilog is not None:
            ilog.show()
예제 #7
0
def runChangeDetectionExample():
    """Feed the bugs video through a VideoTaskManager running change detection.

    Bug fix: the original assigned the manager to a local named ``vtm``,
    which made ``vtm`` function-local and raised UnboundLocalError on the
    very first reference to the ``vtm`` module.  A distinct local name
    avoids the shadowing.
    """
    video = pv.Video(pv.BUGS_VIDEO)

    task_manager = vtm.VideoTaskManager(buffer_size=2, debug_level=2)
    task_manager.addTaskFactory(ChangeDetectionVT)
    task_manager.addTaskFactory(ChangeDetectionAnnotationVT)

    for frame in video:
        task_manager.addFrame(frame)
예제 #8
0
    def testVideoFrameCount(self):
        """Frame Count Test

        Iterating the sync video must yield exactly 5 frames.
        """
        video_path = os.path.join(DATA_DIR, SYNC_VIDEO)
        video = pv.Video(video_path)

        # Count frames by exhausting the iterator.
        count = sum(1 for _ in video)

        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(count, 5)
예제 #9
0
    def testOpticalFlow(self):
        """Smoke-test pv.OpticalFlow over the first 11 frames of the taz video.

        Set ``ilog`` to a ``pv.ImageLog()`` to record the annotated frames.
        """
        ilog = None  # pv.ImageLog()

        flow = pv.OpticalFlow()
        video = pv.Video(TAZ_VIDEO)

        for i, frame in enumerate(video):
            flow.update(frame)
            flow.annotateFrame(frame)

            if ilog is not None:  # identity check per PEP 8, not `!= None`
                print("Processing Frame:", i)
                ilog(frame, format='jpg')

            # Stop after 11 frames (indices 0..10).
            if i >= 10:
                break

        if ilog is not None:
            ilog.show()
예제 #10
0
    '''
    This function opens a high gui window that displays the image.  Any 
    points that are clicked will be returned after the user presses the 
    space bar.
    
    @param im: An image to display.
    @param default_points: Some default points to display.
    @type default_points: list of pv.Point
    @type default_points: list
    @param keep_window_open: keep the window open after point were captured
    @type True|False
    @param window: The name of the window
    @type window: string
    @returns: a list of points that were clicked by the user.
    '''
    if isinstance(im, pv.Image):
        cap = CaptureClicks(im, *args, **kwargs)
        clicks = cap.display()
    else:
        cap = CaptureClicksVideo(im, *args, **kwargs)
        clicks = cap.display()
    return clicks


if __name__ == '__main__':
    # Image variant of the demo:
    #im = pv.Image(pv.TAZ_IMAGE)
    #pv.capturePointsFromMouse(im)

    # Capture mouse clicks across the frames of the bundled sample video.
    taz_video = pv.Video(pv.TAZ_VIDEO)
    clicked_points = capturePointsFromMouse(taz_video)
예제 #11
0
'''
Created on Jul 22, 2011
@author: Stephen O'Hara

This demonstration will play the streaming video from a compliant IP (network) video camera.
Getting the url correct for your make/model camera is critical, and you'll also need to
have OpenCV built with ffmpeg support. If the dependencies are met, you can see that this
is two lines of trivial pyvision code!
'''
import pyvision as pv

#The following is the rtsp url for a linksys WVC54GCA IP camera,
# which can be purchased for less than $100
# Of course the ip address in the middle of the URL below will
# need to be changed as appropriate for your local network.
cam_url = "rtsp://192.168.2.55/img/video.sav"

if __name__ == '__main__':
    # Bug fix: the original guard was an empty ``pass`` and the demo ran at
    # import time; the playback code belongs under the guard so importing
    # this module does not open a network stream.
    print("Please be patient, it can take several seconds to buffer live video...")
    print(
        "When video is playing, if you click on the video window and then hold down the spacebar, you can pause it."
    )
    print(
        "When paused, you can hit 's' to step one frame at a time, 'c' to continue playback, or 'q' to quit."
    )
    vid = pv.Video(cam_url)
    vid.play()
예제 #12
0
파일: testsuite.py 프로젝트: mdqyy/pyvision
    def testFrameCount(self):
        """Frame Count Test

        ``len()`` of the sync video must report exactly 5 frames.
        """
        video_path = os.path.join(DATA_DIR, SYNC_VIDEO)
        video = pv.Video(video_path)

        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(len(video), 5)
예제 #13
0
'''

import pyvision as pv
from pyvision.types.Video import Video
import ocof
import os.path
import cv
import time
import copy

# Sample capture clip bundled with the ocof package's test data.
TAZ_FILENAME = os.path.join(ocof.__path__[0], 'test', 'data', 'capture1.mp4')
# NOTE(review): ``global`` at module scope is a no-op; kept as-is.
global TAZ_RECT
# Rectangle used by the demo -- presumably the initial track window; confirm.
TAZ_RECT = pv.Rect(200, 200, 120, 120)

# print TAZ_FILENAME
# NOTE(review): both sources are opened at import time -- the webcam line
# will fail on machines without a camera attached.
video = pv.Video(TAZ_FILENAME)
webcam = pv.Webcam()

# Presumably created lazily once tracking starts -- TODO confirm below.
tracker = None

# Mouse-drag state shared with the OpenCV mouse callback defined below.
global startPointx
global startPointy
global flagDraw
global src
startPointx = 0
startPointy = 0
flagDraw = False


def onMouseEvent(event, x, y, flags, param):
    global startPointx