Example #1
0
def frame_reduction(in_file_name, fps=5):
    """Sample roughly `fps` evenly spaced frames from a video file.

    Parameters:
        in_file_name: path of the video to open.
        fps: approximate number of frames to keep over the whole video.

    Returns:
        List of the sampled frames (legacy cv IplImage objects).
    """
    vid_file = cv.CaptureFromFile(in_file_name)
    vid_frames = int(cv.GetCaptureProperty(vid_file, cv.CV_CAP_PROP_FRAME_COUNT))
    # Spacing between kept frames.  Clamp to >= 1 so short videos
    # (vid_frames < fps) no longer raise ZeroDivisionError in the modulo.
    step = max(vid_frames // fps, 1) if fps > 0 else 1
    img_list = []
    for f in range(vid_frames):
        # BUG FIX: the original called QueryFrame only on the kept
        # indices, so the capture never advanced past skipped frames and
        # the "sampled" frames were actually consecutive.  Decode every
        # frame and keep every step-th one.
        frame_img = cv.QueryFrame(vid_file)
        if frame_img is None:
            break
        if f % step == 0:
            img_list.append(frame_img)
    return img_list
Example #2
0
    def __init__(self, camera_num=0, xmin=0, xmax=300, ymin=0, ymax=300):
        """Open camera `camera_num` and store a region-of-interest box.

        The x/y bounds are kept verbatim; presumably they crop frames in
        other methods of this class (not visible here) -- confirm usage.
        """
        # Region-of-interest bounds, in pixels.
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax

        # Legacy OpenCV capture handle for the requested camera index.
        self.cam = cv.CaptureFromCAM(camera_num)
        # Dump the native capture properties for debugging.
        print "W:", cv.GetCaptureProperty(self.cam, cv.CV_CAP_PROP_FRAME_WIDTH)
        print "H:", cv.GetCaptureProperty(self.cam,
                                          cv.CV_CAP_PROP_FRAME_HEIGHT)
        print "M:", cv.GetCaptureProperty(self.cam, cv.CV_CAP_PROP_MODE)
Example #3
0
def _get_video_properties(filename):
    """Return (fps, width, height) for a video file.

    The frame rate is clamped to the range [15.0, 30.0]; width and
    height are returned as ints.

    Raises:
        IOError: when the file cannot be opened as a capture.
    """
    capture = cv.CaptureFromFile(filename)
    if not capture:
        raise IOError('CaptureFromFile')
    raw_fps = float(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS))
    # Clamp the reported rate into [15, 30].
    clamped_fps = max(15.0, min(raw_fps, 30.0))
    frame_w = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    frame_h = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    return (clamped_fps, frame_w, frame_h)
Example #4
0
def num_frames(video_file):
    """Return the number of frames in a video file.

    Raises:
        IOError: when the file cannot be opened as a capture.
    """
    capture = cv.CaptureFromFile(video_file)
    if not capture:
        raise IOError('CaptureFromFile')
    frame_count = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT)
    return int(frame_count)
Example #5
0
    def __init__(self, parent):
        """Build the panel, render one processed frame, start a play timer.

        NOTE(review): this method references several names that are not
        defined in the visible code (`orig`, `capture`, `processed`,
        `grid`, `self.TIMER_PLAY_ID`, `self.ImagePro`, `self.onNextFrame`)
        -- confirm they are provided elsewhere in the class/module.
        """
        wx.Panel.__init__(self, parent)

        #magic to stop the flickering
        def SetCompositeMode(self, on=True):
            # Toggle the WS_EX_COMPOSITED extended style so Windows
            # double-buffers the whole window.
            exstyle = win32api.GetWindowLong(self.GetHandle(),
                                             win32con.GWL_EXSTYLE)
            if on:
                exstyle |= win32con.WS_EX_COMPOSITED
            else:
                exstyle &= ~win32con.WS_EX_COMPOSITED
            win32api.SetWindowLong(self.GetHandle(), win32con.GWL_EXSTYLE,
                                   exstyle)

        SetCompositeMode(self, True)

        #self.capture = cv.CaptureFromCAM(0) # turn on the webcam
        #img = ImagePro # Convert the raw image data to something wxpython can handle.
        #cv.CvtColor(img, img, cv.CV_BGR2RGB) # fix color distortions
        storage = cv.CreateMat(orig.width, 1, cv.CV_32FC3)
        self.ImagePro(capture, orig, processed, storage, grid)
        cv.CvtColor(orig, orig, cv.CV_BGR2RGB)
        # NOTE(review): the hard-coded 640x300 must match the size of
        # `orig`, otherwise BitmapFromBuffer fails -- verify.
        self.bmp = wx.BitmapFromBuffer(640, 300, orig.tostring())
        sbmp = wx.StaticBitmap(self, -1,
                               bitmap=self.bmp)  # Display the resulting image

        # Drive frame updates at the source frame rate (or 15 fps
        # when the capture does not report one).
        self.playTimer = wx.Timer(self, self.TIMER_PLAY_ID)
        wx.EVT_TIMER(self, self.TIMER_PLAY_ID, self.onNextFrame)
        fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)

        if fps != 0: self.playTimer.Start(1000 / fps)  # every X ms
        else: self.playTimer.Start(1000 / 15)  # assuming 15 fps
Example #6
0
 def __init__(self, videoFile, gTruthXmlFile, outputXml, platesDir):
     """Set up video playback, ground-truth XML I/O and plate folders.

     Parameters:
         videoFile: path of the video to annotate.
         gTruthXmlFile: ground-truth XML read via XmlGTruthReader.
         outputXml: annotation output written via XmlWriter.
         platesDir: root directory for cropped plate images; the four
             category subdirectories are created when missing.
     """
     self.xml = XmlWriter(outputXml)
     self.gtruth = XmlGTruthReader(gTruthXmlFile)
     self.capture = cv.CaptureFromFile(videoFile)
     cv.NamedWindow("Video", cv.CV_WINDOW_AUTOSIZE)
     self.w = cv.GetCaptureProperty(self.capture,
                                    cv.CV_CAP_PROP_FRAME_WIDTH)
     self.h = cv.GetCaptureProperty(self.capture,
                                    cv.CV_CAP_PROP_FRAME_HEIGHT)
     # BUG FIX: the frame count was read from CV_CAP_PROP_FRAME_HEIGHT
     # (copy/paste slip), so nFrames reported the frame height.
     self.nFrames = cv.GetCaptureProperty(self.capture,
                                          cv.CV_CAP_PROP_FRAME_COUNT)
     print('Total number of Frames in video: ' + str(self.nFrames))
     self.font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1, 1, 0, 1, 1)
     print("resolution: " + str(int(self.w)) + "x" + str(int(self.h)))
     # Display scale factors.
     self.fx = 0.85
     self.fy = 0.85
     self.iframe = self.xml.iframe
     # Annotation-state flags for the current vehicle.
     self.plate = True
     self.radar = False
     self.semaphore = False
     self.moto = False
     self.currv = None
     self.mouse = MouseEvent()
     self.select = PlaceSelection()
     # Create platesDir and its category subdirectories if needed.
     self.plateCarRadar = platesDir + "/car-radar"
     self.plateCarNoRad = platesDir + "/car-noradar"
     self.plateMotoRadar = platesDir + "/moto-radar"
     self.plateMotoNoRad = platesDir + "/moto-noradar"
     for directory in (platesDir, self.plateCarRadar, self.plateCarNoRad,
                       self.plateMotoRadar, self.plateMotoNoRad):
         if not os.path.isdir(directory):
             os.mkdir(directory)
Example #7
0
def normalize_video_lenght(i_name, o_name, fps, length, verbose=False):
    capture = cv.CaptureFromFile(i_name)
    width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    original_fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    original_frames_number = cv.GetCaptureProperty(capture,
                                                   cv.CV_CAP_PROP_FRAME_COUNT)
    final_frames_number = int(fps * length)

    if verbose:
        print '\ninput video: ', i_name
        print 'size: %s:%s' % (width, height), ' fps:', original_fps, 'frames:', \
                original_frames_number, 'estimated length:', float(original_frames_number)/original_fps

        print '\noutput video: ', o_name
        print 'size: %s:%s' % (width, height), ' fps:', fps, 'frames:', \
                final_frames_number, 'estimated length:', float(final_frames_number)/fps, '\n'

    my_fourcc = cv.CV_FOURCC('m', 'p', 'g', '2')
    writer = cv.CreateVideoWriter(o_name, my_fourcc, fps, (width, height))

    diff = final_frames_number - original_frames_number
    step = operation = None
    if diff > 0:
        step = int(original_frames_number / diff)
        operation = expand_video
    elif diff < 0:
        step = int(final_frames_number / abs(diff))
        operation = reduce_video

    if step == 0:
        print 'The desired final length is too short'
        return 1

    result = operation(capture, writer, final_frames_number, step, verbose)
    if verbose:
        print 'A total of', result, 'frames were removed/duplicated from the original video.'
        return 0
    def run(self):
        """Track a red object through the capture and log its centroid.

        Each frame is blurred, converted to HSV and thresholded for red
        hues; when the detected blob's area exceeds 2500 its centroid
        and elapsed time are appended to the module-level file object
        `f` (assumed already open for writing -- confirm with caller).
        """
        initialTime = 0.  #sets the initial time
        num_Frames = int(
            cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FRAME_COUNT))
        fps = cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_FPS)

        # NOTE(review): stops 8 frames before the reported end --
        # presumably to avoid decode errors near EOF; confirm.
        for ii in range(num_Frames - 8):

            print('Frame: ' + str(ii) + ' of ' + str(num_Frames))
            # read the ii-th frame
            img = cv.QueryFrame(self.capture)

            # Blur the source image to reduce color noise
            cv.Smooth(img, img, cv.CV_BLUR, 3)

            # Convert the image to hsv(Hue, Saturation, Value) so its
            # It's easier to determine the color to track(hue)
            hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

            # limit all pixels that don't match our criteria, in the	is case we are
            # looking for purple but if you want you can adjust the first value in
            # both turples which is the hue range(120,140).  OpenCV uses 0-180 as
            # a hue range for the HSV color model
            thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

            # uncomment below for tracking blue
            #             cv.InRangeS(hsv_img, (112, 50, 50), (118, 200, 200), thresholded_img)

            # tracking red
            cv.InRangeS(hsv_img, (160, 150, 100), (180, 255, 255),
                        thresholded_img)

            #determine the objects moments and check that the area is large
            #enough to be our object
            thresholded_img2 = cv.GetMat(thresholded_img)
            moments = cv.Moments(thresholded_img2, 0)
            area = cv.GetCentralMoment(moments, 0, 0)

            # there can be noise in the video so ignore objects with small areas
            if (area > 2500):
                #determine the x and y coordinates of the center of the object
                #we are tracking by dividing the 1, 0 and 0, 1 moments by the area
                x = cv.GetSpatialMoment(moments, 1, 0) / area
                y = cv.GetSpatialMoment(moments, 0, 1) / area

                # Time of this frame in seconds from the start.
                elapsedTime = ii / fps

                f.write(
                    str(elapsedTime) + ',' + '%013.9f' % x + ',' +
                    '%013.9f' % y + "\n"
                )  #prints output to the specified output file for later use


#
#                 x = int(x)
#                 y = int(y)
#
#                 #create an overlay to mark the center of the tracked object
#                 overlay = cv.CreateImage(cv.GetSize(img), 8, 3)
#
#                 cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20)
#                 cv.Add(img, overlay, img)
#                 #add the thresholded image back to the img so we can see what was
#                 #left after it was applied
#                 cv.Merge(thresholded_img, None, None, None, img)
#
#             #display the image
#             cv.ShowImage(color_tracker_window, img)

# close the data file
        f.close()
Example #9
0
    def detect_transitions(self, filename, imagepath):
        """Scan a video for slide transitions and build points of interest.

        First pass: every `self.leap_step`-th frame is compared with
        ImageMatcher; frames whose similarity drops below
        `self.similarity_threshold` are recorded as transitions.
        Second pass: transitions closer together than
        `self.minimun_spacing` seconds are merged, and for each merged
        point of interest a representative frame is saved as a PNG in
        `imagepath`.

        Returns (duration_seconds, points_of_interest) where each point
        of interest is a (begin_time, end_time, image_name) tuple.
        """

        capturer = cv.CaptureFromFile(filename)
        self._fps = cv.GetCaptureProperty(capturer, cv.CV_CAP_PROP_FPS)
        detector = ImageMatcher(self.reduction_factor)
        nFrames = cv.GetCaptureProperty(capturer, cv.CV_CAP_PROP_FRAME_COUNT)

        print self._fps, nFrames
        #Fist we will check for all transitions in the video
        frame_number = 0
        transitions_detected = []
        while True:
            img = cv.QueryFrame(capturer)
            if (img == None):
                break

            #We will ignore some frames during the recognition process
            if frame_number % self.leap_step == 0:
                print 'Frame: %d/%d' % (frame_number, nFrames)
                similarity = detector.compare(img)
                if (similarity < self.similarity_threshold):
                    #logger.info('Transition found at frame %d in time %f', frame_number, float(frame_number)/self._fps)
                    transitions_detected.append((frame_number, similarity))
            frame_number += 1

        #Now we will agroup the transitions in order to detected the points of interest
        #Transitions which occurs too near should be combined in a single point of interest
        print transitions_detected

        begin_poi_frame = 0
        last_transition_frame = 0  #Use this number to assure the first transition
        # Minimum gap between points of interest, in frames.
        frame_mininum_spacing = self._fps * self.minimun_spacing

        points_of_interest = []
        # Re-open the file so frames can be read from the start when
        # saving snapshot images below.
        capturer = cv.CaptureFromFile(filename)
        frameIndex = 0
        img = None
        poiIndex = 0
        for current_frame, similarity in transitions_detected:
            #This frame is spaced enough to figure as a point of interest
            #logger.info('current_frame: %s, last_transition_frame: %s, frame_mininum_spacing: %s', \
            #                 current_frame, last_transition_frame, frame_mininum_spacing)
            if current_frame - last_transition_frame > frame_mininum_spacing:
                #Is this point of interest compound of more than one transition?
                begin_time = self.frame_to_time(begin_poi_frame)
                end_time = self.frame_to_time(last_transition_frame)
                #logger.info('end_time: %s, begin_time: %s, minimun_duration: %s', \
                #             end_time, begin_time, self.minimun_duration)

                if end_time - begin_time >= self.minimun_duration:
                    myname = "slide_transition_" + str(poiIndex) + ".png"
                    # Advance the capture to a frame a little past the
                    # POI start and save it as the representative image.
                    while frameIndex < begin_poi_frame + 5:
                        import os
                        img = cv.QueryFrame(capturer)
                        frameIndex += 1
                        if img is None:
                            break
                    print "Generating image: ", myname
                    cv.SaveImage(os.path.join(imagepath, myname), img)

                    poi = (begin_time, end_time, myname)
                    #logger.info('Adding Poi')
                    points_of_interest.append(poi)
                    begin_poi_frame = current_frame
                    poiIndex += 1
            else:
                pass
            last_transition_frame = current_frame

        #For last frame
        end_time = self.frame_to_time(last_transition_frame)
        begin_time = self.frame_to_time(begin_poi_frame)

        if end_time - begin_time >= self.minimun_duration:
            myname = "slide_transition_" + str(poiIndex) + ".png"
            while frameIndex < begin_poi_frame + 2:
                import os
                img = cv.QueryFrame(capturer)
                frameIndex += 1
                if img is None:
                    break
            print "Generating image: ", frameIndex
            cv.SaveImage(os.path.join(imagepath, myname), img)

            poi = (self.frame_to_time(begin_poi_frame), \
                   self.frame_to_time(last_transition_frame), \
                   myname)
            points_of_interest.append(poi)

        return nFrames / self._fps, points_of_interest
Example #10
0
    cv.ShowImage("result", img)

if __name__ == '__main__':

    #cascade = cv.Load("haarcascade_eye_tree_eyeglasses.xml")
    cascade = cv.Load("haarcascade_frontalface_alt.xml")
    #cascade = cv.Load("cars3.xml")
    #cascade = cv.Load("haarcascade_upperbody.xml")
    #cascade = cv.Load("aGest.xml")
    #cascade = cv.Load("haarcascade_fullbody.xml")
    #cascade = cv.Load("closed_frontal_palm.xml")
    #cascade = cv.Load("palm.xml")
    #cascade = cv.Load("smile.xml")
    capture = cv.CreateCameraCapture(2)  # camera   NO. 0, 1, 2
    print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)

    cv.NamedWindow("result", 1)
    frame = cv.QueryFrame(capture)

    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    if capture:
        frame_copy = None
        while True:
            frame = cv.QueryFrame(capture)

            if not frame:
                #print 'hehe'
                #cv.WaitKey(0)
Example #11
0
def detect_and_draw(img, cascade):
    # allocate temporary images
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(
        img.width / image_scale), cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)
    cv.EqualizeHist(small_img, small_img)

    if (cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade,
                                     cv.CreateMemStorage(0), haar_scale,
                                     min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "time taken for detection = %gms" % (
            t / (cv.GetTickFrequency() * 1000.))
    if faces:
        for ((x, y, w, h), n) in faces:
            # the input to cv.HaarDetectObjects was resized, so scale the
            # bounding box of each face and convert it to two CvPoints
            pt1 = (int(x * image_scale), int(y * image_scale))
            pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
            cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

        cv.ShowImage("video", img)

    if __name__ == '__main__':

        parser = OptionParser(
            usage="usage: %prog [options] [filename|camera_index]")
        parser.add_option(
            "-c",
            "-cascade",
            action="store",
            dest="cascade",
            type="str",
            help="Haar cascade file, default %default",
            default="../data/haarcascades/haarcascade_frontalface_alt.xml")(
                options, args) = parser.parse_args()

    cascade = cv.Load(options.cascade)

    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    input_name = args[0]
    if input_name.isdigit():
        capture = cv.CreateCameraCapture(int(input_name))
    else:
        capture = None

    cv.NamedWindow("video", 1)

    #size of the video
    width = 160
    height = 120

    if width is None:
        width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)

    if height is None:
        height = int(
            cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    else:
        cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

    if capture:
        frame_copy = None
    while True:

        frame = cv.QueryFrame(capture)
        if not frame:
            cv.WaitKey(0)
            break
        if not frame_copy:
            frame_copy = cv.CreateImage((frame.width, frame.height),
                                        cv.IPL_DEPTH_8U, frame.nChannels)

        if frame.origin == cv.IPL_ORIGIN_TL:
            cv.Copy(frame, frame_copy)
        else:
            cv.Flip(frame, frame_copy, 0)

    detect_and_draw(frame_copy, cascade)

    if cv.WaitKey(10) >= 0:
        break
    else:
        image = cv.LoadImage(input_name, 1)
        detect_and_draw(image, cascade)
        cv.WaitKey(0)

    cv.DestroyWindow("video")
Example #12
0
import numpy as np
import cv2
import cv2.cv as cv
import time

# Use 0 for the onboard webcam.
# Use 1 for an external webcam.
#cap = cv2.VideoCapture("jellyfish_video.mp4")
cap = cv2.cv.CaptureFromFile("jellyfish_video.mp4")
#print cap.grab()

nframes = int(cv2.cv.GetCaptureProperty(cap, cv2.cv.CV_CAP_PROP_FRAME_COUNT))

print nframes

# Show a crude frame-difference view at roughly 2 fps.
prev = cv.QueryFrame(cap)
for f in xrange(nframes):
    frameimg = cv.QueryFrame(cap)
    print " currpos of videofile", cv.GetCaptureProperty(
        cap, cv.CV_CAP_PROP_POS_MSEC)
    print " index of frame", cv.GetCaptureProperty(cap,
                                                   cv.CV_CAP_PROP_POS_FRAMES)
    print type(frameimg)
    # NOTE(review): `prev` is never updated inside the loop, so this is
    # always a diff against the very first frame -- confirm intent.
    diff = frameimg - prev
    cv.ShowImage("hcq", diff)
    #cv.ShowImage("hcq",frameimg)
    time.sleep(0.5)
    cv.WaitKey(1)
Example #13
0
def parseVedio(_,filename,Id,url):
    """Scan a video for faces and record the (Id, url) verdict.

    Every `fps`-th frame (plus three flipped/rotated variants) is run
    through Haar cascades; once 15 cumulative detections occur the
    video is classified as containing a face and (Id, url) is written
    to the module-level `yesfd` file, otherwise to `nofd`.

    NOTE(review): relies on module-level names not visible here
    (c_f, c_m, yesfd, nofd) -- confirm they are defined at import time.
    """

    def ifFace(img,size):
        # Return 1 (and save a snapshot) when the grayscale image has a
        # face+mouth detection or a "body" detection, else 0.
        gray=cv.CreateImage(size,8,1)
        cv.CvtColor(img,gray,cv.CV_BGR2GRAY)
        newMem1=cv.CreateMemStorage(0)
        newMem2=cv.CreateMemStorage(0)
        newMem3=cv.CreateMemStorage(0)
        cv.EqualizeHist(gray,gray)
        face=cv.HaarDetectObjects(gray,c_f,newMem1,1.2,3,cv.CV_HAAR_DO_CANNY_PRUNING,(50,50))
        mouth=cv.HaarDetectObjects(gray,c_m,newMem2,1.2,2,cv.CV_HAAR_DO_CANNY_PRUNING,(10,10))
        # NOTE(review): the "body" pass reuses the mouth cascade c_m --
        # possibly a copy/paste slip; confirm the intended cascade.
        body=cv.HaarDetectObjects(gray,c_m,newMem3,1.2,2,cv.CV_HAAR_DO_CANNY_PRUNING,(100,100))
        if face and mouth or body:
            cv.SaveImage("img/out.jpg",img)
            return 1
        else:
            return 0

    capture=cv.CaptureFromFile(filename)
    width=cv.GetCaptureProperty(capture,cv.CV_CAP_PROP_FRAME_WIDTH)
    height=cv.GetCaptureProperty(capture,cv.CV_CAP_PROP_FRAME_HEIGHT)
    size=(int(width),int(height))
    # Sample one frame out of every `fps` frames.
    fps=15
    i=0
    # Detection counter, in a list so the nested closures can mutate it.
    count=[0]

    def scanFaces(src):
        # Run ifFace on four orientations of `src` and report whether
        # the cumulative detection count has reached 15.
        total=0
        c=cv.CloneImage(src)
        frams=[]
        frams.append(src)  # original orientation

        cv.Flip(c,None,0)
        frams.append(c)  # flipped copy

        dst=cv.CreateImage((src.height,src.width),
                src.depth,src.channels)
        cv.Transpose(src,dst)
        cv.Flip(dst,None,0)
        frams.append(dst) # rotated 90 degrees counter-clockwise

        c2=cv.CloneImage(src)
        cv.Flip(c2,None,0)
        dst=cv.CreateImage((src.height,src.width),
                src.depth,src.channels)
        cv.Transpose(c2,dst)
        frams.append(dst) # rotated 90 degrees clockwise

        for i,img in enumerate(frams):
            count[0]+=ifFace(img,(img.width,img.height))

        if count[0]>=15:
            return True
        else:
            return False


    while True:
        img=cv.QueryFrame(capture)
        if not img:break
        if int((i+1)%fps)==0:
            if scanFaces(img):
                mess="%s:有脸"%filename
                yesfd.write("%s %s\n"%(Id,url))
                yesfd.flush()
                print mess
                return None
        i+=1
    mess="%s:无脸"%filename
    nofd.write("%s %s\n"%(Id,url))
    nofd.flush()
    print mess
Example #14
0
    def __init__(self):
        """ROS node that republishes an AVI file as sensor_msgs/Image.

        Reads the ~input video with the legacy cv API, optionally
        resizes frames to ~width x ~height, overlays a keyboard-help
        text layer, and publishes each frame on the ~output topic until
        shutdown.  Keys: q quit, space pause, r restart, t toggle help.
        """
        rospy.init_node('avi2ros', anonymous=True)

        self.input = rospy.get_param("~input", "")
        self.output = rospy.get_param("~output", "video_output")
        self.fps = rospy.get_param("~fps", 25)
        self.loop = rospy.get_param("~loop", False)
        self.width = rospy.get_param("~width", "")
        self.height = rospy.get_param("~height", "")
        self.start_paused = rospy.get_param("~start_paused", False)
        self.show_viz = not rospy.get_param("~headless", False)
        self.show_text = True

        image_pub = rospy.Publisher(self.output, Image, queue_size=10)

        rospy.on_shutdown(self.cleanup)

        video = cv.CaptureFromFile(self.input)
        fps = int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FPS))

        """ Bring the fps up to the specified rate """
        # NOTE(review): fps * self.fps / fps simplifies to self.fps; the
        # except branch fires only when the file reports fps == 0
        # (ZeroDivisionError).  The bare except also hides other errors.
        try:
            fps = int(fps * self.fps / fps)
        except:
            fps = self.fps

        if self.show_viz:
            cv.NamedWindow("AVI Video", True) # autosize the display
            cv.MoveWindow("AVI Video", 650, 100)

        bridge = CvBridge()

        self.paused = self.start_paused
        self.keystroke = None
        self.restart = False

        # Get the first frame to display if we are starting in the paused state.
        frame = cv.QueryFrame(video)
        image_size = cv.GetSize(frame)

        if self.width and self.height and (self.width != image_size[0] or self.height != image_size[1]):
            rospy.loginfo("Resizing! " + str(self.width) + " x " + str(self.height))
            resized_frame = cv.CreateImage((self.width, self.height), frame.depth, frame.channels)
            cv.Resize(frame, resized_frame)
            frame = cv.CloneImage(resized_frame)

        # Separate layer holding the rendered help text, re-zeroed each loop.
        text_frame = cv.CloneImage(frame)
        cv.Zero(text_frame)

        while not rospy.is_shutdown():
            """ Handle keyboard events """
            self.keystroke = cv.WaitKey(1000 / fps)

            """ Process any keyboard commands """
            if 32 <= self.keystroke and self.keystroke < 128:
                cc = chr(self.keystroke).lower()
                if cc == 'q':
                    """ user has press the q key, so exit """
                    rospy.signal_shutdown("User hit q key to quit.")
                elif cc == ' ':
                    """ Pause or continue the video """
                    self.paused = not self.paused
                elif cc == 'r':
                    """ Restart the video from the beginning """
                    self.restart = True
                elif cc == 't':
                    """ Toggle display of text help message """
                    self.show_text = not self.show_text

            if self.restart:
                #video = cv.CaptureFromFile(self.input)
                print "restarting video from beginning"
                cv.SetCaptureProperty(video, cv.CV_CAP_PROP_POS_AVI_RATIO, 0)
                self.restart = None

            if not self.paused:
                frame = cv.QueryFrame(video)
                if frame and self.width and self.height:
                    if self.width != image_size[0] or self.height != image_size[1]:
                        cv.Resize(frame, resized_frame)
                        frame = cv.CloneImage(resized_frame)

            if frame == None:
                if self.loop:
                    self.restart = True
            else:
                if self.show_text:
                    frame_size = cv.GetSize(frame)
                    text_font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 0.2, 1, 0, 1, 8)
                    cv.PutText(text_frame, "Keyboard commands:", (20, int(frame_size[1] * 0.6)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, " ", (20, int(frame_size[1] * 0.65)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, "space - toggle pause/play", (20, int(frame_size[1] * 0.72)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, "     r - restart video from beginning", (20, int(frame_size[1] * 0.79)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, "     t - hide/show this text", (20, int(frame_size[1] * 0.86)), text_font, cv.RGB(255, 255, 0))
                    cv.PutText(text_frame, "     q - quit the program", (20, int(frame_size[1] * 0.93)), text_font, cv.RGB(255, 255, 0))

                cv.Add(frame, text_frame, text_frame)
                if self.show_viz:
                    cv.ShowImage("AVI Video", text_frame)
                cv.Zero(text_frame)

                try:
                    test = np.asarray(frame[:,:])
                    publishing_image = bridge.cv2_to_imgmsg(test, "bgr8")
                    image_pub.publish(publishing_image)
                except CvBridgeError, e:
                    print e
 def get_size(self):
     """Return the capture's frame size as a (width, height) int tuple."""
     props = (cv.CV_CAP_PROP_FRAME_WIDTH, cv.CV_CAP_PROP_FRAME_HEIGHT)
     return tuple(int(cv.GetCaptureProperty(self.cam, p)) for p in props)
 def get_fps(self):
     """Return the capture's frame rate, or 30.0 when it reports -1."""
     reported = cv.GetCaptureProperty(self.cam, cv.CV_CAP_PROP_FPS)
     if reported == -1:
         return 30.0
     return reported
Example #17
0
import cv2
import cv2.cv as cv
#c= cv2.VideoCapture("Tack1.mp4")
#print c.grab() # returns false. however returns true if use avi file.

cap = cv.CaptureFromFile("Tack1.Mp4")

nframes = int(cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_COUNT))
fps = int(cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FPS))
print "total frame", cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_COUNT)
print "fps", fps
print " currpos of videofile", cv.GetCaptureProperty(cap,
                                                     cv.CV_CAP_PROP_POS_MSEC)
waitpermillisecond = int(1 * 1000 / fps)
print "waitpermillisecond", waitpermillisecond
print cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FOURCC)

for f in xrange(nframes):
    frameimg = cv.QueryFrame(cap)
    print " currpos of videofile", cv.GetCaptureProperty(
        cap, cv.CV_CAP_PROP_POS_MSEC)
    print " index of frame", cv.GetCaptureProperty(cap,
                                                   cv.CV_CAP_PROP_POS_FRAMES)
    cv.ShowImage("hcq", frameimg)
    cv.WaitKey(1)

cv.DestroyAllWindows("hcq")
Example #18
0
import cv2
import cv2.cv as cv

# Face detector used to crop training samples from the video.
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture("video1.mp4")
# The legacy capture is opened on the same file only to query the
# frame-count / fps properties below; frames are read through `cam`.
cap = cv.CaptureFromFile("video1.mp4")

nframes = int(cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_COUNT))
fps = int(cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FPS))

print nframes

Id = raw_input('enter your id')
sampleNum = 0

# Walk the video frame by frame and save every detected face crop as a
# dataset image named dataSet/User.<Id>.<n>.jpg.
for f in xrange(nframes):
    ret, frameimg = cam.read()
    gray = cv2.cvtColor(frameimg, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frameimg, (x, y), (x + w, y + h), (255, 0, 0), 2)

        #incrementing sample number
        sampleNum = sampleNum + 1
        #saving the captured face in the dataset folder
        cv2.imwrite("dataSet/User." + Id + '.' + str(sampleNum) + ".jpg",
                    gray[y:y + h, x:x + w])
        cv2.imshow('frame', frameimg)

    cv.WaitKey(1)
Example #19
0
import cv2.cv as cv

capture = cv.CaptureFromFile('img/paulvideo.avi')

nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
# Display delay per frame, in milliseconds.
wait = int(1 / fps * 1000 / 1)

# Single-channel scratch image reused by the grayscale/edge pipeline.
dst = cv.CreateImage(
    (int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH)),
     int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))), 8, 1)

# Show the original video next to an inverted binary edge map
# (grayscale -> Canny -> inverted threshold).
for f in xrange(nbFrames):

    frame = cv.QueryFrame(capture)

    cv.CvtColor(frame, dst, cv.CV_BGR2GRAY)
    cv.Canny(dst, dst, 125, 350)
    cv.Threshold(dst, dst, 128, 255, cv.CV_THRESH_BINARY_INV)

    cv.ShowImage("The Video", frame)
    cv.ShowImage("The Dst", dst)
    cv.WaitKey(wait)
Example #20
0
def main(argv = sys.argv):
    """Track on-screen damage percentages through a game video via
    template matching and write a (time, percent1, percent2) CSV.

    NOTE(review): relies on helpers and constants defined elsewhere in
    the module (get_args, read_and_preprocess_frame, load_resources,
    find_zeros, extend_locations, draw_around_percents,
    compare_with_previous, match_to_number, calculate_total_percent,
    DIFF_METHOD, HEIGHT, WIDTH, BUFFER_SIZE) -- confirm availability.
    """
    file_name, frames_to_start = get_args(argv)

    # is 1400, for falconDitto, 150 for mangoFalco

    cap = cv2.VideoCapture(file_name)

    fps = cv.GetCaptureProperty(cv.CaptureFromFile(file_name), cv.CV_CAP_PROP_FPS)
    print "Frames Per Second: " + str(fps)
    # hardcode to find start of match now...should be able to find this programmatically
    for i in range(1, frames_to_start):
        cap.read()
    ret, frame = read_and_preprocess_frame(cap, file_name)

    # find where percentages are using template matching
    # load the zero template, use matchTemplate to find spots which are closest to it
    number_templates = load_resources()
    zero = number_templates[0]
    # locations_found is the places where we think the zeros are
    # its an list of x,y pairs
    locations_found = find_zeros(frame, zero, DIFF_METHOD)
    extended_locations_found, _ = extend_locations(locations_found)
    # draw a rectangle around each location, using hardcoded values of size of percents
    draw_around_percents(frame, extended_locations_found)

    _, previous_frame = read_and_preprocess_frame(cap, file_name)
    prev_stability = False
    frames_elapsed = 0
    percent_series_1 = []
    percent_series_2 = []
    time_series = []
    while(cap.isOpened()):
        ret ,frame = read_and_preprocess_frame(cap, file_name)
        frames_elapsed += 1
        if ret == True:
            cv2.imshow('frame', frame)
            # cv2.waitKey(0)
            if not compare_with_previous(previous_frame, frame, locations_found):
                # percentage will shake around, making it unstable
                # wait until stable again to look for difference between it and previous one
                cur_stability = False
            else:
                cur_stability = True
            # if we've stabilized, check both percentages to see whats changed
            if cur_stability and not prev_stability:
                best_guesses = []
                for idx, location in enumerate(extended_locations_found):
                    # Crop the digit region (with a small buffer) around
                    # each tracked location.
                    candidate = frame[location[1]:location[1] + HEIGHT + (2 * BUFFER_SIZE), location[0]:location[0] + WIDTH + (2 * BUFFER_SIZE)]
                    # cv2.imshow('candidate', candidate)
                    # cv2.waitKey(0)
                    best_guess = match_to_number(candidate, number_templates)
                    # print "location: " + str(idx)
                    # print "guessed percent: " + str(best_guess)
                    best_guesses.append(best_guess)
                percent_1 = calculate_total_percent(best_guesses[0], best_guesses[1], best_guesses[2])
                percent_2 = calculate_total_percent(best_guesses[3], best_guesses[4], best_guesses[5])
                time_elapsed = float(frames_elapsed)/fps
                print "Location 1: " + str(percent_1) + " Location 2: " + str(percent_2) + " at frame " + str(frames_elapsed)
                percent_series_1.append(percent_1)
                percent_series_2.append(percent_2)
                time_series.append(frames_elapsed)
                #cv2.waitKey(0)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
            prev_stability = cur_stability
        else:
            break
        previous_frame = frame
    # Dump the collected series as CSV under data/<video-name>.csv.
    if not os.path.exists('data'):
        os.makedirs('data')
    file_name_stripped = file_name.split('.')[0]
    f = open('data/' + file_name_stripped + '.csv','w')
    for idx, time_stamp in enumerate(time_series):
        f.write(str(time_stamp) + ', ' + str(percent_series_1[idx]) + ', ' + str(percent_series_2[idx]) + '\n')
    f.close()

    cv2.destroyAllWindows()
    cap.release()
import cv2.cv as cv

# Open the input video with the legacy (pre-cv2) capture API.
capture = cv.CaptureFromFile('img/micnew.avi')

#-- Information about the video --
nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
# Delay (ms) between frames needed to play back at the native frame rate.
wait = int(1 / fps * 1000 / 1)
width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
#For recording
#codec = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FOURCC)
#writer=cv.CreateVideoWriter("img/output.avi", int(codec), int(fps), (width,height), 1) #Create writer with same parameters
#----------------------------------

prev_gray = cv.CreateImage((width, height), 8, 1)  #Will hold the frame at t-1
gray = cv.CreateImage((width, height), 8, 1)  # Will hold the current frame

# 3-channel frame used for drawing the visualised result.
output = cv.CreateImage((width, height), 8, 3)

# Work buffers for the legacy pyramidal Lucas-Kanade optical-flow API.
prevPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)
currPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)

# GoodFeaturesToTrack parameters: max corners, quality level, min distance.
max_count = 500
qLevel = 0.01
minDist = 10

begin = True  # first-iteration flag for the processing loop (not shown here)

# Feature point lists filled by the tracking loop (continuation not shown).
initial = []
features = []
Example #22
0
#! usr/bin/python
#coding=utf-8
'''
Read and display local 'video_demo.avi' file
'''
import cv2
import cv2.cv as cv

capture = cv.CaptureFromFile('video_demo_1.avi')

nbFrames = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))

#CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream
#CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream

fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)

wait = int(1 / fps * 1000 / 1)

duration = (nbFrames * fps) / 1000

print 'Num. Frames = ', nbFrames
print 'Frame Rate = ', fps, 'fps'
print 'Duration = ', duration, 'sec'

for f in xrange(nbFrames):
    frameImg = cv.QueryFrame(capture)
    print cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_POS_FRAMES)
    cv.ShowImage("The Video", frameImg)
    if cv2.waitKey(1) == 27:
        break  # esc to quit
Example #23
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import cv2.cv as cv
import cv2

if __name__ == '__main__':
    # Show the default camera feed alongside an inverted edge map.
    capture = cv.CaptureFromCAM(0)
    cam_w = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    cam_h = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    # Single-channel 8-bit buffer, reused in place for every frame.
    dst = cv.CreateImage((cam_w, cam_h), 8, 1)
    while True:
        frame = cv.QueryFrame(capture)
        # Grayscale -> Canny edges -> inverted binary, all written into dst.
        cv.CvtColor(frame, dst, cv.CV_BGR2GRAY)
        cv.Canny(dst, dst, 125, 350)
        cv.Threshold(dst, dst, 128, 255, cv.CV_THRESH_BINARY_INV)
        cv.ShowImage("The Video", frame)
        cv.ShowImage("The Dst", dst)
        if cv.WaitKey(1) == 27:  # Esc quits
            break
Example #24
0
 def run(self):
     """Main playback loop.

     Steps the capture forward until it reaches ``self.iframe``, drawing
     each retrieved frame, and handles keyboard/mouse interaction:
     space toggles play/pause, arrow up/down changes playback speed,
     and ESC followed by ENTER quits.
     """
     self.text = ""
     play = True
     delay = 200      # inter-frame wait (ms) while playing
     pos = 0          # frame index last read from the capture
     newPos = 0       # requested frame index (may run ahead of pos)
     skipping = False
     print("")
     print("start running!")
     print("")
     while True:
         if play or (newPos != pos):
             # Advance the capture until it catches up with self.iframe.
             while int(self.iframe) >= int(pos):
                 if int(self.iframe) > int(pos):
                     print('Skip frame --' + str(int(pos)) + ' [%d %%]' %
                           (100.0 * pos / self.iframe))
                     skipping = True
                 if cv.GrabFrame(self.capture):
                     pos = cv.GetCaptureProperty(self.capture,
                                                 cv.CV_CAP_PROP_POS_FRAMES)
                     newPos = pos
                     self.src = cv.RetrieveFrame(self.capture)
                     self.checkSaveImage(pos)
                     self.draw()
                 else:
                     # GrabFrame fails once the stream is exhausted.
                     print(
                         "Grab frame failed! It must be the end of the video."
                     )
                     self.finalize()
                     exit(0)
             self.iframe = int(pos)
         if play:
             # if playing, wait the necessary time to maintain the fps
             if skipping:
                 # after a frame skip, drop into pause so the user can inspect
                 play = False
                 skipping = False
             c = cv.WaitKey(delay)
         else:
             # if paused, poll quickly so the UI stays responsive
             c = cv.WaitKey(10)
         if not play:
             if self.handleMouse():
                 # if saved a new plate, step to the next frame
                 newPos = pos + 1
         if c == -1:
             continue
         # keep only the low byte; WaitKey may carry modifier bits above it
         c &= 0xFF
         if c == 32:  # space bar (32 is ASCII space, not backspace)
             play = not play
             if play:
                 self.printf("    PLAY")
                 cv.SetMouseCallback("Camera", self.on_dummy, param=pos)
             else:
                 self.printf("              -> PAUSE")
                 cv.SetMouseCallback("Camera", self.on_mouse, param=0)
         elif c == 82:  # arrow up (backend key code): faster playback
             delay = 25
         elif c == 84:  # arrow down (backend key code): normal speed
             delay = 200
         elif c == 27:  # ESC: must be confirmed with ENTER to quit
             #print 'Are you sure about closing this program?'
             #opt = input('If the answer is "yes", type the result of 2x2: ')
             c = cv.WaitKey(0) & 0xFF
             if c == 10:  # ENTER (line feed)
                 print('Good bye, cruel world!')
                 self.finalize()
                 break
             else:
                 print('To exit, press ESC followed by ENTER')
Example #25
0
def compute(playerList, video):
    videoName = video
    capture = cv.CaptureFromFile(videoName)

    count = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
    fps = cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS)
    width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))

    # store the last frame
    preFrame = cv.CreateImage((width, height), 8, 1)
    # store the current frame
    curFrame = cv.CreateImage((width, height), 8, 1)

    prePyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)
    curPyr = cv.CreateImage((height / 3, width + 8), 8, cv.CV_8UC1)

    numOfPlayers = len(playerList)

    # store players moving distance
    players = np.zeros(numOfPlayers)

    # store players position of last frame
    prePlayers = playerList
    # store players position of current frame
    curPlayers = []

    img = cv.CreateImage((width, height), 8, 1)

    #flag of storing player info
    flagInfo = True

    for f in xrange(count):
        frame = cv.QueryFrame(capture)

        if (flagInfo):
            cv.CvtColor(frame, img, cv.CV_BGR2GRAY)
            for i in range(numOfPlayers):
                font = cv.InitFont(cv.CV_FONT_HERSHEY_SCRIPT_SIMPLEX, 0.4, 0.4,
                                   0, 2, 3)

                cv.PutText(
                    img, str(i),
                    (int(prePlayers[i][0][0]), int(prePlayers[i][0][1])), font,
                    (255, 255, 255))
            cv.SaveImage(playerInfo, img)
            flagInfo = False

        #Convert to gray
        cv.CvtColor(frame, curFrame, cv.CV_BGR2GRAY)

        #Calculate the movement using the previous and the current frame using the previous points
        curPlayers, status, err = cv.CalcOpticalFlowPyrLK(
            preFrame, curFrame, prePyr, curPyr, prePlayers, (10, 10), 3,
            (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03), 0)

        ###temp = frame
        # add new distance to list
        for i in range(numOfPlayers):
            players[i] += getDistance(prePlayers[i], curPlayers[i])
            ###cv.Line(temp, (int(prePlayers[i][0]), int(prePlayers[i][1])), (int(curPlayers[i][0]), int(curPlayers[i][1])), (255,122,122),3)

        ###cv.ShowImage("test", temp)
        ###cv2.waitKey(20)

        #Put the current frame preFrame
        cv.Copy(curFrame, preFrame)
        prePlayers = curPlayers
    ###cv2.destroyAllWindows()
    # print distance
    i = 0
    f = open(recordFile, 'w')
    for player in players:
        i += 1
        print "player", i, "running distance: ", player, "\n"
        f.write("player" + str(i) + " running distance: " + str(player) +
                "meters\n")
Example #26
0
def main():
    """Interactive camera viewer.

    Shows a grayscale feed with optional histogram equalization and an
    optional auto-tuned Canny edge view, and dispatches a large table of
    raw WaitKey codes (plain ASCII, X11 keysyms, and GTK codes > 1e6,
    with and without modifier/lock variants) to capture, toggle, and
    setting-adjustment commands.
    """
    cap = cv.CaptureFromCAM(0)
    cv.NamedWindow("camera", cv.CV_WINDOW_NORMAL)
    cv.SetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_WIDTH, 720)
    cv.SetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_HEIGHT, 540)
    # Read back the frame size the driver actually granted.
    cols = int(cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_WIDTH))
    rows = int(cv.GetCaptureProperty(cap, cv.CV_CAP_PROP_FRAME_HEIGHT))
    grey = cv.CreateImage((cols, rows), 8, 1)
    cumulated = cv.CreateImage((cols, rows), 8, 1)

    # Processing toggles, flipped at runtime via the keyboard.
    equalize = True
    laplace = False

    # Named tunables adjustable with the arrow keys.
    settings = {
        "canny_avg": 10,
    }

    # Canny hysteresis thresholds, auto-adapted while `laplace` is on.
    threshold1 = 600
    threshold2 = 200

    settings_names = sorted(settings.keys())
    setting_current = 0
    setting_name = settings_names[setting_current]

    while True:
        im = cv.QueryFrame(cap)
        cv.CvtColor(im, grey, cv.CV_BGR2GRAY)

        if equalize:
            # Smooth before equalizing to limit noise amplification.
            cv.Smooth(grey, grey, param1=5, param2=5)
            cv.EqualizeHist(grey, grey)

        if laplace:
            cv.Canny(grey, grey, threshold1, threshold2)
            # Steer edge density toward settings["canny_avg"] by nudging
            # the thresholds up or down.
            # NOTE(review): this averages `cumulated`, which is never
            # written anywhere -- presumably `grey` was intended; confirm.
            avg = cv.Avg(cumulated)[0]
            if avg > settings["canny_avg"] * 1.2:
                threshold1 *= 1.1
                threshold2 = threshold1 / 2.5
            if avg < settings["canny_avg"] / 1.2:
                threshold1 /= 1.1
                threshold2 = threshold1 / 2.5

        cv.ShowImage("camera", grey)

        key = cv.WaitKey(1)
        if key not in (-1, 1114085, 1245157): # None, block
            print("Key %d" % key)
            if key in ( # Capture one frame
                1048675, # c
                99, # c
                ):
                filenames = save_image(cap, 1)
                print("Capturing: %s" % ", ".join(list(filenames)))
            if key in ( # Capture ten frames
                1114179, # C
                1179715, # C (block)
                65603, # C
                131139, # C (block)
                ):
                filenames = save_image(cap, 10)
                print("Capturing: %s" % ", ".join(list(filenames)))

            elif key in ( # Toggle equalization
                1114181, # e
                1048677, # E
                1179717, # E (block)
                1245285, # e (block)
                101,     # e
                65605,   # E
                131141,  # E (block)
                196709,  # e (block)
                ):
                equalize = not equalize
                print("Equalize: %s" % equalize)

            elif key in ( # Toggle laplace
                1179724, # l
                1048684, # L (block(
                1114188, # L
                108,
                65612,
                131148,
                196716,
                ):
                laplace = not laplace
                print("Laplace: %s" % laplace)

            elif key in ( # Increment value
                1113938, # Up
                65362,
                ):
                settings[setting_name] += 1
                print("%s := %d" % (setting_name, settings[setting_name]))

            elif key in ( # Decrement value
                1113940, # Down
                65364,
                ):
                settings[setting_name] -= 1
                print("%s := %d" % (setting_name, settings[setting_name]))

            elif key in ( # Next setting
                1113939, # Right
                65363,
                ):
                setting_current = (setting_current + 1) % len(settings_names)
                setting_name = settings_names[setting_current]
                print("%s : %d" % (setting_name, settings[setting_name]))

            elif key in ( # Prev setting
                1113937, # Left
                65361,
                ):
                setting_current = (setting_current - 1) % len(settings_names)
                setting_name = settings_names[setting_current]
                print("%s : %d" % (setting_name, settings[setting_name]))

            elif key in ( # Exit
                27, # ESC
                1048603, # ESC
                1114193, # q
                1048689, # Q
                1179729, # Q (block)
                1245297, # q (block)
                113,
                65617,
                131153,
                196721,
                ):
                break
Example #27
0
    sys.exit()

cv.NamedWindow("camera", 1)
capture = cv.CreateCameraCapture(0)

# Diagnostics log; `subject` and `video` must be defined earlier in the file.
f = open('data/' + subject + '/' + video + '/diagnostics.txt', 'w')
#font = cv.CvFont
font = cv.InitFont(1, 1, 1, 1, 1, 1)

# Requested capture size.
# NOTE(review): width/height are set to None and then immediately
# overwritten with 320/240, so the `is None` fallbacks below are dead
# code; delete the 320/240 lines to use the camera's native size.
width = None
height = None
width = 320
height = 240

if width is None:
    width = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
else:
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH, width)

if height is None:
    height = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
else:
    cv.SetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT, height)

# 3-channel 8-bit frame buffer for the processed output.
result = cv.CreateImage((width, height), cv.IPL_DEPTH_8U, 3)

mqLoop = 0

#openCV functions

Example #28
0
def convertToPngs(movieName, frameOutName, wdir='', \
     startFrame=0, endFrame=499, maxDim = 128):
    """
	Converts a saved movie into a collection of png frames

		movieName: name of movie file

		frameOutName: prefix of each frame to be written out
						should not have image type at the end

		wdir: working directory (i.e. where the movie is and
				where the frames will be written). In general
				this should be its own directory for each movie,
				since there are many frames in a given movie.

		startFrame: first frame # to be written out

		endFrame: last frame # to be written out

		maxDim: the maximum number of elements in any one dimension
				of the output image. This should be an integer, but
				if maxDim = False, then it will save the frames
				in their original size.
	"""
    # change to working directory
    os.chdir(wdir)
    # strip frame prefix of unnecessary suffixes
    frameOutName = frameOutName.replace(".png", '')
    frameOutName = frameOutName.replace(".jpeg", '')

    # initiate movie stream
    capture = cv.CaptureFromFile("C:\Users\Tom\Desktop\doom.mp4")

    # extract frame size
    nCols = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    nRows = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    size = (nRows, nCols)
    maxFrameDim = max(size)

    # compute rescaling required based upon input
    #scale =float(maxFrameDim)/float(maxDim)
    newSize = size  #(int(floor(size[0]/scale + .5)), int(floor(size[1]/scale + .5)) )

    # extract number of frames in video
    NframesTot = int(cv.GetCaptureProperty(capture,
                                           cv.CV_CAP_PROP_FRAME_COUNT))

    k = 0

    # loop over frames, writing out those in desired range.
    for k in xrange(NframesTot):
        # i assume that there is no way to start at a particular frame
        # and that we have to loop over all of them sequentially
        frame = cv.QueryFrame(capture)

        if k >= startFrame:
            # TODO: we could put this in a try, except condition,
            # but I'm happy to just let it fail naturally if there is a problem
            # since it is writing out the frames as it progresses, we won't
            # lose anything.
            if maxDim:
                smallFrame = cv.CreateImage(newSize, frame.depth,
                                            frame.nChannels)
                cv.Resize(frame, smallFrame)
                frame = smallFrame
            cv.SaveImage(frameOutName + "{0:04d}.png".format(k), frame)

        if k >= endFrame:
            break
        k += 1
    print '\n\nConverted {0} frames'.format(k)
    return 0