def processAndAnalyzeVideo( truth_cap, input_cap, csv_writer, params,
        model, writer, diffWriter, overlayWriter, height, width ):
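    """Process the input video frame by frame and optionally evaluate it.

    Frames are read from input_cap, preprocessed, and passed to the model via
    processFrame. When params.doEval is set, each output frame is compared
    against the corresponding ground-truth frame and per-frame statistics are
    printed and/or written to the CSV writer. Returns the final frame number,
    the number of processed frames, the number of evaluated frames, and the
    accumulated TruthComparisonStats.
    """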

    # Truth and input must have the same width/height and the same number of frames.
    # Refresh width/height from the input capture so they reflect the actual video.
    width = int(input_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(input_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Calculate and display megapixels per frame.
    megapixels = height * width / 1000000.0
    if not params.no_out:
        print "megapixels: %g" % megapixels

    # Init running totals
    frame_number = 0
    videoStats = e.TruthComparisonStats()
    
    if params.doEval:
        if not params.no_out:
            print "Synchronizing ground truth with input..."
        numberOfFramesTheInputIsAheadBy = int(
            input_cap.get(cv2.CAP_PROP_POS_FRAMES)
            - truth_cap.get(cv2.CAP_PROP_POS_FRAMES)
        )
        # Fast forward the ground truth if necessary.
        for i in range( 0, numberOfFramesTheInputIsAheadBy ):
            truth_ret = truth_cap.grab()

            if not truth_ret:
                raise Exception( "Ground truth does not have enough frames." )
        numberOfFramesTheTruthIsAheadBy = -numberOfFramesTheInputIsAheadBy
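        # (Negative or zero when the input was ahead or already aligned,
        # which makes the fast-forward loop below a no-op.)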
        # Fast forward the input if necessary.
        for i in range( 0, numberOfFramesTheTruthIsAheadBy ):
            input_ret = input_cap.grab()

            if not input_ret:
                raise Exception( "Input truth does not have enough frames." )

    # Track the absolute frame index within the input video.
    frame_number = int(input_cap.get(cv2.CAP_PROP_POS_FRAMES))
    try:
        # Keep track of the last key pressed, frame counts, and elapsed time.
        k = 0
        processedFrames = 0
        evalFrames = 0
        startTime = time.time()
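        # Evaluation only starts at the first ground-truth frame that is not
        # marked as skippable.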
        hasReachedFirstEvalFrame = False
        
        # Process frames until ESC is pressed, the stop frame is passed, or a
        # stream runs out of frames.
        while k != 27 and frame_number <= params.stopFrame:
            # Placeholder so the None check below passes when not in eval mode.
            truth_frame = 1
            if params.doEval:
                truth_ret, truth_frame = truth_cap.read()
            input_ret, input_frame = input_cap.read()
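            # Keep a reference to the original frame for the overlay output;
            # input_frame itself is rebound to its preprocessed copy below.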
            input_frame_overlay = input_frame

            if truth_frame is None or input_frame is None:
                break

            if params.showInput:
                cv2.imshow('Input', input_frame)
            if params.showTruth and params.doEval:
                cv2.imshow('truth_frame', truth_frame )
            k = cv2.waitKey(20)
            
            if not params.no_out:
                print "Frame:", frame_number
            
            # Preprocess the frame (pyrNum presumably controls pyramid
            # downsampling in vibe.preprocess_frame).
            input_frame = vibe.preprocess_frame(input_frame, params.pyrNum)
            
            # Run the model on the frame; toShow is the visualization for
            # display and toFile is the frame written to the output video and
            # compared against ground truth.
            toShow, toFile = processFrame(
                model, writer, input_frame, height, width, params )
            processedFrames += 1
            
            # Do not start evaluating until the first ground-truth frame that
            # is not marked as skippable.
            if params.doEval and not hasReachedFirstEvalFrame:
                hasReachedFirstEvalFrame = (
                    not isTruthFrameSkippable(truth_frame)
                )

            if params.doEval and hasReachedFirstEvalFrame:
                evalFrames += 1
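                # Compare the model output against the ground-truth frame;
                # diffFrame is a visualization used only when showDiff is set.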
                truthComparisonStats, diffFrame = \
                    e.CalculateFrameStats( height, width, truth_frame, toFile,
                        params.showDiff )
                metaStats = e.TruthMetaComparisonStats( truthComparisonStats )
                
                if params.showDiff:
                    diffWriter.write( diffFrame )
                    cv2.imshow('diffFrame', diffFrame )

                # Output running stats
                if not params.no_out:
                    truthComparisonStats.printOut( "Frame", frame_number)
                    metaStats.printOut( "     ", frame_number)

                # Write running stats
                if not params.no_csv:
                    frameStats = e.FrameStats(truthComparisonStats, metaStats)
                    csvArray = frameStats.GetCSVArray( "Frame", frame_number )
                    csv_writer.writerow( csvArray )

                # Update totals
                videoStats.accumulate(truthComparisonStats)

            # Display statistics.
            endTime = time.time()
            totalTime = endTime - startTime
            timeForEachFrame = totalTime / processedFrames
            
            if not params.no_out:
                print "average seconds for each frame: %f" % timeForEachFrame
                print "average megapixels a second: %f" % (megapixels / 
                    timeForEachFrame)

            # Apply the detections, show the results, and write the output frame.
            applyDetection(toShow, input_frame_overlay)
            cv2.imshow('Processing Results', toShow)
            writer.write(toFile)

            # Show the preprocessed input frame.
            cv2.imshow('size', input_frame)
            k = cv2.waitKey(1)

            if params.showOverlay:
                overlayWriter.write( input_frame_overlay )
                cv2.imshow('overlayFrame', input_frame_overlay )


            # Grab the key pressed; only this final value is checked against
            # ESC in the loop condition.
            k = cv2.waitKey(100)
            
            frame_number += 1

    except KeyboardInterrupt:
        # Allow Ctrl-C to stop processing early while still returning stats.
        pass
    return frame_number, processedFrames, evalFrames, videoStats