Example #1
import os
import sys
import time

import cv2
import numpy as np

import vibe  # project-local ViBe background-subtraction module

# Minimum number of command-line arguments (the input video path), inferred
# from the usage message below.
MIN_ARGS = 1


def main():
    # Print a usage message if too few arguments were supplied.
    if len(sys.argv) < MIN_ARGS + 1:
        print("usage: %s <video file>" % sys.argv[0])
        return

    # Grab the argument.
    source_path = sys.argv[1]

    # Open the video.
    cap = cv2.VideoCapture(source_path)
    if not cap.isOpened():
        print("Error: unable to open video:", source_path)
        return

    # Split the extension of the source video.
    sourceName, file_extension = os.path.splitext(source_path)

    # Get video parameters (try to retain same attributes for output video)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = float(cap.get(cv2.CAP_PROP_FPS))
    codec = int(cap.get(cv2.CAP_PROP_FOURCC))

    # Create a video writer so that the results can be saved.
    output_path = sourceName + '_outvibe' + file_extension
    writer = cv2.VideoWriter(output_path, codec, fps, (width, height))
    writerFilter = cv2.VideoWriter("FilteredOutput.avi", codec, fps, (width, height))
    if not writer.isOpened():
        print("Error: could not open the output video for writing:", output_path)
        return

    # Initialize the ViBe background model and time how long it takes.
    startTime = time.time()
    model = vibe.Model(cap)
    endTime = time.time()

    # Display time results.
    totalTime = endTime - startTime
    print "init time: %g" % totalTime

    # Calculate and display megapixels.
    megapixels = height * width / 1000000.0
    print "megapixels: %g" % megapixels

    # Wrap the loop in try/finally so the output video is saved even if processing is interrupted.
    try:
        # Keep track of the last input key, the frames, and time.
        k = 0
        frames = 0
        startTime = time.time()

        # Main frame processing loop
        # Loop until the user presses escape.
        while k != 27:  # 27 is the Escape key
            # Grab next frame.
            ret, frame = vibe.get_frame(cap)
            if not ret:
                break

            # Run ViBe on the current frame to update the model.
            frameStartTime = time.time()
            model.update(frame)
            frameEndTime = time.time()

            # Display per-frame timing statistics.
            print("Frame:", frames)
            frames += 1
            endTime = time.time()
            totalTime = endTime - startTime
            print("seconds this frame: %f" % (frameEndTime - frameStartTime))
            timeForEachFrame = totalTime / frames
            print("average seconds for each frame: %f" % timeForEachFrame)
            print("average megapixels a second: %f" % (megapixels / timeForEachFrame))

            # Overlay the current frame with the results.
            # channels = cv2.split(frame)
            # blank_image = numpy.zeros((height, width), numpy.uint8)
            # combined = model.foreGround

            # Upsample the foreground mask and the frame to display resolution.
            outImageUp = cv2.pyrUp(model.foreGround)
            frame = cv2.pyrUp(frame)

            # Filter the mask and run contour detection (helpers defined elsewhere in the source).
            closing = postProcessing(outImageUp)
            contourDetection(closing, frame, writerFilter)

            # Expand the single-channel mask into a three-channel image for display and writing.
            channel = np.zeros((height, width, 1), np.uint8)
            combined = cv2.merge((
                cv2.bitwise_or(channel, outImageUp),
                cv2.bitwise_or(channel, outImageUp),
                cv2.bitwise_or(channel, outImageUp)
            ))

            # combinedFiltered = cv2.merge((
            #     cv2.bitwise_or(channel, closing),
            #     cv2.bitwise_or(channel, closing),
            #     cv2.bitwise_or(channel, closing)
            # ))

            # Show the result and write it to the output video.
            cv2.imshow('image', combined)
            #cv2.imshow('Filtered', combinedFiltered)
            writer.write(combined)
            #writerFilter.write(combinedFiltered)

            # Grab the key pressed.
            k = cv2.waitKey(100)
    except KeyboardInterrupt:
        pass
    finally:
        print "Writing video to file."
        writer.release()
        cap.release()
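
The postProcessing and contourDetection helpers called in the loop are not shown in this excerpt. A minimal sketch of what they might look like, assuming postProcessing applies a morphological closing to the foreground mask and contourDetection draws bounding boxes around the remaining blobs before writing the annotated frame to the filtered-output writer:

import cv2
import numpy as np


def postProcessing(mask):
    # Assumed behavior: suppress speckle noise and fill small holes in the
    # binary foreground mask with a 5x5 morphological closing.
    kernel = np.ones((5, 5), np.uint8)
    return cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)


def contourDetection(mask, frame, writer):
    # Assumed behavior: outline each sufficiently large foreground blob on the
    # frame and write the annotated frame to the filtered-output video.
    # (OpenCV 4.x findContours signature; 3.x also returns the modified image first.)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        if cv2.contourArea(contour) < 50:  # ignore tiny detections
            continue
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    writer.write(frame)


if __name__ == "__main__":
    main()  # entry point, not shown in the excerpt above

Saved as, say, run_vibe.py (the file name is hypothetical), the script would be run as python run_vibe.py input.mp4 and would produce input_outvibe.mp4 plus FilteredOutput.avi in the working directory.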