def wrapper(video_path, city_name, location_name, minm_area, roi):

    # vo = TrafficCongestion(video_path, city_name, location_name)
    # vo.congestion(video_path, minm_area=minm_area,roi=roi,plot_intermediate=False)

    vo = VehicleCounter(video_path, city_name, location_name)
    vo.vehicle_counter(minm_area=minm_area,
                       roi=roi,
                       plot_intermediate=True,
                       check_interval=10,
                       method='quantile',
                       aggregation_time='3T',
                       congestion_threshold=0.20)
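
# A minimal invocation sketch for the wrapper above; the video path, city and
# location names, minimum contour area, and ROI polygon below are hypothetical
# placeholders, not values from the original project.
if __name__ == '__main__':
    wrapper(video_path='traffic.mp4',
            city_name='SampleCity',
            location_name='MainStreet',
            minm_area=400,
            roi=[(0, 300), (1280, 300), (1280, 720), (0, 720)])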
Example #2
def main():
    bg_subtractor = cv2.createBackgroundSubtractorMOG2()

    car_counter = None  # Will be created after first frame is captured

    # Set up image source

    #cap = cv2.VideoCapture("flow.mp4")
    cap = cv2.VideoCapture(URL)
    while True:
        ret, frame = cap.read()
        if not ret:
            print('failed')
        else:
            if car_counter is None:
                # We do this here, so that we can initialize with actual frame size
                #car_counter = VehicleCounter(frame.shape[:2], frame.shape[1] / 2)
                car_counter = VehicleCounter(frame.shape[:2], DIVIDER1,
                                             DIVIDER2, DIVIDER3, DIVIDER4,
                                             DIVIDER5, DIVIDER6)
                #print frame.shape
            # Archive raw frames from video to disk for later inspection/testing

            processed = process_frame(frame, bg_subtractor, car_counter)

            #cv2.imshow('Source Image', frame)
            cv2.imshow('Processed Image', processed)

            c = cv2.waitKey(10)
            if c == 27:
                break

    cap.release()
    cv2.destroyAllWindows()
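
# The examples in this listing call a process_frame() helper that is not shown.
# Below is a minimal sketch of the three-argument variant used above, assuming
# cv2 and numpy (np) are imported as elsewhere in the listing, OpenCV 4.x's
# two-value cv2.findContours() return, and a hypothetical
# car_counter.update_count(matches, output_image) interface.
def process_frame(frame, bg_subtractor, car_counter):
    processed = frame.copy()

    # Foreground mask from the background subtractor, cleaned with a small opening
    fg_mask = bg_subtractor.apply(frame)
    _, fg_mask = cv2.threshold(fg_mask, 127, 255, cv2.THRESH_BINARY)
    kernel = np.ones((3, 3), np.uint8)
    fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_OPEN, kernel)

    # Bounding boxes and centroids of sufficiently large blobs
    contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    matches = []
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if w * h < 400:  # hypothetical minimum blob area
            continue
        centroid = (x + w // 2, y + h // 2)
        matches.append(((x, y, w, h), centroid))
        cv2.rectangle(processed, (x, y), (x + w, y + h), (0, 255, 0), 1)

    # Hand the detections to the counter so it can track and count vehicles
    car_counter.update_count(matches, processed)
    return processed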
def main():
    log = logging.getLogger('main')

    log.debug('Creating background subtractor')
    bg_subtractor = cv2.createBackgroundSubtractorKNN(detectShadows=False)
    log.debug('Pre-training the background subtractor')
    # default_bg = cv2.imread(IMAGE_FILENAME_FORMAT % 119)
    # bg_subtractor.apply(default_bg, None, 1.0)

    car_counter = None

    log.debug('Initializing video capture device #%s', url)
    cap = cv2.VideoCapture(url)

    frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    log.debug('Video capture frame size=(w=%d, h=%d)', frame_width,
              frame_height)
    log.debug('Starting capture loop...')

    frame_number = -1
    while True:
        frame_number += 1
        log.debug('Capturing frame #%d', frame_number)
        ret, frame = cap.read()

        if not ret:
            log.error('Frame capture failed, stopping')
            break

        log.debug('Got frame #%d: shape=%s', frame_number, frame.shape)

        if car_counter is None:
            log.debug('Creating vehicle counter')
            car_counter = VehicleCounter(frame.shape[:2], frame.shape[0] // 2)

        log.debug('Processing frame #%d', frame_number)
        processed = process_frame(frame_number, frame, bg_subtractor,
                                  car_counter)
        save_frame(IMAGE_DIR + '/processed_%04d.png', frame_number, processed,
                   'processed frame #%d')

        log.debug('Frame #%d processed', frame_number)

        if __name__ == '__main__':
            cv2.imshow('Source Image', frame)
            cv2.imshow('Processed Image', processed)
            c = cv2.waitKey(WAIT_TIME)
            if c == 27:
                break
        else:
            cv2.imwrite('temp.jpg', processed)
            with open('temp.jpg', 'rb') as jpeg_file:
                jpeg_bytes = jpeg_file.read()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' +
                   jpeg_bytes + b'\r\n')

    log.debug('Closing video capture device')
    cap.release()
    cv2.destroyAllWindows()
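
# The yield branch above turns this main() into a generator of multipart JPEG
# chunks. A hedged sketch of how such a generator could be served over HTTP with
# Flask follows; the app object and route name are assumptions, not part of the
# original code.
from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # Stream the processed frames as an MJPEG response
    return Response(main(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')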
def main():
    bg_subtractor = cv2.createBackgroundSubtractorMOG2()
    fgbg = cv2.createBackgroundSubtractorMOG2()
    car_counter = None  # Will be created after first frame is captured

    # Set up image source

    cap = cv2.VideoCapture('v3.mp4')
    #cap = cv2.VideoCapture(URL)
    while True:
        ret, frame = cap.read()
        if not ret:
            print('failed')
        else:
            if car_counter is None:
                # We do this here, so that we can initialize with actual frame size
                #car_counter = VehicleCounter(frame.shape[:2], frame.shape[1] / 2)
                car_counter = VehicleCounter(frame.shape[:2], DIVIDER1,
                                             DIVIDER2, DIVIDER3, DIVIDER4,
                                             DIVIDER5, DIVIDER6)
                #print frame.shape
            # Archive raw frames from video to disk for later inspection/testing
            rows, cols, _ = frame.shape
            #-----------------------------------
            kernel = np.ones((2, 2), np.uint8)

            fgmask = fgbg.apply(frame)
            opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            #dilation = cv2.dilate(opening, kernel, iterations=1)

            #-----------------------------------

            M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
            dst = cv2.warpAffine(frame, M, (cols, rows))
            dst2 = cv2.warpAffine(opening, M, (cols, rows))
            processed = process_frame(dst, bg_subtractor, car_counter)
            #M2 = cv2.getRotationMatrix2D((cols/2,rows/2),0,1)
            #dst2 = cv2.warpAffine(processed,M2,(cols,rows))
            #cv2.imshow('Source Image', frame)
            cv2.imshow('Processed Image', processed)
            cv2.imshow('fg', dst2)
            c = cv2.waitKey(10)
            if c == 27:
                break

    cap.release()
    cv2.destroyAllWindows()
def main():
    bg_subtractor = cv2.createBackgroundSubtractorMOG2()

    car_counter = None  # Will be created after first frame is captured

    # Set up image source

    cap = cv2.VideoCapture('s1.mp4')
    #cap = cv2.VideoCapture(URL)
    while True:
        ret, frame = cap.read()
        if not ret:
            print('failed')
        else:
            if car_counter is None:
                # We do this here, so that we can initialize with actual frame size
                #car_counter = VehicleCounter(frame.shape[:2], frame.shape[1] / 2)
                car_counter = VehicleCounter(frame.shape[:2], DIVIDER1,
                                             DIVIDER2, DIVIDER3, DIVIDER4,
                                             DIVIDER5, DIVIDER6)
                #print frame.shape
            # Archive raw frames from video to disk for later inspection/testing
            #rows, cols, _ = frame.shape
            #M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
            #dst = cv2.warpAffine(frame, M, (cols, rows))
            processed = process_frame(frame, bg_subtractor, car_counter)
            #M2 = cv2.getRotationMatrix2D((cols/2,rows/2),0,1)
            #dst2 = cv2.warpAffine(processed,M2,(cols,rows))
            #cv2.imshow('Source Image', frame)
            cv2.imshow('Processed Image', processed)

            c = cv2.waitKey(10)
            if c == 27:
                break

    cap.release()
    cv2.destroyAllWindows()
Example #6
def main():
    log = logging.getLogger("main")
    #print (cv2.__version__)
    log.debug("Creating background subtractor...")
    bg_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    
    log.debug("Pre-training the background subtractor...")
    default_bg = cv2.imread(IMAGE_FILENAME_FORMAT % 119)
    bg_subtractor.apply(default_bg, None, 1.0)

    car_counter = None # Will be created after first frame is captured

    # Set up image source
    log.debug("Initializing video capture device #%s...", IMAGE_SOURCE)
    cap = cv2.VideoCapture(IMAGE_SOURCE)

    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    
    log.debug("Video capture frame size=(w=%d, h=%d)", frame_width, frame_height)

    log.debug("Starting capture loop...")
    frame_number = -1
    x1 = 0
    x2 = 0
    y1 = 0
    y2 = 0
    while True:
        frame_number += 1
        log.debug("Capturing frame #%d...", frame_number)
        ret, frame = cap.read()
        # Read two extra frames so that only every third frame is processed
        for _ in range(2):
            ret, frame = cap.read()
        if not ret:
            log.error("Frame capture failed, stopping...")
            break
        # Resize the image
        #frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
        # Rotate the image 90 degrees
        frame = imutils.rotate_bound(frame,90)
        '''if frame_number==0:
            bbox = cv2.selectROI(frame, False)
            #print (bbox)
            x1 = int(bbox[0])
            y1 = int(bbox[1])
            x2 = int(bbox[0]) + int(bbox[2])
            y2 = int(bbox[1]) + int(bbox[3])
            print("[%d a %d] [%d a %d]" % (y1,y2,x1,x2))
            print (frame.shape)
        '''    
        #RAFAEL - PARAMETER
        #frame = frame[y1:y2,x1:x2] #output1.avi
        #frame = frame[115:172,72:280] #output1.avi
        #frame = frame[384:573,240:934] #output1.avi
        #frame = frame[130:200,0:230] video1.avi
        log.debug("Got frame #%d: shape=%s", frame_number, frame.shape)

        if car_counter is None:
            # We do this here, so that we can initialize with actual frame size
            log.debug("Creating vehicle counter...")
            #RAFAEL - if the /2 is removed, the whole image is evaluated
        
            car_counter = VehicleCounter(frame.shape[:2], int(frame.shape[0]/2))

        # Archive raw frames from video to disk for later inspection/testing
        if CAPTURE_FROM_VIDEO:
            save_frame(IMAGE_FILENAME_FORMAT
                , frame_number, frame, "source frame #%d")

        log.debug("Processing frame #%d...", frame_number)
        processed = process_frame(frame_number, frame, bg_subtractor, car_counter)
        # Rotate the images back
        processed = imutils.rotate_bound(processed,-90)
        frame = imutils.rotate_bound(frame,-90)
        save_frame(IMAGE_DIR + "/processed_%04d.png"
            , frame_number, processed, "processed frame #%d")

        cv2.imshow('Source Image', frame)
        cv2.imshow('Processed Image', processed)

        log.debug("Frame #%d processed.", frame_number)

        c = cv2.waitKey(WAIT_TIME)
        if c == 27:
            log.debug("ESC detected, stopping...")
            break
        
    print("Cars: %d" % car_counter.car_count)
    print("Motorcycles: %d" % car_counter.motocycle_count)
    log.debug("Closing video capture device...")
    cap.release()
    cv2.destroyAllWindows()
    log.debug("Done.")
Example #7
def main():
    log = logging.getLogger("main")

    log.debug("Creating background subtractor...")
    bg_subtractor = cv2.BackgroundSubtractorMOG()

    log.debug("Pre-training the background subtractor...")
    default_bg = cv2.imread(IMAGE_FILENAME_FORMAT % 119)
    bg_subtractor.apply(default_bg, None, 1.0)

    car_counter = None  # Will be created after first frame is captured

    # Set up image source
    log.debug("Initializing video capture device #%s...", IMAGE_SOURCE)
    cap = cv2.VideoCapture(IMAGE_SOURCE)

    frame_width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
    frame_height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
    log.debug("Video capture frame size=(w=%d, h=%d)", frame_width,
              frame_height)

    log.debug("Starting capture loop...")
    frame_number = -1
    while True:
        frame_number += 1
        log.debug("Capturing frame #%d...", frame_number)
        ret, frame = cap.read()
        if not ret:
            log.error("Frame capture failed, stopping...")
            break

        log.debug("Got frame #%d: shape=%s", frame_number, frame.shape)

        if car_counter is None:
            # We do this here, so that we can initialize with actual frame size
            log.debug("Creating vehicle counter...")
            car_counter = VehicleCounter(frame.shape[:2], frame.shape[0] / 2)

        # Archive raw frames from video to disk for later inspection/testing
        if CAPTURE_FROM_VIDEO:
            save_frame(IMAGE_FILENAME_FORMAT, frame_number, frame,
                       "source frame #%d")

        log.debug("Processing frame #%d...", frame_number)
        processed = process_frame(frame_number, frame, bg_subtractor,
                                  car_counter)

        save_frame(IMAGE_DIR + "/processed_%04d.png", frame_number, processed,
                   "processed frame #%d")

        cv2.imshow('Source Image', frame)
        cv2.imshow('Processed Image', processed)

        log.debug("Frame #%d processed.", frame_number)

        c = cv2.waitKey(WAIT_TIME)
        if c == 27:
            log.debug("ESC detected, stopping...")
            break

    log.debug("Closing video capture device...")
    cap.release()
    cv2.destroyAllWindows()
    log.debug("Done.")
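
# Several examples in this listing call a save_frame() helper that is not shown.
# This is a minimal sketch inferred from how it is invoked above (filename format,
# frame number, image, log label); the cv2.imwrite-based body is an assumption.
def save_frame(file_name_format, frame_number, frame, label_format):
    file_name = file_name_format % frame_number
    label = label_format % frame_number

    log = logging.getLogger('save_frame')
    log.debug('Saving %s as %s', label, file_name)
    cv2.imwrite(file_name, frame)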
Example #8
def main():
    start = time.time()
    log = logging.getLogger("main")

    log.debug("Creating background subtractor...")
    bg_subtractor = cv2.BackgroundSubtractorMOG()

    log.debug("Pre-training the background subtractor...")
    default_bg = cv2.imread(IMAGE_FILENAME_FORMAT % 1)
    bg_subtractor.apply(default_bg, None, 1.0)

    car_counter = None # Will be created after first frame is captured

    # Set up image source
    log.debug("Initializing video capture device #%s...", IMAGE_SOURCE)
    cap = cv2.VideoCapture(IMAGE_SOURCE)

    # Capture every TIME_INTERVAL seconds (here, TIME_INTERVAL = 5)
    fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)  # Gets the frames per second
    #multiplier = TIME_INTERVAL

    log.debug("Updating the car count every %d seconds...", TIME_INTERVAL)

    # Check Camera is open or not
    if not cap.isOpened():
        log.debug("The Camera is not open ...")
        cap.open(IMAGE_SOURCE)

    if CAPTURE_FROM_STREAMING:
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 320)

    frame_width = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
    frame_height = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
    log.debug("Video capture frame size=(w=%d, h=%d)", frame_width, frame_height)

    log.debug("Starting capture loop...\n")
    frame_number = -1
    while True:
        frame_number += 1
        log.debug("Capturing frame #%d...", frame_number)
        ret, frame = cap.read()
        if not ret:
            log.error("Frame capture failed, stopping...")
            break

        log.debug("Got frame #%d: shape=%s", frame_number, frame.shape)

        if car_counter is None:
            # We do this here, so that we can initialize with actual frame size
            log.debug("Creating vehicle counter...")
            car_counter = VehicleCounter(frame.shape[:2], frame.shape[0] / 2)

        # Archive raw frames from video to disk for later inspection/testing
        if CAPTURE_FROM_VIDEO and CAPTURE_FROM_STREAMING:
            save_frame(IMAGE_FILENAME_FORMAT
                , frame_number, frame, "source frame #%d")

        log.debug("Processing frame #%d...", frame_number)
        processed = process_frame(frame_number, frame, bg_subtractor, car_counter)

        save_frame(IMAGE_DIR + "/processed_%04d.png"
            , frame_number, processed, "processed frame #%d")

        cv2.imshow('Source Image', frame)
        cv2.imshow('Processed Image', processed)

        log.debug("Frame #%d processed.\n", frame_number)

        c = cv2.waitKey(WAIT_TIME)
        if c == 27:
            log.debug("ESC detected, stopping...")
            break
    during = frame_number / fps
    log.debug("Closing video capture device...")
    cap.release()
    cv2.destroyAllWindows()
    log.debug("Done.")
    end = time.time()
    return car_counter.vehicle_count, during
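
# This variant returns the running vehicle total and the elapsed video time, so a
# caller can derive a simple rate. The names below come from the return statement
# above; the calling code itself is only a sketch.
if __name__ == '__main__':
    total_vehicles, elapsed_seconds = main()
    print("Counted %d vehicles over %.1f seconds of video" %
          (total_vehicles, elapsed_seconds))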
Example #9
def main():
    log = logging.getLogger("main")

    log.debug("Creating background subtractor...")
    ### CHANGED...
    bg_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()
    # bg_subtractor = cv2.BackgroundSubtractorMOG2()

    log.debug("Pre-training the background subtractor...")
    default_bg = cv2.imread(IMAGE_FILENAME_FORMAT)
    bg_subtractor.apply(default_bg, None, 1.0)

    car_counter = None # Will be created after first frame is captured

    # Set up image source
    log.debug("Initializing video capture device #%s...", IMAGE_SOURCE)
    cap = cv2.VideoCapture(IMAGE_SOURCE)

    frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # CHANGED

    log.debug("Video capture frame size=(w=%d, h=%d)", frame_width, frame_height)

    log.debug("Starting capture loop...")
    frame_number = -1
    while True:
        frame_number += 1
        log.debug("Capturing frame #%d...", frame_number)
        ret, frame = cap.read()
        if not ret:
            log.error("Frame capture failed, stopping...")
            break
        frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
        #RAFAEL - PARAMETER
        frame = frame[115:172, 72:280]  #output1.avi
        #frame = frame[384:573,240:934] #output1.avi
        #frame = frame[130:200,0:230] video1.avi
        log.debug("Got frame #%d: shape=%s", frame_number, frame.shape)

        if car_counter is None:
            # We do this here, so that we can initialize with actual frame size
            log.debug("Creating vehicle counter...")
            #RAFAEL - if the /2 is removed, the whole image is evaluated
            #print frame.shape[0]/2
            car_counter = VehicleCounter(frame.shape[:2], frame.shape[0] // 2)

        # Archive raw frames from video to disk for later inspection/testing
        ## CHANGED
        # if CAPTURE_FROM_VIDEO:
        #     save_frame(IMAGE_FILENAME_FORMAT
        #         , frame_number, frame, "source frame #%d")

        log.debug("Processing frame #%d...", frame_number)
        processed = process_frame(frame_number, frame, bg_subtractor, car_counter)

        save_frame(IMAGE_DIR + "/processed_%04d.png"
            , frame_number, processed, "processed frame #%d")

        cv2.imshow('Source Image', frame)
        cv2.imshow('Processed Image', processed)

        log.debug("Frame #%d processed.", frame_number)

        c = cv2.waitKey(WAIT_TIME)
        if c == 27:
            log.debug("ESC detected, stopping...")
            break

    print("Cars: %d" % car_counter.car_count)
    print("Motorcycles: %d" % car_counter.motocycle_count)
    log.debug("Closing video capture device...")
    cap.release()
    cv2.destroyAllWindows()
    log.debug("Done.")
Example #10
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])

# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
# vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
fps = FPS().start()


vehicle_count = VehicleCounter(exit_masks=EXIT_PTS)
d = {}
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=400)

    # grab the frame dimensions and convert it to a blob
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
        0.007843, (300, 300), 127.5)

    # pass the blob through the network and obtain the detections and
    # predictions
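    # The listing is cut off here; what follows is a hedged sketch of the usual
    # continuation for this cv2.dnn pattern. The 0.2 confidence threshold and the
    # drawing code are assumptions, and wiring the detections into vehicle_count
    # is not reconstructed.
    net.setInput(blob)
    detections = net.forward()

    # loop over the detections and keep only the confident ones
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence < 0.2:  # hypothetical minimum confidence
            continue
        idx = int(detections[0, 0, i, 1])
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (start_x, start_y, end_x, end_y) = box.astype('int')
        label = '{}: {:.2f}%'.format(CLASSES[idx], confidence * 100)
        cv2.rectangle(frame, (start_x, start_y), (end_x, end_y), COLORS[idx], 2)
        cv2.putText(frame, label, (start_x, max(start_y - 15, 15)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

    cv2.imshow('Frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    fps.update()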
def main():
    log = logging.getLogger('main')

    # creating exit mask from points, where we will be counting our vehicles
    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]
    stream = None
    # produce a stabilized video
    if args.stabilize_video == 'yes':
        cap = cv2.VideoCapture(args.video_source)
        stabilize_frames(cap, log)
        return
    else:
        stream = cv2.VideoCapture(args.video_source)
        stream.set(cv2.CAP_PROP_FRAME_WIDTH, SHAPE[1])
        stream.set(cv2.CAP_PROP_FRAME_HEIGHT, SHAPE[0])

    writer = VideoWriter('detected.mp4', (SHAPE[1], SHAPE[0]))

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)
    # skipping 500 frames to train bg subtractor
    train_bg_subtractor(bg_subtractor, stream, num=500)

    pipeline = PipelineRunner(
        pipeline=[
            ContourDetection(bg_subtractor=bg_subtractor,
                             save_image=False,
                             image_dir=IMAGE_DIR),
            # we use y_weight == 2.0 because the traffic is moving vertically in the video
            # use x_weight == 2.0 for horizontal.
            # VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
            VehicleCounter(),
            Visualizer(image_dir=IMAGE_DIR),
            CsvWriter(path='./', name='report.csv')
        ],
        log_level=logging.DEBUG)

    _frame_number = -1
    frame_number = -1

    while True:
        (grabbed, frame) = stream.read()

        if not grabbed or not frame.any():
            log.error("Frame capture failed, stopping...")
            break

        # real frame number
        _frame_number += 1

        # skip every 2nd frame to speed up processing
        if _frame_number % 2 != 0:
            continue

        # frame number that will be passed to the pipeline;
        # this is needed to rebuild a video from the kept frames
        frame_number += 1

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        new_context = pipeline.run()

        cv2.imshow('Video', new_context['frame'])
        writer(new_context['frame'])

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
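
# The pipeline example above calls train_bg_subtractor(), which is not included in
# this listing. A minimal sketch of what such a helper typically does: feed the
# first `num` frames to the subtractor so it can build its background model. The
# learning rate passed to apply() is an assumption.
def train_bg_subtractor(inst, cap, num=500):
    print('Training the background subtractor...')
    for _ in range(num):
        (grabbed, frame) = cap.read()
        if not grabbed:
            break
        inst.apply(frame, None, 0.001)
    return inst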