Example #1
def main():
    log = logging.getLogger("main")

    # creating an exit mask from the points where we will be counting our vehicles
    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]

    # there is also bgslibrary, which seems to give better BG subtraction,
    # but I have not tested it yet
    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    # processing pipeline for programming convenience
    pipeline = PipelineRunner(
        pipeline=[
            ContourDetection(bg_subtractor=bg_subtractor,
                             save_image=True,
                             image_dir=IMAGE_DIR),
            # we use y_weight == 2.0 because traffic is moving vertically in the video;
            # use x_weight == 2.0 for horizontal movement.
            VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
            Visualizer(image_dir=IMAGE_DIR),
            CsvWriter(path='./', name='report.csv')
        ],
        log_level=logging.DEBUG)

    # Set up image source
    # You could also use cv2.VideoCapture here, but for some reason it is not working for me
    cap = skvideo.io.vreader(VIDEO_SOURCE)

    # skipping 500 frames to train bg subtractor
    train_bg_subtractor(bg_subtractor, cap, num=500)

    _frame_number = -1
    frame_number = -1
    for frame in cap:
        if not frame.any():
            log.error("Frame capture failed, stopping...")
            break

        # real frame number
        _frame_number += 1

        # skip every 2nd frame to speed up processing
        if _frame_number % 2 != 0:
            continue

        # frame number that will be passed to the pipeline;
        # this is needed to make a video from the kept frames
        frame_number += 1

        # plt.imshow(frame)
        # plt.show()
        # return

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        pipeline.run()
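
The helper train_bg_subtractor is not defined in any of these snippets. A minimal sketch of what it might look like for a frame iterator such as the one returned by skvideo.io.vreader (the actual implementation in the original project may differ):

def train_bg_subtractor(inst, cap, num=500):
    # feed the first `num` frames to the subtractor so its background
    # model stabilizes before counting starts (a sketch, not the original)
    print('Training BG subtractor...')
    i = 0
    for frame in cap:
        inst.apply(frame, None, 0.001)
        i += 1
        if i >= num:
            return cap
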
Example #2
def main():
    log = logging.getLogger("main")

    # creating an exit mask from the points where we will be counting our vehicles
    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]

    # there is also bgslibrary, which seems to give better BG subtraction,
    # but I have not tested it yet
    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    # processing pipeline for programming convenience
    pipeline = PipelineRunner(
        pipeline=[
            ContourDetection(bg_subtractor=bg_subtractor,
                             save_image=True,
                             image_dir=IMAGE_DIR),
            # we use y_weight == 2.0 because traffic is moving vertically in the video;
            # use x_weight == 2.0 for horizontal movement.
            VehicleCounter(exit_masks=[exit_mask], y_weight=2.0, x_weight=1.5),
            Visualizer(image_dir=IMAGE_DIR),
            CsvWriter(path='./', name=REPORT_NAME)
        ],
        log_level=logging.DEBUG)

    # Set up image source
    # this example reads frames with cv2.VideoCapture directly
    cap = cv2.VideoCapture(VIDEO_SOURCE)

    # skipping 500 frames to train bg subtractor
    train_bg_subtractor(bg_subtractor, cap, num=500)

    frame_number = -1
    while True:
        ret, frame = cap.read()
        if frame is None:
            log.error("Frame capture failed, stopping...")
            break

        frame_number += 1

        # plt.imshow(frame)
        # plt.show()
        # return

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        pipeline.run()
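
The constants SHAPE and EXIT_PTS are defined outside these snippets. As an illustration only, for a 720x1280 video with a single exit zone they could look roughly like the following (cv2.fillPoly expects an array of int32 polygons; the coordinates here are hypothetical):

import numpy as np

SHAPE = (720, 1280)  # (height, width) of the video frames -- assumed
EXIT_PTS = np.array([
    [[732, 720], [732, 590], [1280, 500], [1280, 720]],  # hypothetical exit polygon
], dtype=np.int32)
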
Example #3
def main():
    log = logging.getLogger("main")

    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    pipeline = PipelineRunner(
        pipeline=[
            ContourDetection(bg_subtractor=bg_subtractor,
                             save_image=True,
                             image_dir=IMAGE_DIR),
            # y_weight == 2.0 in the line below because traffic moves vertically;
            # use x_weight == 2.0 for horizontal traffic
            VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
            Visualizer(image_dir=IMAGE_DIR),
            CsvWriter(path='./', name='report.csv')
        ],
        log_level=logging.DEBUG)

    cap = cv2.VideoCapture(VIDEO_SOURCE)

    train_bg_subtractor(bg_subtractor, cap, num=500)

    _frame_number = -1
    frame_number = -1
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        _frame_number += 1

        if _frame_number % 2 != 0:
            continue

        frame_number += 1

        # plt.imshow(frame)
        # plt.show()
        # return

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        pipeline.run()
    print("End of Video Reached......")
Example #4
def main():
    log = logging.getLogger("main")

    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    pipeline = PipelineRunner(pipeline=[
        ContourDetection(bg_subtractor=bg_subtractor,
                         save_image=True,
                         image_dir=IMAGE_DIR),
        VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
        Visualizer(image_dir=IMAGE_DIR),
        CsvWriter(path='./', name='report.csv')
    ],
                              log_level=logging.DEBUG)

    cap = skvideo.io.vreader(VIDEO_SOURCE)

    train_bg_subtractor(bg_subtractor, cap, num=500)

    _frame_number = -1
    frame_number = -1
    for frame in cap:
        if not frame.any():
            log.error("Frame capture failed, stopping...")
            break

        _frame_number += 1

        if _frame_number % 2 != 0:
            continue

        frame_number += 1

        # plt.imshow(frame)
        # plt.show()
        # return

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        pipeline.run()
Example #5
def main():
    log = logging.getLogger("main")

    # creating an exit mask from the points where we will be counting our vehicles
    base = np.zeros(Shape + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, Exit_pts, (255, 255, 255))[:, :, 0]

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    # processing pipeline for programming convenience
    pipeline = PipelineRunner(
        pipeline=[
            ContourDetection(bg_subtractor=bg_subtractor,
                             save_image=True,
                             image_dir=Image_dir),
            # we use y_weight == 2.0 because traffic is moving vertically in the video;
            # use x_weight == 2.0 for horizontal movement
            VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
            Visualizer(image_dir=Image_dir),
            CsvWriter(path='./', name='report.csv')
        ],
        log_level=logging.DEBUG)

    # setting up image source
    cap = skvideo.io.vreader(Video_source)

    # skip 500 frames to train bg subtractor
    train_bg_subtractor(bg_subtractor, cap, num=500)
    _frame_number = -1
    frame_number = -1
    for frame in cap:
        if not frame.any():
            log.error("frame capture failed, stopping...")
            break

        # real frame number
        _frame_number += 1
        # skipping every 2nd frame to speed up processing
        if _frame_number % 2 != 0:
            continue
        frame_number += 1

        pipeline.set_context({'frame': frame, 'frame_number': frame_number})
        pipeline.run()
Example #6
def main():
    log = logging.getLogger("main")

    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    area_mask = cv2.fillPoly(base, [AREA_PTS], (255, 255, 255))[:, :, 0]

    pipeline = PipelineRunner(
        pipeline=[
            CapacityCounter(area_mask=area_mask,
                            save_image=True,
                            image_dir=IMAGE_DIR),
            # saving every 10 seconds
            ContextCsvWriter('./report.csv',
                             start_time=1505494325,
                             fps=1,
                             faster=10,
                             field_names=['capacity'])
        ],
        log_level=logging.DEBUG)

    # Set up image source
    cap = skvideo.io.vreader(VIDEO_SOURCE)

    frame_number = -1
    st = time.time()

    try:
        for frame in cap:
            if not frame.any():
                log.error("Frame capture failed, skipping...")
                continue

            frame_number += 1

            pipeline.set_context({
                'frame': frame,
                'frame_number': frame_number,
            })
            context = pipeline.run()

            # skipping 10 seconds
            for _ in range(240):
                next(cap)
    except Exception as e:
        log.exception(e)
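
The start_time=1505494325 passed to ContextCsvWriter looks like a Unix timestamp; decoding it gives the presumed start of the recording:

import datetime

# 1505494325 seconds since the Unix epoch
print(datetime.datetime.fromtimestamp(1505494325, tz=datetime.timezone.utc))
# 2017-09-15 16:52:05+00:00
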
Example #7
def main():
    log = logging.getLogger("main")

    # creating the area points where vehicles will be counted

    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    area_mask = cv2.fillPoly(base, [AREA_PTS], (255, 255, 255))[:, :, 0]

    pipeline = PipelineRunner(
        pipeline=[
            CapacityCounter(area_mask=area_mask,
                            save_image=True,
                            image_dir=IMAGE_DIR),
            # saving every 10 seconds
        ],
        log_level=logging.DEBUG)

    # setting up the image source

    cap = skvideo.io.vreader(VIDEO_SOURCE)

    frame_number = -1
    st = time.time()
    for frame in cap:
        if not frame.any():
            log.error("Frame capture failed, stopping...")
            break

        # real frame number

        frame_number += 1

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        context = pipeline.run()

        # skipping 10 seconds

        for _ in range(240):
            next(cap)
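
The hard-coded 240 presumably corresponds to the 10 seconds mentioned in the comment for a 24 fps source (the frame rate is an assumption; it is not stated in the snippet). A tiny helper makes the arithmetic explicit:

def frames_to_skip(seconds, fps=24):
    # number of frames to discard to jump ahead by `seconds`
    # at the given frame rate (fps=24 is assumed)
    return int(seconds * fps)

# frames_to_skip(10) == 240, matching the loop above
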
Example #8
def traffic_capacity(q_capacity, q_camera_frames):

    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    area_mask = cv2.fillPoly(base, [AREA_PTS], (255, 255, 255))[:, :, 0]

    pipeline = PipelineRunner(pipeline=[CapacityCounter(area_mask=area_mask)])

    cap = cv2.VideoCapture(0)  # Taking camera input
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 600)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
    cap.set(cv2.CAP_PROP_FPS, 24)

    frame_number = -1

    try:
        while True:
            frame_number += 1
            flag, frame = cap.read()

            pipeline.set_context({
                'frame': frame,
                'frame_number': frame_number,
            })
            context = pipeline.run()

            img = {}
            img["frame"] = frame
            q_camera_frames.put(img)

            print("\n[{}] \t Frame: {} \t Capacity: {}%".format(
                datetime.datetime.now().strftime('%d-%m-%Y %I:%M:%S %p'),
                context['frame_number'], round(context['capacity'] * 100, 5)))
            q_capacity.put(round(context['capacity'] * 100,
                                 5))  # putting capacity on a queue

    except Exception as e:
        print("EXCEPTION: ", e)
    finally:
        # release the camera even if the loop exits with an exception
        cap.release()
Example #9
def main():

    # creating an exit mask from the points where we will be counting our vehicles
    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    pipeline = PipelineRunner(pipeline=[
        ContourDetection(
            bg_subtractor=bg_subtractor, save_image=True, image_dir=IMAGE_DIR),
        # we use y_weight == 2.0 because traffic is moving vertically in the video;
        # use x_weight == 2.0 for horizontal movement.
        VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
        Visualizer(image_dir=IMAGE_DIR),
    ])

    cap = skvideo.io.vreader(VIDEO_SOURCE)

    train_bg_subtractor(bg_subtractor, cap, num=500)

    _frame_number = -1
    frame_number = -1
    for frame in cap:
        if not frame.any():
            break

        _frame_number += 1
        if _frame_number % 2 != 0:
            continue

        frame_number += 1

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        pipeline.run()
Example #10
def main():
    log = logging.getLogger("main")

    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    area_mask = cv2.fillPoly(base, [AREA_PTS], (255, 255, 255))[:, :, 0]

    pipeline = PipelineRunner(pipeline=[
        CapacityCounter(area_mask=area_mask,
                        save_image=True,
                        image_dir=IMAGE_DIR),
        ContextCsvWriter('./report.csv',
                         start_time=1505494325,
                         fps=1,
                         faster=10,
                         field_names=['capacity'])
    ],
                              log_level=logging.DEBUG)

    cap = skvideo.io.vreader(VIDEO_SOURCE)

    frame_number = -1
    st = time.time()
    for frame in cap:
        if not frame.any():
            log.error("Frame capture failed, stopping...")
            break

        frame_number += 1

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        context = pipeline.run()

        for _ in range(240):
            next(cap)
Example #11
    _frame_number += 1

    # skip every 2nd frame to speed up processing
    if _frame_number % 2 != 0:
        continue

    # frame number that will be passed to the pipeline;
    # this is needed to make a video from the kept frames
    frame_number += 1

    # plt.imshow(frame)
    # plt.show()
    # return
    # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    pipeline.set_context({
        'frame': frame,
        'frame_number': frame_number,
    })
    cc = pipeline.run()
    frame = cc['frame']
    cv2.imshow('op', frame)
    if cv2.waitKey(33) == 27:
        break
#def main():

# ============================================================================
'''if __name__ == "__main__":
    log = utils.init_logging()

    if not os.path.exists(IMAGE_DIR):
        log.debug("Creating image directory `%s`...", IMAGE_DIR)
        os.makedirs(IMAGE_DIR)
Example #12
def main():
    log = logging.getLogger('main')

    # creating an exit mask from the points where we will be counting our vehicles
    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]
    stream = None
    # produce a stabilized video
    if args.stabilize_video == 'yes':
        cap = cv2.VideoCapture(args.video_source)
        stabilize_frames(cap, log)
        return
    else:
        stream = cv2.VideoCapture(args.video_source)
        stream.set(cv2.CAP_PROP_FRAME_WIDTH, SHAPE[1])
        stream.set(cv2.CAP_PROP_FRAME_HEIGHT, SHAPE[0])

    writer = VideoWriter('detected.mp4', (SHAPE[1], SHAPE[0]))

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)
    # skipping 500 frames to train bg subtractor
    train_bg_subtractor(bg_subtractor, stream, num=500)

    pipeline = PipelineRunner(
        pipeline=[
            ContourDetection(bg_subtractor=bg_subtractor,
                             save_image=False,
                             image_dir=IMAGE_DIR),
            # we use y_weight == 2.0 because traffic is moving vertically in the video;
            # use x_weight == 2.0 for horizontal movement.
            # VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
            VehicleCounter(),
            Visualizer(image_dir=IMAGE_DIR),
            CsvWriter(path='./', name='report.csv')
        ],
        log_level=logging.DEBUG)

    _frame_number = -1
    frame_number = -1

    while True:
        (grabbed, frame) = stream.read()

        if not grabbed:
            log.error("Frame capture failed, stopping...")
            break

        # real frame number
        _frame_number += 1

        # skip every 2nd frame to speed up processing
        if _frame_number % 2 != 0:
            continue

        # frame number that will be passed to the pipeline;
        # this is needed to make a video from the kept frames
        frame_number += 1

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        new_context = pipeline.run()

        cv2.imshow('Video', new_context['frame'])
        writer(new_context['frame'])

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
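
Example #12 writes frames through a callable VideoWriter wrapper that is not shown here. A minimal sketch of such a wrapper around cv2.VideoWriter (the original class may differ, e.g. in codec or default frame rate):

import cv2

class VideoWriter(object):
    def __init__(self, path, size, fps=25):
        # 'mp4v' is a widely available codec for .mp4 files;
        # the default frame rate is an assumption
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        self.writer = cv2.VideoWriter(path, fourcc, fps, size)

    def __call__(self, frame):
        # `size` must match the (width, height) of the frames being written
        self.writer.write(frame)

    def release(self):
        self.writer.release()
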
Example #13
def main():
    log = logging.getLogger("main")

    # pick the pixel-distance start & end points by double-clicking
    PIXEL_DISTANCE = select_pixel_distance(VIDEO_SOURCE)
    if not PIXEL_DISTANCE:
        print("No selection of PIXEL_DISTANCE!")
        return
    else:
        print('PIXEL_DISTANCE: ')
        print(PIXEL_DISTANCE)

    PHYSICAL_DISTANCE = float(
        input(
            "Please enter the physical distance in meters of the pixel distance selected:"
        ))

    METER_PER_PIXEL = PHYSICAL_DISTANCE / PIXEL_DISTANCE

    # draw polygons using mouse to pick exit points
    EXIT_PTS = select_exit_zones(VIDEO_SOURCE)
    if not EXIT_PTS:
        print("No selection of exit zone!")
        return
    else:
        EXIT_PTS = np.array(EXIT_PTS)
        print('EXIT_PTS: ')
        print(EXIT_PTS)

    # Set up image source
    cap = cv2.VideoCapture(VIDEO_SOURCE)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print(width, height)

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(VIDEO_OUT_DEST, fourcc, fps, (width, height))

    # creating an exit mask from the points where we will be counting our vehicles
    base = np.zeros((height, width) + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]

    # there is also bgslibrary, which seems to give better BG subtraction,
    # but I have not tested it yet
    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)

    # processing pipeline for programming convenience
    pipeline = PipelineRunner(
        pipeline=[
            ContourDetection(bg_subtractor=bg_subtractor,
                             min_contour_width=int(MIN_CONTOUR_RATIO * height),
                             min_contour_height=int(MIN_CONTOUR_RATIO *
                                                    height),
                             save_image=False,
                             image_dir=IMAGE_DIR),
            # we use y_weight == 2.0 because traffic is moving vertically in the video;
            # use x_weight == 2.0 for horizontal movement.
            VehicleCounter(use_physical_speed=USE_PHYSICAL_SPEED,
                           meter_per_pixel=METER_PER_PIXEL,
                           fps=fps,
                           avg_speed_interval=AVG_SPEED_INTERVAL,
                           exit_masks=[exit_mask],
                           y_weight=2.0,
                           path_size=10),
            Visualizer(use_physical_speed=USE_PHYSICAL_SPEED,
                       video_out=out,
                       image_dir=IMAGE_DIR,
                       save_image=False),
            CsvWriter(path='./', name='report.csv')
        ],
        log_level=logging.DEBUG)

    # skipping 500 frames to train bg subtractor, close video and reopen
    train_bg_subtractor(bg_subtractor, cap, num=500)
    cap.release()

    frame_number = -1
    frame_time_sec = -1.0 / fps
    cap = cv2.VideoCapture(VIDEO_SOURCE)
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # calculate the frame time in seconds
            frame_time_sec += 1.0 / fps

            # frame number that will be passed to the pipeline;
            # this is needed to make a video from the kept frames
            frame_number += 1

            # plt.imshow(frame)
            # plt.show()
            # return

            pipeline.set_context({
                'frame': frame,
                'frame_number': frame_number,
                'frame_time_sec': frame_time_sec
            })
            pipeline.run()

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    # Release everything if job is finished
    cap.release()
    out.release()
    cv2.destroyAllWindows()
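
Given METER_PER_PIXEL and the video's fps, converting a per-frame pixel displacement into a physical speed is plain arithmetic. A hedged sketch of the kind of conversion VehicleCounter(use_physical_speed=...) presumably performs internally:

def pixels_per_frame_to_kmh(pixel_displacement, meter_per_pixel, fps):
    # metres travelled in one frame, scaled to metres per second,
    # then converted to kilometres per hour
    meters_per_second = pixel_displacement * meter_per_pixel * fps
    return meters_per_second * 3.6

# e.g. 5 px/frame at 0.05 m/px and 30 fps -> 5 * 0.05 * 30 * 3.6 = 27 km/h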