Example #1
def main():
    ret = True
    frame_dim = video_reader.getVideoDimension()
    video_writer = VideoWriter('abcd', frame_dim)
    while ret:
        ret, frame, frame_no = video_reader.getFrame()
        if not ret:
            break
        rects = []
        # frame = low_light_enhancer.enhance(frame)

        faces = face_detector.detect(frame)
        frame, rects = face_detector.draw_boundary_box(frame)

        objects, maxAppereds = face_tagger.update(rects)
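        # objects maps tracker IDs to centroids; maxAppereds presumably counts how many frames each ID has been seen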

        for (objectID, centroid) in objects.items():
            text = "ID {0}".format(objectID)
            text_color = (255, 0, 0)

            if maxAppereds.get(objectID) >= 50:
                text_color = (0, 255, 0)

            cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, text_color, 2)

            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

        # cv2.imshow('Sakurasoul', face_detector.draw_boundary_box(frame))
        # video_reader.saveFrame(frame)
        video_writer.writeFrame(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #2
class Capture:
    completed: bool = False
    last_motion_ts = 0
    c_post_motion = 0
    counter = Counter()
    frame = None
    _key_frame = KeyFrame()

    def __init__(self, channel: str):
        self.name = channel
        self.start_ts = time.time()
        cfg = CV_CONFIG.channel(channel)
        if cfg is None:
            raise CaptureException(
                f'Channel "{channel}" not found in configuration')
        if cfg.capture.source is None:
            raise CaptureException(
                f'Channel "{channel}" does not have any capture source')

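        # Rolling buffers: recent blurred frames for motion comparison, plus a
        # pre-motion frame buffer that is flushed to the writer when motion starts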
        self._q_prev_blur = deque(maxlen=cfg.detector.frames_ago)
        self._pre_cap = deque(maxlen=cfg.capture.fps * cfg.capture.pre_motion)
        self._f_post_motion = cfg.capture.fps * cfg.capture.post_motion
        self._f_t_wait = int(1000 / cfg.capture.fps)
        self._uptime = cfg.capture.uptime

        self._cap = cv2.VideoCapture(cfg.capture.source)
        if cfg.capture.width:
            self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, cfg.capture.width)
        if cfg.capture.height:
            self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cfg.capture.height)

        if cfg.writer.location:
            self._writer = VideoWriter(cfg)
        else:
            self._writer = None

        self.f_recon = FaceRecognition()

        self.cfg = cfg

        self.mqtt = MqttHelper(cfg)
        self._key_frame.reset()

        signal.signal(signal.SIGINT, self.shutdown)
        signal.signal(signal.SIGTERM, self.shutdown)

    @property
    def frame_id(self):
        """
        Recording Session Frame Id
        to identify recording file and Frame number
        """
        if self._writer is None:
            return None
        return self._writer.session_frame_id

    @property
    def key_frame(self):
        if self._key_frame.empty():
            return self.frame
        if self._key_frame.face.frame is not None:
            return self._key_frame.face.frame
        if self._key_frame.car.frame is not None:
            return self._key_frame.car.frame
        return self._key_frame.motion.frame

    def start(self):
        self.completed = False
        self.mqtt.connect()
        cfg = self.cfg
        wait = (self._f_t_wait, 1, 0)
        wi = 0
        w_label = ('Normal', 'Fast', 'Pause')
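        # Space (key 32) cycles Normal -> Fast -> Pause; Esc (27) ends the loop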
        f_ts = time.time()
        while not self.completed:
            _, img = self._cap.read()
            if img is None:
                print('End of capture stream.')
                self.completed = True
                break
            self.frame = Frame(img, self.cfg)

            self._task_prep_image()

            if cfg.capture.show_frame:
                fps = 1 / (time.time() - f_ts)
                f_ts = time.time()
                if self.counter.motion:
                    label = f'Motion Frame {self.counter.motion}'
                    cv2.putText(img, label, (6, 38), cv2.FONT_HERSHEY_DUPLEX,
                                0.75, (18, 0, 255), 1)
                cv2.putText(img, w_label[wi] + f' (fps: {fps})', (6, 18),
                            cv2.FONT_HERSHEY_DUPLEX, 0.75, (18, 255, 0), 1)
                cv2.imshow(self.name, img)

            if self._uptime and self._uptime < time.time() - self.start_ts:
                print('Uptime reached. Stopping...')
                self.completed = True

            ch = cv2.waitKey(wait[wi])
            if ch == 27:
                print('Escape key pressed. Stopping...')
                self.completed = True
            elif ch == 32:
                wi += 1
                if wi >= len(wait):
                    wi = 0

        if self._writer:
            self._writer.close()
        cv2.destroyAllWindows()
        self.mqtt.disconnect()
        print('Capture stream has been terminated.')
        print(f'Elapsed time: {time.time() - self.start_ts}')
        sys.exit(0)

    def _task_prep_image(self):
        ret = execute_task('prep', self.frame)

        self._q_prev_blur.append(ret.img_blur)
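        # Motion detection needs at least one earlier blurred frame to diff against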
        if len(self._q_prev_blur) > 1:
            self._task_detect_motion()

    def _task_detect_motion(self):
        self.frame.set('base_img', self._q_prev_blur[0])
        if self.cfg.detector.save_image_path:
            self.frame.set('save_motion_img_path',
                           self.cfg.detector.save_image_path)
        ret = execute_task('motion', self.frame)
        log_state = None
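        # log_state marks transitions: STARTED on the first motion frame, RESUME when
        # motion returns during the post-motion countdown, IDLE when motion first
        # stops, and STOPPED once the countdown expires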
        if ret.motion:
            self.counter.motion += 1
            if self.counter.motion == 1:
                log_state = 'STARTED'
                if self.mqtt.enabled:
                    self.mqtt.publish_event('motion_start', ret.image,
                                            self.frame_id,
                                            self.cfg.mqtt.motion_reset)
            elif self.c_post_motion != self._f_post_motion:
                log_state = 'RESUME'
            self.c_post_motion = self._f_post_motion
            self.last_motion_ts = time.time()
            self._key_frame.motion.frame = self.frame
            if self._key_frame.motion.area < ret.motion_area:
                self._key_frame.motion.frame = ret.frame
                self._key_frame.motion.area = ret.motion_area

        elif self.c_post_motion > 0:
            if self.c_post_motion == self._f_post_motion:
                log_state = 'IDLE'
            self.c_post_motion -= 1
            if self.c_post_motion == 0:
                if self._writer:
                    self._writer.close()
                self.counter.reset()
                if self.mqtt.enabled:
                    self.mqtt.publish_event('motion_completed',
                                            self._key_frame.motion.frame.image,
                                            self.frame_id,
                                            self.cfg.mqtt.motion_reset)
                self._key_frame.reset()
                log_state = 'STOPPED'

        if self._writer:
            self._write_frame()

        if log_state is not None:
            print(f'Motion {log_state} on frame {self.frame_id or ""}: '
                  f'c={self.counter.motion}, '
                  f'ma={ret.motion_area}, t={ret.motion_ts}')

        if ret.motion:
            self._task_detect_objects()

    def _task_detect_objects(self):
        fd_cfg = self.cfg.face_detect
        cd_cfg = self.cfg.car_detect
        tasks = []
        if fd_cfg.enabled and self.counter.face < fd_cfg.max_face:
            tasks.append('face')
        if cd_cfg.enabled and self.counter.car < cd_cfg.max_car:
            tasks.append('car')

        if not tasks:
            return

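        # Run the enabled detectors concurrently on the current frame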
        ret = execute_threading(tasks, self.frame)
        if ret.face_detected:
            self._task_face_detected(ret)
        if ret.car_detected:
            self._task_car_detected(ret)

    def _task_face_detected(self, ret):
        self.counter.face += 1
        print('Face DETECTED on frame '
              f'{self.frame_id}: c={ret.face_detected}, '
              f'ma={ret.face_area}, t={ret.face_ts}')
        if self._key_frame.face.area < ret.face_area:
            self._key_frame.face.frame = ret
            self._key_frame.face.area = ret.face_area
            if self.mqtt.enabled:
                self.mqtt.publish_event('face_detected', ret.image,
                                        self.frame_id,
                                        self.cfg.mqtt.face_reset)
        self.f_recon.load_image(self.frame.image).draw_rects()
        if self.f_recon.locations:
            cv2.imshow('Face Detected:', self.frame.image)

    def _task_car_detected(self, ret):
        self.counter.car += 1
        print('Car DETECTED on frame '
              f'{self.frame_id}: c={ret.car}, '
              f'ma={ret.car_area}, t={ret.car_ts}')
        if self.cfg.car_detect.draw_rect and ret.car_zones:
            for zone in ret.car_zones:
                x, y, w, h = zone
                cv2.rectangle(self.frame.image, (x, y), (x + w, y + h),
                              self.cfg.car_detect.draw_rect_color,
                              self.cfg.car_detect.draw_rect_thickness)

        if self._key_frame.car.area < ret.car_area:
            self._key_frame.car.frame = ret
            self._key_frame.car.area = ret.car_area
            if self.mqtt.enabled:
                self.mqtt.publish_event('car_detected', ret.image,
                                        self.frame_id, self.cfg.mqtt.car_reset)

    def _write_frame(self):
        fd_cfg = self.cfg.face_detect
        cd_cfg = self.cfg.car_detect
        frame = self.frame
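        # Draw detection rectangles on the frame before it is handed to the writer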
        if frame.face_detected and fd_cfg.draw_rect:
            print('*******Face:', frame.face_zones)
            for x, y, w, h in frame.face_zones:
                cv2.rectangle(frame.image, (x, y), (x + w, y + h),
                              fd_cfg.draw_rect_color,
                              fd_cfg.draw_rect_thickness)
        if frame.car_detected and cd_cfg.draw_rect:
            print('*******Car:', frame.car_zones)
            for x, y, w, h in frame.car_zones:
                cv2.rectangle(frame.image, (x, y), (x + w, y + h),
                              cd_cfg.draw_rect_color,
                              cd_cfg.draw_rect_thickness)
        # Always record frames
        if self.cfg.writer.record_on == 'always':
            self._writer.write(self.frame.image)
            return

        # Record frames on motion
        elif self.cfg.writer.record_on == 'motion':
            # Motion detected
            if self.frame.motion:
                # Reset post-frame counter
                self.c_post_motion = self._f_post_motion
                # Write pre-frame from buffer
                if self._pre_cap:
                    # Flush buffered pre-motion frames oldest-first to keep chronological order
                    while self._pre_cap:
                        self._writer.write(self._pre_cap.popleft())
                # Write frame
                self._writer.write(self.frame.image)

            # Motion stopped: Keep recording on
            # post-frame countdown
            elif self.c_post_motion > 0:
                self._writer.write(self.frame.image)

            # Buffer pre-capture frames
            else:
                self._pre_cap.append(self.frame.image)

    def shutdown(self, signum, frame):
        print(f'Shutting down ({signum})...')
        self.completed = True
Example #3
from auto_capture import AutoCapture
from video_writer import VideoWriter

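# Capture one still every 10 s for 25200 s (7 h) -> 2520 frames, then write them
# to a video (the final argument, 15, presumably being the output fps)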
if __name__ == "__main__":



    interval = 10
    duration = 25200
    num_frames = int(duration/interval)
    print(num_frames)
    cap = AutoCapture(interval, duration, '../images/test', (1920,1080))
    cap.capture()
    writer = VideoWriter('../images/test', '../videos/test', (1920,1080), num_frames, 15)
    writer.write()
Example #4
                        help='Height of the frames in the video stream.')
    args = parser.parse_args()

    input_q = Queue(5)
    output_q = Queue()
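    # A single daemon worker thread (worker is defined elsewhere) communicates through input_q and output_q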
    for i in range(1):
        t = Thread(target=worker, args=(input_q, output_q))
        t.daemon = True
        t.start()

    input_video = 'rtsp://*****:*****@192.168.0.4:554/openUrl/GXKelBC'
    # cap = cv2.VideoCapture(input_video)
    video_capture = WebcamVideoStream(src=input_video,
                                      width=args.width,
                                      height=args.height).start()
    writer = VideoWriter('output.mp4', (args.width, args.height))
    '''
    stream = cv2.VideoCapture(0)
    stream.set(cv2.CAP_PROP_FRAME_WIDTH, args.width)
    stream.set(cv2.CAP_PROP_FRAME_HEIGHT, args.height)
    '''

    fps = FPS().start()
    object_tracker = ObjectTracker(path='./', file_name='report.csv')
    while True:
        frame = video_capture.read()
        # (ret, frame) = stream.read()
        fps.update()

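        # Process only every other frame to halve the detection load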
        if fps.get_numFrames() % 2 != 0:
            continue
Example #5
def main():
    log = logging.getLogger('main')

    # creating exit mask from points, where we will be counting our vehicles
    base = np.zeros(SHAPE + (3, ), dtype='uint8')
    exit_mask = cv2.fillPoly(base, EXIT_PTS, (255, 255, 255))[:, :, 0]
    stream = None
    # produce a stabilized video
    if args.stabilize_video == 'yes':
        cap = cv2.VideoCapture(args.video_source)
        stabilize_frames(cap, log)
        return
    else:
        stream = cv2.VideoCapture(args.video_source)
        stream.set(cv2.CAP_PROP_FRAME_WIDTH, SHAPE[1])
        stream.set(cv2.CAP_PROP_FRAME_HEIGHT, SHAPE[0])

    writer = VideoWriter('detected.mp4', (SHAPE[1], SHAPE[0]))

    bg_subtractor = cv2.createBackgroundSubtractorMOG2(history=500,
                                                       detectShadows=True)
    # skipping 500 frames to train bg subtractor
    train_bg_subtractor(bg_subtractor, stream, num=500)

    pipeline = PipelineRunner(
        pipeline=[
            ContourDetection(bg_subtractor=bg_subtractor,
                             save_image=False,
                             image_dir=IMAGE_DIR),
            # use y_weight == 2.0 because traffic moves vertically in this video;
            # use x_weight == 2.0 for horizontal traffic.
            # VehicleCounter(exit_masks=[exit_mask], y_weight=2.0),
            VehicleCounter(),
            Visualizer(image_dir=IMAGE_DIR),
            CsvWriter(path='./', name='report.csv')
        ],
        log_level=logging.DEBUG)

    _frame_number = -1
    frame_number = -1

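    # _frame_number counts every frame read; frame_number only the processed (kept) ones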
    while True:
        (grabbed, frame) = stream.read()

        if not grabbed or frame is None:
            log.error("Frame capture failed, stopping...")
            break

        # real frame number
        _frame_number += 1

        # skip every 2nd frame to speed up processing
        if _frame_number % 2 != 0:
            continue

        # frame number that will be passed to the pipeline;
        # needed to rebuild a contiguous video from the kept frames
        frame_number += 1

        pipeline.set_context({
            'frame': frame,
            'frame_number': frame_number,
        })
        new_context = pipeline.run()

        cv2.imshow('Video', new_context['frame'])
        writer(new_context['frame'])

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #6
    # calibrate capture perspective
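    # strike_point holds two lists of clicked points (one per striker), collected via rgbMouseCallback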
    current_striker = 0
    strike_point = [[], []]

    mouse_callback_param = dict()
    mouse_callback_param['strike_points'] = strike_point
    mouse_callback_param['current_striker'] = current_striker
    mouse_callback_param['trackers'] = [tracker1, tracker2]
    mouse_callback_param['calibrater'] = cal
    mainwin = 'rgb'
    cv2.namedWindow(mainwin)
    cv2.setMouseCallback(mainwin, rgbMouseCallback, [mouse_callback_param])

    writer = VideoWriter('./data',
                         'tennis_trans.mp4',
                         fps=int(reader.fps),
                         resolution=reader.size,
                         isColor=1)

    print('init ok')

    while True:
        # time.sleep(0.1)
        ret, _frame = reader.read()
        if cv2.waitKey(1) & 0xFF == ord('q') or not ret:
            break

        timer = cv2.getTickCount()

        trans_frame = cal.transform_image(_frame)
        warped = trasform_remap(trans_frame, map_x, map_y)
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--input-dir',
                        type=str,
                        help="dataset base directory",
                        required=True)
    parser.add_argument('-o',
                        '--output-dir',
                        type=str,
                        help="output video directory",
                        default=None)
    parser.add_argument(
        '--radar-name',
        action='append',
        help="generate video with specific radar serial (left radar first).",
        required=True)
    parser.add_argument('--frame-rate',
                        type=int,
                        help="output video frame rate",
                        default=10)
    parser.add_argument('--quality-factor',
                        type=int,
                        help="video compression quality factor",
                        default=25)
    parser.add_argument('--show-timestamp', action='store_true')
    parser.add_argument('--no-sar', action='store_true')
    parser.add_argument('--no-point-cloud', action='store_true')

    args = parser.parse_args()

    fig = plt.figure()
    fig.show()
    artist = None

    input_output_paths = []
    for radar_name in args.radar_name:
        io_path = get_io_paths(radar_name,
                               args.input_dir,
                               args.output_dir,
                               no_sar=args.no_sar,
                               no_point_cloud=args.no_point_cloud)
        input_output_paths.append(io_path)

    # create individual outputs
    for io_path in input_output_paths:
        video_writer = None

        last_frame_id = 0
        with ExitStack() as stack:
            for image_pc_pair in sync_streams(io_path.image_pbs_path,
                                              io_path.pc_pbs_path):
                if image_pc_pair.image is None and image_pc_pair.pc is None:
                    break

                # create rgb image from point cloud / SAR
                im_rgb = to_rgb_image(image_pc_pair)

                # flip image because image (0,0) is at top left corner
                # while radar image (0,0) is at bottom left corner
                im_rgb = np.copy(np.flip(im_rgb, axis=0))
                (im_height, im_width, _) = im_rgb.shape

                # get timestamp and frame id
                if image_pc_pair.pc is not None:
                    timestamp = image_pc_pair.pc.timestamp
                    frame_id = image_pc_pair.pc.frame_id
                else:
                    timestamp = image_pc_pair.image.timestamp
                    frame_id = image_pc_pair.image.frame_id

                if frame_id - last_frame_id > 1 and frame_id > 0:
                    print("DROP FRAME DETECTED: %d" % frame_id)

                last_frame_id = frame_id

                if args.show_timestamp:
                    im_rgb = overlay_timestamp(timestamp, frame_id, im_rgb)

                # setup onscreen display
                if artist is None:
                    artist = fig.gca().imshow(
                        np.zeros(im_rgb.shape, dtype=np.uint8))

                # setup video writer and image model writer
                if args.output_dir is not None and video_writer is None:
                    video_writer = VideoWriter(io_path.video_output_path,
                                               im_width, im_height,
                                               args.frame_rate,
                                               args.quality_factor)
                    video_writer = stack.enter_context(video_writer)

                    proto_writer = stack.enter_context(
                        ProtoStreamWriter(io_path.image_model_output_path))

                # write out frame
                if video_writer is not None:
                    video_writer(im_rgb)

                    if image_pc_pair.image is not None:
                        proto_out = image_pc_pair.image.to_proto(
                            timestamp, frame_id)
                        proto_writer.write(proto_out)

                # on screen display
                artist.set_data(im_rgb)
                fig.canvas.draw()
                plt.pause(1e-4)

    # This is not the greatest way to merge videos from multiple cameras,
    # but for visualization it is okay.
    if len(input_output_paths) != 2:
        return

    left_video = cv2.VideoCapture(input_output_paths[0].video_output_path)
    right_video = cv2.VideoCapture(input_output_paths[1].video_output_path)
    merged_video_path = join(args.output_dir, "merged_video.mp4")
    merged_video_writer = None

    print("merging video ...")
    frame_count = 0
    with ExitStack() as stack:
        while left_video.isOpened() and right_video.isOpened():
            ret_left, left_frame = left_video.read()
            ret_right, right_frame = right_video.read()
            if not ret_left or not ret_right:
                break

            combined_frame = np.vstack((left_frame, right_frame))
            combined_frame = cv2.cvtColor(combined_frame, cv2.COLOR_BGR2RGB)
            combined_frame = cv2.rotate(combined_frame,
                                        cv2.ROTATE_90_COUNTERCLOCKWISE)
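            # vstack puts the left radar on top; rotating 90 deg CCW yields a side-by-side left|right layout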

            if merged_video_writer is None:
                (im_height, im_width, _) = combined_frame.shape
                merged_video_writer = VideoWriter(merged_video_path, im_width,
                                                  im_height, args.frame_rate,
                                                  args.quality_factor)
                merged_video_writer = stack.enter_context(merged_video_writer)

            merged_video_writer(combined_frame)
            frame_count += 1

            if frame_count % 100 == 0:
                print("wrote %s frames" % frame_count)
Example #8
if __name__ == '__main__':
    # camera = Reader()
    #
    # camera.run(path='data', name='tennis.mp4')
    #
    # _frame = camera.get()
    # shape = _frame.shape

    camera = cv2.VideoCapture('data/tennis.mp4')

    if not camera.isOpened():
        raise Exception('Unable to open data/tennis.mp4')
    writer = VideoWriter(path='./data',
                         name='tennis_trans.mp4',
                         fps=camera.get(cv2.CAP_PROP_FPS),
                         resolution=(int(camera.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                     int(camera.get(
                                         cv2.CAP_PROP_FRAME_HEIGHT))),
                         isColor=1)
    mainwin = 'test'

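    # Copy the clip frame-by-frame into the custom VideoWriter while previewing it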
    while True:
        ret, _frame = camera.read()
        if not ret:
            break
        writer.write(_frame)
        cv2.imshow(mainwin, _frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    camera.release()