Example #1
    def process_image_ros2(self, msg):
        try:
            frame = self.bridge.imgmsg_to_cv2(msg, "bgr8")

            frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)

            # run face detector on current frame

            detections = self.motpy_detector.process_image(frame)

            self.tracker.step(detections)
            tracks = self.tracker.active_tracks(min_steps_alive=3)

            self.publish_d_msgs(tracks, msg)

            # preview the boxes on frame
            for det in detections:
                draw_detection(frame, det)

            for track in tracks:
                draw_track(frame, track)

            cv2.imshow('frame', frame)
            # waitKey gives imshow time to render; quitting is not handled inside a ROS callback
            cv2.waitKey(int(1000 * self.dt))

        except Exception as err:
            print(err)

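This callback assumes a ROS2 node that already has a CvBridge, a detector, a tracker, and a publish_d_msgs helper attached to self. A minimal sketch of how such a node could be wired up with rclpy (the topic name, queue size, and class layout are assumptions, not taken from the example):

import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge

class TrackerNode(Node):
    def __init__(self):
        super().__init__('motpy_tracker_node')
        self.bridge = CvBridge()
        # self.motpy_detector, self.tracker and self.dt would also be set up here
        self.create_subscription(Image, '/camera/image_raw', self.process_image_ros2, 10)

    def process_image_ros2(self, msg):
        ...  # the callback body from Example #1 goes here

if __name__ == '__main__':
    rclpy.init()
    rclpy.spin(TrackerNode())
    rclpy.shutdown()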
Example #2
def run():
    # prepare multi object tracker
    model_spec = {
        'order_pos': 1,  # position is a center in 2D space; constant velocity model
        'dim_pos': 2,
        'order_size': 0,  # bounding box is 2-dimensional with a static size
        'dim_size': 2,
        'q_var_pos': 5000.,  # process noise
        'r_var_pos': 0.1  # measurement noise
    }

    dt = 1 / 15.0  # assume 15 fps
    tracker = MultiObjectTracker(dt=dt, model_spec=model_spec)

    # open camera
    cap = cv2.VideoCapture(0)

    face_detector = FaceDetector()

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)

        # run face detector on current frame
        bboxes = face_detector.process(frame)
        detections = [Detection(box=bbox) for bbox in bboxes]
        logger.debug(f'detections: {detections}')

        tracker.step(detections)
        tracks = tracker.active_tracks(min_steps_alive=3)
        logger.debug(f'tracks: {tracks}')

        # preview the boxes on frame
        for det in detections:
            draw_detection(frame, det)

        for track in tracks:
            draw_track(frame, track)

        cv2.imshow('frame', frame)

        # stop demo by pressing 'q'
        if cv2.waitKey(int(1000 * dt)) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
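The examples on this page build Detection objects from raw bounding boxes only. If your detector also returns confidence scores, motpy's Detection can carry them along; a minimal sketch (the score and class_id fields are assumptions based on recent motpy versions):

import numpy as np
from motpy import Detection

# box format is [xmin, ymin, xmax, ymax]; score and class_id are optional
detections = [
    Detection(box=np.array([10., 10., 60., 80.]), score=0.92, class_id=0),
    Detection(box=np.array([100., 40., 160., 120.]), score=0.71, class_id=0),
]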
Example #3
def demo_tracking_visualization(
        model_spec=ModelPreset.constant_acceleration_and_static_box_size_2d.value,
        num_steps: int = 1000,
        num_objects: int = 20):
    gen = image_generator(num_steps=num_steps,
                          num_objects=num_objects,
                          max_omega=0.03,
                          miss_prob=0.33,
                          disappear_prob=0.00,
                          det_err_sigma=3.33)

    dt = 1 / 24
    tracker = MultiObjectTracker(dt=dt,
                                 model_spec=model_spec,
                                 active_tracks_kwargs={
                                     'min_steps_alive': 2,
                                     'max_staleness': 6
                                 },
                                 tracker_kwargs={'max_staleness': 12})

    for _ in range(num_steps):
        img, _, detections = next(gen)
        detections = [d for d in detections if d.box is not None]

        t0 = time.time()
        active_tracks = tracker.step(detections=detections)
        elapsed = (time.time() - t0) * 1000.
        logger.debug(f'tracking elapsed time: {elapsed:.3f} ms')

        for track in active_tracks:
            draw_track(img, track)

        for det in detections:
            draw_rectangle(img, det.box, color=(10, 220, 20), thickness=1)

        cv2.imshow('preview', img)
        # stop the demo by pressing q
        wait_ms = int(1000 * dt)
        c = cv2.waitKey(wait_ms)
        if c == ord('q'):
            break

    cv2.destroyAllWindows()
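Note that this example consumes the return value of tracker.step(...) directly, while Examples #1 and #2 step first and then call tracker.active_tracks(...). Both calling patterns appear on this page; side by side (keyword names taken from the examples above):

# pattern A: step() returns the currently active tracks
active_tracks = tracker.step(detections=detections)

# pattern B: step first, then filter tracks with explicit criteria
tracker.step(detections)
tracks = tracker.active_tracks(min_steps_alive=3)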
Example #4
    def process_trackers(self, frame, tracks):
        for track in tracks:
            color = True
            if len(track.trace) > 1:
                # compare the two most recent trace points against the mask
                x1, y1 = track.trace[-2]
                x2, y2 = track.trace[-1]
                entered = not self.mask[y1][x1] and self.mask[y2][x2]
                left = self.mask[y1][x1] and not self.mask[y2][x2]
                if entered and track.id not in self.mark:
                    self.mark[track.id] = 1
                    self.counter_on += 1
                    color = False
                elif left:
                    if track.id in self.mark:
                        self.counter_on -= 1
                        self.mark.pop(track.id)
                    else:
                        self.counter_off += 1
                        color = False
            draw_track(frame, track, random_color=color)
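This counter assumes self.mask is a 2D boolean array marking the counted region and that track.trace holds integer pixel coordinates. A standalone sketch of the same crossing test (all names here are illustrative, not from the example):

import numpy as np

def crossed_into_region(mask: np.ndarray, prev_xy, curr_xy) -> bool:
    """True if a point moved from outside the masked region to inside it."""
    (x1, y1), (x2, y2) = prev_xy, curr_xy
    return bool(not mask[y1, x1] and mask[y2, x2])

mask = np.zeros((240, 320), dtype=bool)
mask[:, 160:] = True  # right half of the frame counts as "inside"
print(crossed_into_region(mask, (150, 100), (170, 100)))  # True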
Example #5
def run(dataset_root: str,
        fps: float = 30.0,
        split: str = 'train',
        seq_id: str = '04',
        sel: str = 'gt',
        drop_detection_prob: float = 0.1,
        add_detection_noise: float = 5.0):
    """ parses detections, loads frames, runs tracking and visualizes the tracked objects """

    dataset_root = os.path.expanduser(dataset_root)
    if not os.path.isdir(dataset_root):
        logger.error(f'{dataset_root} does not exist')
        exit(-1)

    if str(seq_id) not in ALLOWED_SEQ_IDS:
        logger.error(f'unknown MOT16 sequence: {seq_id}')
        exit(-1)

    dataset_root2 = f'{dataset_root}/{split}/MOT16-{seq_id}'
    frames_dir = f'{dataset_root2}/img1'
    logger.info(f'reading video frames from {frames_dir}')

    dets_path = f'{dataset_root2}/{sel}/{sel}.txt'
    dets_gen = read_detections(dets_path,
                               drop_detection_prob=drop_detection_prob,
                               add_detection_noise=add_detection_noise)

    tracker = MultiObjectTracker(
        dt=1 / fps,
        tracker_kwargs={'max_staleness': 15},
        model_spec='constant_acceleration_and_static_box_size_2d',
        matching_fn_kwargs={'min_iou': 0.25})

    # tracking loop
    while True:
        # read detections for a given frame
        try:
            frame_idx, detections = next(dets_gen)
        except StopIteration:
            logger.warning('finished reading the sequence')
            break

        # read the frame for a given index
        frame = read_video_frame(frames_dir, frame_idx)
        if frame is None:
            continue

        # provide the MOT tracker with predicted detections
        t1 = get_miliseconds()
        active_tracks = tracker.step(detections)
        ms_elapsed = get_miliseconds() - t1
        logger.debug(f'step duration: {ms_elapsed}ms')

        # visualize predictions and tracklets
        for det in detections:
            draw_detection(frame, det)

        for track in active_tracks:
            draw_track(frame, track)

        cv2.imshow('preview', frame)

        # stop execution on q
        key = cv2.waitKey(int(1000 / fps))
        if key == ord('q'):
            logger.info('early stopping')
            break
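read_detections is part of the example's support code and its internals are not shown. For reference, MOT16 detection files are plain CSV with one row per box; a hedged sketch of turning one row into a motpy Detection (column layout per the MOT16 format; the drop/noise options from the signature above are omitted):

import numpy as np
from motpy import Detection

def parse_mot16_row(row: str):
    # MOT16 row: frame, id, bb_left, bb_top, bb_width, bb_height, conf, x, y, z
    vals = [float(v) for v in row.strip().split(',')]
    left, top, width, height = vals[2:6]
    box = np.array([left, top, left + width, top + height])  # [xmin, ymin, xmax, ymax]
    return int(vals[0]), Detection(box=box, score=vals[6])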
Example #6
def run(video_path: str,
        detect_labels,
        video_downscale: float = 1.,
        architecture: str = 'ssdlite320',
        confidence_threshold: float = 0.5,
        tracker_min_iou: float = 0.25,
        show_detections: bool = False,
        track_text_verbose: int = 0,
        device: str = 'cpu',
        viz_wait_ms: int = 1):
    # setup detector, video reader and object tracker
    detector = CocoObjectDetector(class_ids=get_class_ids(detect_labels),
                                  confidence_threshold=confidence_threshold,
                                  architecture=architecture,
                                  device=device)
    cap, cap_fps = read_video_file(video_path)
    tracker = MultiObjectTracker(dt=1 / cap_fps,
                                 tracker_kwargs={'max_staleness': 5},
                                 model_spec={
                                     'order_pos': 1,
                                     'dim_pos': 2,
                                     'order_size': 0,
                                     'dim_size': 2,
                                     'q_var_pos': 5000.,
                                     'r_var_pos': 0.1
                                 },
                                 matching_fn_kwargs={
                                     'min_iou': tracker_min_iou,
                                     'multi_match_min_iou': 0.93
                                 })

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        frame = cv2.resize(frame,
                           fx=video_downscale,
                           fy=video_downscale,
                           dsize=None,
                           interpolation=cv2.INTER_AREA)

        # detect objects in the frame
        detections = detector.process_image(frame)

        # track detected objects
        _ = tracker.step(detections=detections)
        active_tracks = tracker.active_tracks(min_steps_alive=3)

        # visualize and show detections and tracks
        if show_detections:
            for det in detections:
                draw_detection(frame, det)

        for track in active_tracks:
            draw_track(frame,
                       track,
                       thickness=2,
                       text_at_bottom=True,
                       text_verbose=track_text_verbose)

        cv2.imshow('frame', frame)
        c = cv2.waitKey(viz_wait_ms)
        if c == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
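CocoObjectDetector is the example's own wrapper and its code is not shown; the architecture='ssdlite320' default suggests a torchvision SSDLite model underneath. A rough sketch of what such a wrapper might look like (an assumption about the wrapper, not its actual implementation; only the torchvision calls are standard API and require a recent torchvision):

import numpy as np
import torch
from motpy import Detection
from torchvision.models import detection as tv_detection

class SimpleCocoDetector:
    """Hypothetical stand-in for the example's CocoObjectDetector."""

    def __init__(self, confidence_threshold: float = 0.5, device: str = 'cpu'):
        self.model = tv_detection.ssdlite320_mobilenet_v3_large(weights='DEFAULT')
        self.model.eval().to(device)
        self.confidence_threshold = confidence_threshold
        self.device = device

    @torch.no_grad()
    def process_image(self, frame_bgr: np.ndarray):
        # torchvision expects RGB float tensors in [0, 1], CHW layout
        rgb = frame_bgr[:, :, ::-1].copy()
        tensor = torch.from_numpy(rgb).permute(2, 0, 1).float() / 255.0
        out = self.model([tensor.to(self.device)])[0]
        keep = out['scores'] > self.confidence_threshold
        boxes = out['boxes'][keep].cpu().numpy()
        scores = out['scores'][keep].cpu().numpy()
        return [Detection(box=b, score=float(s)) for b, s in zip(boxes, scores)]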
Example #7
def run():
    # prepare multi object tracker
    model_spec = {
        'order_pos': 1,  # position is a center in 2D space; constant velocity model
        'dim_pos': 2,
        'order_size': 0,  # bounding box is 2-dimensional with a static size
        'dim_size': 2,
        'q_var_pos': 5000.,  # process noise
        'r_var_pos': 0.1  # measurement noise
    }

    dt = 1 / 15.0  # assume 15 fps
    tracker = MultiObjectTracker(dt=dt, model_spec=model_spec)
    input_video = args.input_video

    # open the input video
    cap = cv2.VideoCapture(input_video)

    people_detector = PeopleDetector()

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)

        # run people detector on current frame
        bboxes = people_detector.process(frame, args.confidence)
        detections = [Detection(box=bbox) for bbox in bboxes]
        logger.debug(f'detections: {detections}')

        tracker.step(detections)
        tracks = tracker.active_tracks(min_steps_alive=3)
        logger.debug(f'tracks: {tracks}')

        # preview the boxes on frame
        for det in detections:
            draw_detection(frame, det)

        for track in tracks:
            draw_track(frame, track)

        cv2.imshow('frame', frame)

        # stop demo by pressing 'q'
        if cv2.waitKey(int(1000 * dt)) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
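This example reads args.input_video and args.confidence without showing where args comes from; a plausible argparse setup (an assumption; the flag names simply mirror the attributes used above):

import argparse

parser = argparse.ArgumentParser(description='track people in a video file with motpy')
parser.add_argument('--input_video', required=True, help='path to the input video file')
parser.add_argument('--confidence', type=float, default=0.5,
                    help='minimum detection confidence passed to PeopleDetector.process')
args = parser.parse_args()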
Example #8
    frame = cv2.resize(frame, dsize=None, fx=1.98, fy=1.37)
    outOpencvDnn, bboxes, detections = detectFaceDNN(net, frame)
    logger.debug(f'detections: {bboxes}')

    # tracking takes place with the help of the motpy library
    tracker.step(detections)
    tracks = tracker.active_tracks(min_steps_alive=3)
    logger.debug(f'tracks: {tracks}')

    # people counting algorithm (to be completed)
    print(outOpencvDnn.shape)
    x_new, y_new = ExtractBoxValues(tracks)
    print("x_old = " + str(x_old))
    print("y_old = " + str(y_old))
    print("x_new = " + str(x_new))
    print("y_new = " + str(y_new))
    if (x_old > x_new):
        print("Left")
    x_old, y_old = StorePreviousValues(x_new, y_new)

    for track in tracks:
        draw_track(outOpencvDnn, track)
    total_persons = len(bboxes)
    cv2.putText(outOpencvDnn, 'Persons in frame = ' + str(total_persons),
                (200, 25), font, 1, (255, 0, 0), 2)
    cv2.imshow('Video', outOpencvDnn)
    if cv2.waitKey(1) & 0xFF == ord('p'):  # press 'p' to terminate program
        break
video_capture.release()
cv2.destroyAllWindows()
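The fragment above marks its counting step "to be completed" and only prints "Left" when the tracked x coordinate decreases. A minimal sketch of finishing that idea by counting both directions (ExtractBoxValues and StorePreviousValues are the fragment's own helpers and are not reproduced; everything here is illustrative):

def count_direction(prev_x: float, curr_x: float, counts: dict, min_shift: float = 2.0) -> dict:
    """Increment a direction counter when the x coordinate moves past a small threshold."""
    if prev_x - curr_x > min_shift:
        counts['left'] += 1
    elif curr_x - prev_x > min_shift:
        counts['right'] += 1
    return counts

counts = {'left': 0, 'right': 0}
counts = count_direction(prev_x=320.0, curr_x=290.5, counts=counts)
print(counts)  # {'left': 1, 'right': 0}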
Example #9
    def run():
        # prepare multi object tracker
        model_spec = {'order_pos': 1, 'dim_pos': 2,
                      'order_size': 0, 'dim_size': 2,
                      'q_var_pos': 5000., 'r_var_pos': 0.1}

        dt = 1 / 15.0  # assume 15 fps
        tracker = MultiObjectTracker(dt=dt, model_spec=model_spec)
        detectors = Detectors(args.get("cascade_face"), args.get("cascade_eyes"), False)
        face_detector = FaceDetector()
        faces_dict = {}
        save_img = Utility()
        logger.debug(" Initialization of classes completed.")
        logger.debug("Initializing Azure Face Identification API.")
        face_identifier = AzureFaceIdentify()

        # open camera
        cap = cv2.VideoCapture(0)
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)

            if not detectors.motion_detector(frame):
                logger.info("No change in frames. Waiting 1 second before checking for movement again.")
                time.sleep(1)
                continue

            logger.info("Movement detected in frame.")
            # run face detector on current frame
            detections = face_detector.process_image(frame)
            logger.info(f"{len(detections)} Faces detected in frame. ")
            tracker.step(detections)
            tracks = tracker.active_tracks(min_steps_alive=3)

            all_track_ary = []
            if len(tracks) > 0:
                identify_faces = False
                for track in tracks:
                    all_track_ary.append(track.id)
                    if track.id in faces_dict:
                        logger.info("Previously detected face is still in the frame.")
                    else:
                        faces_dict[track.id] = "person data here."
                        identify_faces = True
                        logger.info("New person entered in front of the camera.")
                if identify_faces:
                    persons_identified = face_identifier.identify_persons(frame)
                    save_img.saveFrametoLocal(frame)
                    logger.info("Saving the newly detected face image for confirmation.")

            # drop ids for faces that are no longer visible
            remove_faces = []
            for key in faces_dict.keys():
                if key not in all_track_ary:
                    remove_faces.append(key)
                    logger.info("Tracked face moved out of the camera's view.")

            for key in remove_faces:
                del faces_dict[key]
                logger.debug("Removed face id that is no longer in front of the camera.")


            # preview the boxes on frame
            for det in detections:
                draw_detection(frame, det)

            for track in tracks:
                draw_track(frame, track)

            cv2.imshow('frame', frame)
            # stop demo by pressing 'q'
            if cv2.waitKey(int(1000 * dt)) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
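Example #9's enter/leave bookkeeping (a dict of seen track ids diffed against the current frame's tracks) can be isolated into a small helper; a minimal sketch with illustrative names not taken from the example:

def diff_track_ids(seen: dict, current_ids: set):
    """Return (new_ids, lost_ids) given previously seen ids and the current frame's ids."""
    new_ids = current_ids - seen.keys()
    lost_ids = set(seen.keys()) - current_ids
    return new_ids, lost_ids

seen = {'a': 'person data', 'b': 'person data'}
new_ids, lost_ids = diff_track_ids(seen, current_ids={'b', 'c'})
print(new_ids, lost_ids)  # {'c'} {'a'}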