def track(self, boxes, labels, probs, box_feats, frame_num):
        """Given new object detection output, update the tracking.

        Args:
          boxes: detected boxes for the current frame.
          labels: per-box class labels.
          probs: per-box confidence scores.
          box_feats: per-box appearance features consumed by the tracker.
          frame_num: index of the current frame.

        Side effects: advances self.tracker one step, appends rows
        [frame_num, track_id, x, y, w, h] for confirmed tracks to
        self.tracking_results, and buffers rows for not-yet-confirmed
        tracks in self.tmp_tracking_dict until confirmation.
        """
        # Pick the object-id -> class-name mapping matching the model.
        obj_id_to_class_ = obj_id_to_class
        if self.is_coco_class:
            obj_id_to_class_ = coco_obj_id_to_class
            if self.partial_classes:
                obj_id_to_class_ = self.partial_obj_id2class
        detections = create_obj_infos(
            frame_num,
            boxes,
            probs,
            labels,
            box_feats,
            obj_id_to_class_, [self.track_obj],
            self.min_confidence,
            self.min_detection_height,
            1.0,
            is_coco_model=self.is_coco_class,
            coco_to_actev_mapping=coco_obj_to_actev_obj)
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes,
                                                    self.nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # update tracker
        self.tracker.predict()
        self.tracker.update(detections)

        # store result
        for track in self.tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                # BUG FIX: the original tested `not track.is_confirmed`
                # (a bound method, always truthy), so this buffering branch
                # could never execute; call it, as done elsewhere in the file.
                if (not track.is_confirmed()) and track.time_since_update == 0:
                    bbox = track.to_tlwh()
                    dp = [
                        frame_num, track.track_id, bbox[0], bbox[1], bbox[2],
                        bbox[3]
                    ]
                    if track.track_id not in self.tmp_tracking_dict:
                        self.tmp_tracking_dict[track.track_id] = [dp]
                    else:
                        self.tmp_tracking_dict[track.track_id].append(dp)
                continue

            bbox = track.to_tlwh()
            if track.track_id in self.tmp_tracking_dict:
                # Track just got confirmed: flush the buffered tentative
                # rows into the final results, then drop the buffer entry.
                pred_list = self.tmp_tracking_dict[track.track_id]
                for dp in pred_list:
                    self.tracking_results.append(dp)
                # BUG FIX: the original called .pop(track_id, None) on the
                # buffered *list* (list.pop takes a single index argument ->
                # TypeError); pop the key from the dict instead, matching
                # the tmp_tracking_results_dict handling elsewhere.
                self.tmp_tracking_dict.pop(track.track_id, None)
            self.tracking_results.append([
                frame_num, track.track_id, bbox[0], bbox[1], bbox[2], bbox[3]
            ])
# --- Exemplo n.º 2 (code-listing-site artifact; commented so the file parses) ---
def run_detect_and_track(args,
                         frame_stack,
                         sess,
                         model,
                         targetid2class,
                         tracking_objs,
                         tracker_dict,
                         tracking_results_dict,
                         tmp_tracking_results_dict,
                         obj_out_dir=None,
                         valid_frame_num=None):
    """Run one batched detection forward pass and update per-class trackers.

    Args:
      args: config namespace (min_confidence, nms_max_overlap,
        get_tracking, is_coco_model, min_detection_height, ...).
      frame_stack: list of (resized_image, scale, frame_idx) tuples.
      sess: TF session to run the detection model in.
      model: detection model exposing final_boxes/labels/probs,
        final_valid_indices and fpn_box_feat tensors.
      targetid2class: mapping from class id to class name.
      tracking_objs: class names to track (one tracker per class).
      tracker_dict: class name -> tracker instance.
      tracking_results_dict: class name -> list of finalized rows
        [frame, track_id, x, y, w, h].
      tmp_tracking_results_dict: class name -> {track_id: buffered rows}
        for tentative tracks awaiting confirmation.
      obj_out_dir: directory to write per-frame detection JSON to;
        None disables saving.
      valid_frame_num: number of non-padded frames in the stack
        (defaults to all of them).
    """
    # ignore the padded images
    if valid_frame_num is None:
        valid_frame_num = len(frame_stack)

    resized_images, scales, frame_idxs = zip(*frame_stack)

    feed_dict = model.get_feed_dict_forward_multi(resized_images)

    sess_input = [
        model.final_boxes, model.final_labels, model.final_probs,
        model.final_valid_indices, model.fpn_box_feat
    ]
    # [B, num, 4], [B, num], [B, num], [B], [M, 256, 7, 7]
    batch_boxes, batch_labels, batch_probs, valid_indices, batch_box_feats = \
        sess.run(sess_input, feed_dict=feed_dict)
    # Box features are packed across the batch; counts must agree.
    assert np.sum(valid_indices) == batch_box_feats.shape[0], "duh"

    for b in range(valid_frame_num):
        cur_frame = frame_idxs[b]

        # [k, 4]
        final_boxes = batch_boxes[b][:valid_indices[b]]
        # [k]
        final_labels = batch_labels[b][:valid_indices[b]]
        # [k]
        final_probs = batch_probs[b][:valid_indices[b]]
        # [k, 256, 7, 7] — slice this image's features out of the packed
        # [M, 256, 7, 7] tensor by skipping earlier images' boxes.
        previous_box_num = sum(valid_indices[:b])
        box_feats = batch_box_feats[previous_box_num:previous_box_num +
                                    valid_indices[b]]

        if args.get_tracking:

            assert len(box_feats) == len(final_boxes)

            for tracking_obj in tracking_objs:
                target_tracking_obs = [tracking_obj]

                # will consider scale here
                scale = scales[b]
                detections = create_obj_infos(
                    cur_frame,
                    final_boxes,
                    final_probs,
                    final_labels,
                    box_feats,
                    targetid2class,
                    target_tracking_obs,
                    args.min_confidence,
                    args.min_detection_height,
                    scale,
                    is_coco_model=args.is_coco_model,
                    coco_to_actev_mapping=coco_obj_to_actev_obj)
                # Run non-maxima suppression.
                boxes = np.array([d.tlwh for d in detections])
                scores = np.array([d.confidence for d in detections])
                indices = preprocessing.non_max_suppression(
                    boxes, args.nms_max_overlap, scores)
                detections = [detections[i] for i in indices]

                # tracking
                tracker_dict[tracking_obj].predict()
                tracker_dict[tracking_obj].update(detections)

                # Store results
                for track in tracker_dict[tracking_obj].tracks:
                    if not track.is_confirmed() or track.time_since_update > 1:
                        # Tentative track seen this frame: buffer its row
                        # until the track is confirmed.
                        if (not track.is_confirmed()
                            ) and track.time_since_update == 0:
                            bbox = track.to_tlwh()
                            if track.track_id not in \
                                tmp_tracking_results_dict[tracking_obj]:
                                tmp_tracking_results_dict[tracking_obj][track.track_id] = \
                                    [[cur_frame, track.track_id, bbox[0], bbox[1],
                                      bbox[2], bbox[3]]]
                            else:
                                tmp_tracking_results_dict[tracking_obj][
                                    track.track_id].append([
                                        cur_frame, track.track_id, bbox[0],
                                        bbox[1], bbox[2], bbox[3]
                                    ])
                        continue
                    bbox = track.to_tlwh()
                    if track.track_id in tmp_tracking_results_dict[
                            tracking_obj]:
                        # Track confirmed: flush buffered rows, drop buffer.
                        pred_list = tmp_tracking_results_dict[tracking_obj][
                            track.track_id]
                        for pred_data in pred_list:
                            tracking_results_dict[tracking_obj].append(
                                pred_data)
                        tmp_tracking_results_dict[tracking_obj].pop(
                            track.track_id, None)
                    tracking_results_dict[tracking_obj].append([
                        cur_frame, track.track_id, bbox[0], bbox[1], bbox[2],
                        bbox[3]
                    ])

        if obj_out_dir is None:  # not saving the boxes

            continue

        # ---------------- get the json outputs for object detection

        # scale back the box to original image size
        final_boxes = final_boxes / scales[b]

        # save as json
        pred = []

        for j, (box, prob,
                label) in enumerate(zip(final_boxes, final_probs,
                                        final_labels)):
            box[2] -= box[0]
            box[3] -= box[1]  # produce x,y,w,h output

            cat_id = int(label)
            cat_name = targetid2class[cat_id]

            res = {
                "category_id": int(cat_id),
                "cat_name": cat_name,  # [0-80]
                "score": float(round(prob, 7)),
                "bbox": [float(round(x, 2)) for x in box],
                "segmentation": None,
            }

            pred.append(res)

        # BUG FIX: the original referenced `video_out_path`, which is not
        # defined anywhere in this function (NameError whenever obj_out_dir
        # is given); write into obj_out_dir instead.
        # NOTE(review): upstream may have used a per-video subdirectory of
        # obj_out_dir — confirm against the caller.
        predfile = os.path.join(obj_out_dir, "%d.json" % (cur_frame))

        with open(predfile, "w") as f:
            json.dump(pred, f)
# --- Exemplo n.º 3 (code-listing-site artifact; commented so the file parses) ---
                            model.final_probs, model.fpn_box_feat
                        ]
                        final_boxes, final_labels, final_probs, box_feats = sess.run(
                            sess_input, feed_dict=feed_dict)

                    assert len(box_feats) == len(final_boxes)

                    for tracking_obj in tracking_objs:
                        target_tracking_obs = [tracking_obj]
                        detections = create_obj_infos(
                            cur_frame,
                            final_boxes,
                            final_probs,
                            final_labels,
                            box_feats,
                            targetid2class,
                            target_tracking_obs,
                            args.min_confidence,
                            args.min_detection_height,
                            scale,
                            is_coco_model=args.is_coco_model,
                            coco_to_actev_mapping=coco_obj_to_actev_obj)
                        # Run non-maxima suppression.
                        boxes = np.array([d.tlwh for d in detections])
                        scores = np.array([d.confidence for d in detections])
                        indices = preprocessing.non_max_suppression(
                            boxes, args.nms_max_overlap, scores)
                        detections = [detections[i] for i in indices]

                        # tracking
                        tracker_dict[tracking_obj].predict()
                                            "%d.npy" % (cur_frame))
                    np.save(featfile, box_feats)
                elif args.get_tracking:
                    sess_input = [
                        model.final_boxes, model.final_labels,
                        model.final_probs, model.fpn_box_feat
                    ]
                    final_boxes, final_labels, final_probs, box_feats = sess.run(
                        sess_input, feed_dict=feed_dict)
                    assert len(box_feats) == len(final_boxes)

                    for tracking_obj in tracking_objs:
                        target_tracking_obs = [tracking_obj]
                        detections = create_obj_infos(
                            cur_frame, final_boxes, final_probs, final_labels,
                            box_feats, targetid2class, target_tracking_obs,
                            args.min_confidence, args.min_detection_height,
                            scale)
                        # Run non-maxima suppression.
                        boxes = np.array([d.tlwh for d in detections])
                        scores = np.array([d.confidence for d in detections])
                        indices = preprocessing.non_max_suppression(
                            boxes, args.nms_max_overlap, scores)
                        detections = [detections[i] for i in indices]

                        # tracking
                        tracker_dict[tracking_obj].predict()
                        tracker_dict[tracking_obj].update(detections)

                        # Store results
                        for track in tracker_dict[tracking_obj].tracks: