Code example #1
 def run(self) -> None:
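     # Each encoder thread builds its own encoder instance from its config.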
     encoder = build_encoder(self.config)
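     # Run until the tracker stops: wait (up to 1 s) for the lock guarding the latest detections,
     # encode them, then release the lock of the next pipeline stage.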
     while self.running:
         if self.lock.acquire(timeout=1):
             features = encoder(self.tracker.latest_detections,
                                self.tracker.frame)
             for i, feature in enumerate(features):
                 self.tracker.latest_features[i][encoder.name] = feature
             self.next_lock.release()
     logging.getLogger('MOT').info('Encoder thread terminated.')
Code example #2
 def __init__(self, detector: Config, matcher: Config,
              encoders: List[Config], predictor: Config, **kwargs):
     # Allow detector to be None
     detector = build_detector(detector) if detector is not None else None
     matcher = build_matcher(matcher)
     encoders = [build_encoder(encoder_cfg) for encoder_cfg in encoders]
     predictor = build_predictor(predictor)
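     # Detection filter configs passed via kwargs are also built into filter objects before initializing the base tracker.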
     if 'detection_filters' in kwargs:
         kwargs['detection_filters'] = [
             build_detection_filter(filter_cfg)
             for filter_cfg in kwargs['detection_filters']
         ]
     super().__init__(detector, encoders, matcher, predictor, **kwargs)
Code example #3
    def __init__(self, detector: Config, encoders: List[Config], matcher: Config, predictor: Config,
                 central_address: str, **kwargs):
        self.detector = build_detector(detector)
        self.encoders = [build_encoder(encoder) for encoder in encoders]
        self.detection_filters = []
        self.identifier = random.randint(0, 1000000)

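        # Set up a ZeroMQ SUB socket connected to the central node; an empty subscription prefix receives all topics.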
        self.zmq_context = zmq.Context()
        self.subscribe_socket = self.zmq_context.socket(zmq.SUB)
        self.subscribe_socket.connect('tcp://' + central_address)
        self.subscribe_socket.setsockopt_string(zmq.SUBSCRIBE, '')

        super(SingleThreadTracker, self).__init__(self.detector, self.encoders, matcher, predictor, **kwargs)
Code example #4
 def __init__(self, detector: Config, matcher: Config, encoders: List[Config], predictor: Config,
              sigma_active: float = 0.5, lambda_active: float = 0.6,
              lambda_new: float = 0.3, secondary_matcher: Config = None, **kwargs):
     detector = build_detector(detector)
     matcher = build_matcher(matcher)
     encoders = [build_encoder(encoder_cfg) for encoder_cfg in encoders]
     predictor = build_predictor(predictor)
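     # Tracktor-style thresholds controlling when tracklets stay active and when new tracklets are started.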
     self.sigma_active = sigma_active
     self.lambda_active = lambda_active
     self.lambda_new = lambda_new
     self.tracklets_inactive = []
     if secondary_matcher is not None:
         self.secondary_matcher = build_matcher(secondary_matcher)
     else:
         self.secondary_matcher = None
     super().__init__(detector, encoders, matcher, predictor, **kwargs)
Code example #5
import argparse
import os
import sys

# Imports for cfg_from_file and build_encoder are omitted in this excerpt.
from mot.structures import Detection

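# Add the OpenReID code under mot/encode to sys.path so that reid.evaluators can be imported.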
sys.path.append(
    '/home/linkinpark213/Source/online-mot-by-detection/mot/encode/OpenReID/')
from reid.evaluators import pairwise_distance, evaluate_all

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_path', type=str, help='Path to dataset')
    args = parser.parse_args()

    encoder_cfg = cfg_from_file(
        '/home/linkinpark213/Source/online-mot-by-detection/configs/encode/openreid_r50.py'
    )
    print('Building encoder...')
    encoder = build_encoder(encoder_cfg)

    subset_features = {}

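    # Encode appearance features for every identity image in the query and gallery subsets.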
    for subset in ['query', 'gallery']:
        ids = sorted(os.listdir(os.path.join(args.dataset_path, subset)))

        all_infos = []
        all_features = []

        for target_id in ids:
            image_filenames = os.listdir(
                os.path.join(args.dataset_path, subset, target_id))
            for image_filename in image_filenames:
                print('Encoding {}'.format(
                    os.path.join(args.dataset_path, subset, target_id,
                                 image_filename)))
Code example #6
    for camID in range(1, 9):
        cam_feature_dir = os.path.join(args.feature_path,
                                       'camera{:d}'.format(camID))
        if not os.path.isdir(cam_feature_dir):
            os.makedirs(cam_feature_dir)

        # From DukeMTMCC format (cam, ID, frameID, left, top, width, height, conf, x, y, z)
        # To MOTChallenge format (frameID, ID, left, top, width, height, conf, x, y, z)
        camData = tracks[np.where(tracks[:, 0] == camID)][:, 1:]
        camData[:, (0, 1)] = camData[:, (1, 0)]

        ids = np.unique(camData[:, 1]).astype(int)
        print('All target IDs in cam{}: '.format(camID), ids)

        encoder = build_encoder(cfg_from_file(args.encoder_config))

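        # For each identity in this camera, sample some of its detections and encode appearance features from the corresponding frames.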
        global target_id
        for target_id in ids:
            feature_filepath = os.path.join(cam_feature_dir,
                                            '{:06d}'.format(int(target_id)))
            detections = sample_detections(camData, target_id,
                                           args.sample_rate)
            if len(detections) > 0:
                features = []
                for i, detection in enumerate(detections):
                    frame_id, _, l, t, w, h, score, _, _, _ = detection
                    img = cv2.imread(
                        os.path.join(
                            args.duke_path,
                            'images/camera{}/{:06d}.jpg'.format(
                                camID, int(frame_id))))
Code example #7
def test_get_component():
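    # Smoke test: each build_* helper should construct the component named by the 'type' field of its config dict.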
    detector = build_detector(dict(type='MMDetector'))
    encoder = build_encoder(dict(type='DGNetEncoder'))
    metric = build_metric(dict(type='IoUMetric'))
    matcher = build_matcher(dict(type='HungarianMatcher'))
    tracker = build_tracker(dict(type='Tracktor'))