Example no. 1
0
def save_probe_dir(video_id, track_id, raw_img, bbox):
    """Save a cropped query image of one track under ``tracking_images/``.

    Args:
        video_id: identifier of the source video (used as a directory name).
        track_id: numeric track identifier; reformatted for the filename.
        raw_img: full frame as an HxWxC image array.
        bbox: (x1, y1, x2, y2) pixel coordinates of the object box.
    """
    new_track_id = convert_number_to_image_form(int(track_id),
                                                start_digit='2',
                                                max_length=3)
    # renamed from `dir` to avoid shadowing the builtin
    save_dir = 'tracking_images/{}/{}'.format(video_id, new_track_id)

    # create folder if not exist
    mkdir_if_missing(save_dir)

    # crop the object from the frame (rows = y range, cols = x range)
    obj_img = raw_img[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
    h, w, _ = obj_img.shape

    # skip degenerate (empty) crops -- cv2.resize would raise on them
    if h == 0 or w == 0:
        return

    obj_img = cv2.resize(obj_img, (128, 256), interpolation=cv2.INTER_LINEAR)

    # next frame index = number of images already saved for this track
    cur_idx = len(os.listdir(save_dir))
    cv2.imwrite(
        '{}/{}C1T{}F{}.jpg'.format(
            save_dir, new_track_id, new_track_id,
            convert_number_to_image_form(cur_idx, start_digit='',
                                         max_length=3)), obj_img)
def video_feed():
    """Run detection/tracking on the camera source and stream MJPEG frames."""
    global loader
    loader = LoadCamera(file_path[0], args.img_size)

    # optionally save the annotated output video
    out = None
    if args.is_saved:
        mkdir_if_missing(args.save_dir)

        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # use splitext instead of slicing off the last 4 characters so that
        # extensions of any length (.mp4, .mpeg, ...) are stripped correctly
        video_name = os.path.splitext(os.path.basename(file_path[0]))[0]
        out = cv2.VideoWriter(
            '{}/{}.avi'.format(args.save_dir, video_name), fourcc,
            10, (args.image_width, args.image_height), True)

    person_handler.set_out(out)

    # choose the multi-object tracker implementation
    tracker = None
    if args.tracking_type == "sort":
        tracker = Sort()
    elif args.tracking_type == "deep_sort":
        tracker = Tracker(metric)

    person_handler.set_tracker(tracker)

    return Response(person_handler.loop_and_detect(loader, vis, ''),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
 def save_probe_dir(self, track_id, raw_img, bbox):
     """Save a cropped query image for ``track_id`` into the probe directory.

     Writes only when ``self.save_probe`` is enabled, and only on every
     ``self.freq``-th frame (based on ``self.no_frame``).
     """
     if self.save_probe:
         # renamed from `dir` to avoid shadowing the builtin
         probe_dir = 'probe/{}'.format(track_id)
         mkdir_if_missing(probe_dir)
         if self.no_frame % self.freq == 0:
             # random uuid filename avoids collisions between frames
             cv2.imwrite('{}/{}.png'.format(probe_dir, str(uuid.uuid4())),
                         raw_img[bbox[1]: bbox[3], bbox[0]: bbox[2], :])
Example no. 4
0
    def download_data(self):
        """Download and extract the iLIDS-VID dataset if not already present."""
        if osp.exists(self.dataset_dir):
            return  # already downloaded and extracted

        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading iLIDS-VID dataset')
        urllib.request.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        # context manager guarantees the archive is closed even if
        # extraction raises (the original leaked the handle on error)
        with tarfile.open(fpath) as tar:
            tar.extractall(path=self.dataset_dir)
Example no. 5
0
    def download_data(self):
        """Download and extract the PRID450S dataset if not already present."""
        if osp.exists(self.dataset_dir):
            return  # already downloaded and extracted

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, 'prid_450s.zip')

        print('Downloading PRID450S dataset')
        urllib.request.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        # context manager guarantees the archive is closed even if
        # extraction raises (the original leaked the handle on error)
        with zipfile.ZipFile(fpath, 'r') as zip_ref:
            zip_ref.extractall(self.dataset_dir)
    def download_data(self):
        """Download and extract the DukeMTMC-VideoReID dataset if missing."""
        if osp.exists(self.dataset_dir):
            return  # already downloaded and extracted

        print('Creating directory {}'.format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print('Downloading DukeMTMC-VideoReID dataset')
        urllib.request.urlretrieve(self.dataset_url, fpath)

        print('Extracting files')
        # context manager guarantees the archive is closed even if
        # extraction raises (the original leaked the handle on error)
        with zipfile.ZipFile(fpath, 'r') as zip_ref:
            zip_ref.extractall(self.dataset_dir)
def video_feed():
    """Run online processing on the camera source and stream MJPEG frames."""
    global loader
    loader = LoadCamera(file_path[0],
                        img_size=args.img_size,
                        resize_mode=args.mode)

    # optionally save the annotated output video
    out = None
    if args.is_saved:
        mkdir_if_missing(args.save_dir)

        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # use splitext instead of slicing off the last 4 characters so that
        # extensions of any length (.mp4, .mpeg, ...) are stripped correctly
        video_name = os.path.splitext(os.path.basename(file_path[0]))[0]
        out = cv2.VideoWriter(
            '{}/{}.avi'.format(args.save_dir, video_name), fourcc,
            10, (args.image_width, args.image_height), True)

    person_handler.set_out(out)
    person_handler.init_tracker()

    return Response(person_handler.online_process(loader),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Example no. 8
0
    def preprocess_split(self):
        """
        This function is a bit complex and ugly, what it does is
        1. extract data from cuhk-03.mat and save as png images
        2. create 20 classic splits (Li et al. CVPR'14)
        3. create new split (Zhong et al. CVPR'17)
        """
        # All outputs already exist -> nothing to do.
        if osp.exists(self.imgs_labeled_dir) \
                and osp.exists(self.imgs_detected_dir) \
                and osp.exists(self.split_classic_det_json_path) \
                and osp.exists(self.split_classic_lab_json_path) \
                and osp.exists(self.split_new_det_json_path) \
                and osp.exists(self.split_new_lab_json_path):
            return

        mkdir_if_missing(self.imgs_detected_dir)
        mkdir_if_missing(self.imgs_labeled_dir)

        print('Extract image data from "{}" and save as png'.format(
            self.raw_mat_path))
        # NOTE(review): the HDF5 file is never explicitly closed; it stays
        # open for the rest of this method and is released by GC only.
        mat = h5py.File(self.raw_mat_path, 'r')

        def _deref(ref):
            # Resolve an HDF5 object reference into an array; `.T` undoes
            # the MATLAB column-major storage order.
            return mat[ref][:].T

        def _process_images(img_refs, campid, pid, save_dir):
            # Save every non-empty image of one person to `save_dir` and
            # return the list of written file paths.
            img_paths = []  # Note: some persons only have images for one view
            for imgid, img_ref in enumerate(img_refs):
                img = _deref(img_ref)
                if img.size == 0 or img.ndim < 3:
                    continue  # skip empty cell
                # images are saved with the following format, index-1 (ensure uniqueness)
                # campid: index of camera pair (1-5)
                # pid: index of person in 'campid'-th camera pair
                # viewid: index of view, {1, 2}
                # imgid: index of image, (1-10)
                viewid = 1 if imgid < 5 else 2
                img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(
                    campid + 1, pid + 1, viewid, imgid + 1)
                img_path = osp.join(save_dir, img_name)
                if not osp.isfile(img_path):
                    imsave(img_path, img)
                img_paths.append(img_path)
            return img_paths

        def _extract_img(image_type):
            # Walk every camera pair and person for 'detected' or 'labeled'
            # images, writing pngs and collecting (campid, pid, paths) tuples.
            print('Processing {} images ...'.format(image_type))
            meta_data = []
            imgs_dir = self.imgs_detected_dir if image_type == 'detected' else self.imgs_labeled_dir
            for campid, camp_ref in enumerate(mat[image_type][0]):
                camp = _deref(camp_ref)
                num_pids = camp.shape[0]
                for pid in range(num_pids):
                    img_paths = _process_images(camp[pid, :], campid, pid,
                                                imgs_dir)
                    assert len(
                        img_paths) > 0, 'campid{}-pid{} has no images'.format(
                            campid, pid)
                    meta_data.append((campid + 1, pid + 1, img_paths))
                print('- done camera pair {} with {} identities'.format(
                    campid + 1, num_pids))
            return meta_data

        meta_detected = _extract_img('detected')
        meta_labeled = _extract_img('labeled')

        def _extract_classic_split(meta_data, test_split):
            # Partition identities into train/test according to `test_split`
            # (a list of [campid, pid] pairs); pids are relabeled 0-based
            # within each partition. camid is parsed from the filename's
            # third '_'-separated field (the viewid written above).
            train, test = [], []
            num_train_pids, num_test_pids = 0, 0
            num_train_imgs, num_test_imgs = 0, 0
            for i, (campid, pid, img_paths) in enumerate(meta_data):

                if [campid, pid] in test_split:
                    for img_path in img_paths:
                        camid = int(osp.basename(img_path).split('_')
                                    [2]) - 1  # make it 0-based
                        test.append((img_path, num_test_pids, camid))
                    num_test_pids += 1
                    num_test_imgs += len(img_paths)
                else:
                    for img_path in img_paths:
                        camid = int(osp.basename(img_path).split('_')
                                    [2]) - 1  # make it 0-based
                        train.append((img_path, num_train_pids, camid))
                    num_train_pids += 1
                    num_train_imgs += len(img_paths)
            return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs

        print('Creating classic splits (# = 20) ...')
        splits_classic_det, splits_classic_lab = [], []
        for split_ref in mat['testsets'][0]:
            test_split = _deref(split_ref).tolist()

            # create split for detected images
            train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
                _extract_classic_split(meta_detected, test_split)
            splits_classic_det.append({
                'train': train,
                'query': test,
                'gallery': test,
                'num_train_pids': num_train_pids,
                'num_train_imgs': num_train_imgs,
                'num_query_pids': num_test_pids,
                'num_query_imgs': num_test_imgs,
                'num_gallery_pids': num_test_pids,
                'num_gallery_imgs': num_test_imgs,
            })

            # create split for labeled images
            train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \
                _extract_classic_split(meta_labeled, test_split)
            splits_classic_lab.append({
                'train': train,
                'query': test,
                'gallery': test,
                'num_train_pids': num_train_pids,
                'num_train_imgs': num_train_imgs,
                'num_query_pids': num_test_pids,
                'num_query_imgs': num_test_imgs,
                'num_gallery_pids': num_test_pids,
                'num_gallery_imgs': num_test_imgs,
            })

        write_json(splits_classic_det, self.split_classic_det_json_path)
        write_json(splits_classic_lab, self.split_classic_lab_json_path)

        def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):
            # Build a list of (img_path, pid, camid) tuples for the given
            # indices; returns (tuples, #unique pids, #images).
            tmp_set = []
            unique_pids = set()
            for idx in idxs:
                img_name = filelist[idx][0]
                camid = int(img_name.split('_')[2]) - 1  # make it 0-based
                pid = pids[idx]
                if relabel:
                    pid = pid2label[pid]
                img_path = osp.join(img_dir, img_name)
                tmp_set.append((img_path, int(pid), camid))
                unique_pids.add(pid)
            return tmp_set, len(unique_pids), len(idxs)

        def _extract_new_split(split_dict, img_dir):
            # Convert a 767/700 split .mat structure into train/query/gallery
            # sets; MATLAB indices are 1-based, hence the `- 1` shifts.
            # Only the train set is relabeled to contiguous 0-based pids.
            train_idxs = split_dict['train_idx'].flatten() - 1  # index-0
            pids = split_dict['labels'].flatten()
            train_pids = set(pids[train_idxs])
            pid2label = {pid: label for label, pid in enumerate(train_pids)}
            query_idxs = split_dict['query_idx'].flatten() - 1
            gallery_idxs = split_dict['gallery_idx'].flatten() - 1
            filelist = split_dict['filelist'].flatten()
            train_info = _extract_set(filelist,
                                      pids,
                                      pid2label,
                                      train_idxs,
                                      img_dir,
                                      relabel=True)
            query_info = _extract_set(filelist,
                                      pids,
                                      pid2label,
                                      query_idxs,
                                      img_dir,
                                      relabel=False)
            gallery_info = _extract_set(filelist,
                                        pids,
                                        pid2label,
                                        gallery_idxs,
                                        img_dir,
                                        relabel=False)
            return train_info, query_info, gallery_info

        print('Creating new split for detected images (767/700) ...')
        train_info, query_info, gallery_info = _extract_new_split(
            loadmat(self.split_new_det_mat_path),
            self.imgs_detected_dir,
        )
        split = [{
            'train': train_info[0],
            'query': query_info[0],
            'gallery': gallery_info[0],
            'num_train_pids': train_info[1],
            'num_train_imgs': train_info[2],
            'num_query_pids': query_info[1],
            'num_query_imgs': query_info[2],
            'num_gallery_pids': gallery_info[1],
            'num_gallery_imgs': gallery_info[2],
        }]
        write_json(split, self.split_new_det_json_path)

        print('Creating new split for labeled images (767/700) ...')
        train_info, query_info, gallery_info = _extract_new_split(
            loadmat(self.split_new_lab_mat_path),
            self.imgs_labeled_dir,
        )
        split = [{
            'train': train_info[0],
            'query': query_info[0],
            'gallery': gallery_info[0],
            'num_train_pids': train_info[1],
            'num_train_imgs': train_info[2],
            'num_query_pids': query_info[1],
            'num_query_imgs': query_info[2],
            'num_gallery_pids': gallery_info[1],
            'num_gallery_imgs': gallery_info[2],
        }]
        write_json(split, self.split_new_lab_json_path)
    # --- CLI options for converting image folders into videos ---
    parser.add_argument('--output-dir',
                        default='videos',
                        type=str,
                        help='Directory of output videos')
    # NOTE(review): '--f' and '--c' are single letters spelled as long
    # options; they must be passed as `--f 10` / `--c libx264`, not `-f`/`-c`.
    parser.add_argument('--f',
                        dest='frame_rate',
                        default=10,
                        type=int,
                        help='FPS or Frames Per Second')
    parser.add_argument('--c',
                        dest='codec',
                        default='libx264',
                        type=str,
                        help='codec type for saving videos')
    parser.add_argument('--img-ext',
                        default='jpg',
                        type=str,
                        help='image extension')
    parser.add_argument('--vid-ext',
                        default='mp4',
                        type=str,
                        help='video extension')

    args = parser.parse_args()

    # create output directory if it does not exist
    mkdir_if_missing(args.output_dir)

    # each immediate subdirectory of images_dir becomes one output video
    for sub_dir in os.listdir(args.images_dir):
        parse_images_to_video(os.path.join(args.images_dir, sub_dir), args)
Example no. 10
0
# Build the appearance-feature encoders used by the deep-sort trackers
# (one for persons, one for vehicles).
p_encoder = gdet.create_box_encoder(model_filename=args.p_tracker_weights, batch_size=8)

print('Load Vehicle Appearance ...')
v_encoder = gdet.create_box_encoder(model_filename=args.v_tracker_weights, batch_size=8)

print('Load Label Map')
cls_dict = load_cls_dict(args.data_path)
cls_out = load_cls_out(args.cls_out, cls_dict)
print(cls_out)

# Optional sink for per-frame coordinates.
# NOTE(review): this handle is opened here but never closed in this
# script; presumably PersonHandler owns it for the rest of the run --
# confirm, or wrap the run in a try/finally that closes it.
coordinates_out = None
if args.save_coordinates:
    coordinates_out = open('coordinates.txt', 'w')

print('Load Object Detection model ...')
person_handler = PersonHandler(args, p_encoder=p_encoder, v_encoder=v_encoder, cls_out=cls_out,
                               coordinates_out=coordinates_out)
if args.is_saved:
    mkdir_if_missing(args.save_vid)
    person_handler.set_saved_dir(args.save_vid)

if args.save_tracks:
    mkdir_if_missing(args.track_dir)
    person_handler.set_track_dir(args.track_dir)

# Prepare per-class/track colors and tracker state before processing.
person_handler.set_colors()
person_handler.init_tracker()

print('Process Videos in Offline Mode')
loader = LoadImages(args.inputs, img_size=args.img_size, resize_mode=args.mode, od_model=args.od_model)
person_handler.offline_process(loader)