Example #1
def predict_image(detector, image_list, batch_size=1):
    batch_loop_cnt = math.ceil(float(len(image_list)) / batch_size)
    for i in range(batch_loop_cnt):
        start_index = i * batch_size
        end_index = min((i + 1) * batch_size, len(image_list))
        batch_image_list = image_list[start_index:end_index]
        if FLAGS.run_benchmark:
            # warmup
            detector.predict(
                batch_image_list, FLAGS.threshold, repeats=10, add_timer=False)
            # run benchmark
            detector.predict(
                batch_image_list, FLAGS.threshold, repeats=10, add_timer=True)

            cm, gm, gu = get_current_memory_mb()
            detector.cpu_mem += cm
            detector.gpu_mem += gm
            detector.gpu_util += gu
            print('Test iter {}'.format(i))
        else:
            results = detector.predict(batch_image_list, FLAGS.threshold)
            visualize(
                batch_image_list,
                results,
                detector.pred_config.labels,
                output_dir=FLAGS.output_dir,
                threshold=FLAGS.threshold)
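The slicing above guarantees end_index never exceeds len(image_list), so the last batch may be smaller than batch_size. A minimal, self-contained sketch of the same batching arithmetic (the image paths are placeholders, no PaddleDetection imports are needed):

import math

def iter_batches(items, batch_size=1):
    # Same loop count and slice bounds as predict_image above.
    batch_loop_cnt = math.ceil(float(len(items)) / batch_size)
    for i in range(batch_loop_cnt):
        start_index = i * batch_size
        end_index = min((i + 1) * batch_size, len(items))
        yield items[start_index:end_index]

# 5 items with batch_size=2 -> batches of 2, 2 and 1 items.
print([len(b) for b in iter_batches(['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg', 'e.jpg'], 2)])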
Example #2
def predict_image(detector, image_list):
    results = []
    image_list.sort()
    for i, img_file in enumerate(image_list):
        frame = cv2.imread(img_file)
        if FLAGS.run_benchmark:
            detector.predict([frame], FLAGS.threshold, warmup=10, repeats=10)
            cm, gm, gu = get_current_memory_mb()
            detector.cpu_mem += cm
            detector.gpu_mem += gm
            detector.gpu_util += gu
            print('Test iter {}, file name:{}'.format(i, img_file))
        else:
            online_tlwhs, online_scores, online_ids = detector.predict(
                [frame], FLAGS.threshold)
            online_im = mot_vis.plot_tracking(frame,
                                              online_tlwhs,
                                              online_ids,
                                              online_scores,
                                              frame_id=i)
            if FLAGS.save_images:
                if not os.path.exists(FLAGS.output_dir):
                    os.makedirs(FLAGS.output_dir)
                img_name = os.path.split(img_file)[-1]
                out_path = os.path.join(FLAGS.output_dir, img_name)
                cv2.imwrite(out_path, online_im)
                print("save result to: " + out_path)
Example #3
def predict_image(detector, image_list):
    for i, img_file in enumerate(image_list):
        if FLAGS.run_benchmark:
            # warmup
            detector.predict([img_file],
                             FLAGS.threshold,
                             repeats=10,
                             add_timer=False)
            # run benchmark
            detector.predict([img_file],
                             FLAGS.threshold,
                             repeats=10,
                             add_timer=True)
            cm, gm, gu = get_current_memory_mb()
            detector.cpu_mem += cm
            detector.gpu_mem += gm
            detector.gpu_util += gu
            print('Test iter {}, file name:{}'.format(i, img_file))
        else:
            results = detector.predict([img_file], FLAGS.threshold)
            if not os.path.exists(FLAGS.output_dir):
                os.makedirs(FLAGS.output_dir)
            draw_pose(img_file,
                      results,
                      visual_thread=FLAGS.threshold,
                      save_dir=FLAGS.output_dir)
Example #4
def predict_image(detector, reid_model, image_list):
    image_list.sort()
    for i, img_file in enumerate(image_list):
        frame = cv2.imread(img_file)
        if FLAGS.run_benchmark:
            # warmup
            pred_dets, pred_xyxys = detector.predict([frame],
                                                     FLAGS.scaled,
                                                     FLAGS.threshold,
                                                     repeats=10,
                                                     add_timer=False)
            # run benchmark
            pred_dets, pred_xyxys = detector.predict([frame],
                                                     FLAGS.scaled,
                                                     FLAGS.threshold,
                                                     repeats=10,
                                                     add_timer=True)
            cm, gm, gu = get_current_memory_mb()
            detector.cpu_mem += cm
            detector.gpu_mem += gm
            detector.gpu_util += gu
            print('Test iter {}, file name:{}'.format(i, img_file))
        else:
            pred_dets, pred_xyxys = detector.predict([frame], FLAGS.scaled,
                                                     FLAGS.threshold)

        if len(pred_dets) == 1 and np.sum(pred_dets) == 0:
            print('Frame {} has no object, try to modify score threshold.'.
                  format(i))
            online_im = frame
        else:
            # reid process
            crops = reid_model.get_crops(pred_xyxys, frame)

            if FLAGS.run_benchmark:
                # warmup
                online_tlwhs, online_scores, online_ids = reid_model.predict(
                    crops, pred_dets, repeats=10, add_timer=False)
                # run benchmark
                online_tlwhs, online_scores, online_ids = reid_model.predict(
                    crops, pred_dets, repeats=10, add_timer=True)
                online_im = frame  # tracking output is not visualized in benchmark mode
            else:
                online_tlwhs, online_scores, online_ids = reid_model.predict(
                    crops, pred_dets)
                online_im = plot_tracking(frame,
                                          online_tlwhs,
                                          online_ids,
                                          online_scores,
                                          frame_id=i)

        if FLAGS.save_images:
            if not os.path.exists(FLAGS.output_dir):
                os.makedirs(FLAGS.output_dir)
            img_name = os.path.split(img_file)[-1]
            out_path = os.path.join(FLAGS.output_dir, img_name)
            cv2.imwrite(out_path, online_im)
            print("save result to: " + out_path)
Example #5
def topdown_unite_predict(detector,
                          topdown_keypoint_detector,
                          image_list,
                          keypoint_batch_size=1):
    det_timer = detector.get_timer()
    for i, img_file in enumerate(image_list):
        # Decode image in advance in det + pose prediction
        det_timer.preprocess_time_s.start()
        image, _ = decode_image(img_file, {})
        det_timer.preprocess_time_s.end()

        if FLAGS.run_benchmark:
            results = detector.predict([image],
                                       FLAGS.det_threshold,
                                       warmup=10,
                                       repeats=10)
            cm, gm, gu = get_current_memory_mb()
            detector.cpu_mem += cm
            detector.gpu_mem += gm
            detector.gpu_util += gu
        else:
            results = detector.predict([image], FLAGS.det_threshold)

        if results['boxes_num'] == 0:
            continue

        keypoint_res = predict_with_given_det(
            image, results, topdown_keypoint_detector, keypoint_batch_size,
            FLAGS.det_threshold, FLAGS.keypoint_threshold, FLAGS.run_benchmark)

        if FLAGS.run_benchmark:
            cm, gm, gu = get_current_memory_mb()
            topdown_keypoint_detector.cpu_mem += cm
            topdown_keypoint_detector.gpu_mem += gm
            topdown_keypoint_detector.gpu_util += gu
        else:
            if not os.path.exists(FLAGS.output_dir):
                os.makedirs(FLAGS.output_dir)
            draw_pose(img_file,
                      keypoint_res,
                      visual_thread=FLAGS.keypoint_threshold,
                      save_dir=FLAGS.output_dir)
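These snippets read their options from a module-level FLAGS object. A minimal sketch of how such an object could be produced with argparse; the real PaddleDetection deploy scripts define their own, much larger parsers, so the defaults below are assumptions:

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--run_benchmark', action='store_true')
    parser.add_argument('--det_threshold', type=float, default=0.5)
    parser.add_argument('--keypoint_threshold', type=float, default=0.5)
    parser.add_argument('--threshold', type=float, default=0.5)
    parser.add_argument('--output_dir', type=str, default='output')
    parser.add_argument('--save_images', action='store_true')
    return parser.parse_args()

FLAGS = parse_args()  # used as a global by the predict functions above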
Example #6
def predict_image(detector, reid_model, image_list):
    results = []
    image_list.sort()
    for i, img_file in enumerate(image_list):
        frame = cv2.imread(img_file)
        if FLAGS.run_benchmark:
            pred_bboxes, pred_scores = detector.predict([frame],
                                                        FLAGS.threshold,
                                                        warmup=10,
                                                        repeats=10)
            cm, gm, gu = get_current_memory_mb()
            detector.cpu_mem += cm
            detector.gpu_mem += gm
            detector.gpu_util += gu
            print('Test iter {}, file name:{}'.format(i, img_file))
        else:
            pred_bboxes, pred_scores = detector.predict([frame],
                                                        FLAGS.threshold)

        # convert xyxy boxes to tlwh (top-left x, y, width, height)
        bbox_tlwh = np.concatenate(
            (pred_bboxes[:, 0:2],
             pred_bboxes[:, 2:4] - pred_bboxes[:, 0:2] + 1),
            axis=1)
        crops, pred_scores = reid_model.get_crops(pred_bboxes,
                                                  frame,
                                                  pred_scores,
                                                  w=64,
                                                  h=192)

        if FLAGS.run_benchmark:
            online_tlwhs, online_scores, online_ids = reid_model.predict(
                crops, bbox_tlwh, pred_scores, warmup=10, repeats=10)
        else:
            online_tlwhs, online_scores, online_ids = reid_model.predict(
                crops, bbox_tlwh, pred_scores)
            online_im = mot_vis.plot_tracking(frame,
                                              online_tlwhs,
                                              online_ids,
                                              online_scores,
                                              frame_id=i)

            if FLAGS.save_images:
                if not os.path.exists(FLAGS.output_dir):
                    os.makedirs(FLAGS.output_dir)
                img_name = os.path.split(img_file)[-1]
                out_path = os.path.join(FLAGS.output_dir, img_name)
                cv2.imwrite(out_path, online_im)
                print("save result to: " + out_path)
Example #7
    def predict_skeleton(self, skeleton_list, run_benchmark=False, repeats=1):
        results = []
        for i, skeleton in enumerate(skeleton_list):
            if run_benchmark:
                # preprocess
                inputs = self.preprocess(skeleton)  # warmup
                self.det_times.preprocess_time_s.start()
                inputs = self.preprocess(skeleton)
                self.det_times.preprocess_time_s.end()

                # model prediction
                result = self.predict(repeats=repeats)  # warmup
                self.det_times.inference_time_s.start()
                result = self.predict(repeats=repeats)
                self.det_times.inference_time_s.end(repeats=repeats)

                # postprocess
                result_warmup = self.postprocess(inputs, result)  # warmup
                self.det_times.postprocess_time_s.start()
                result = self.postprocess(inputs, result)
                self.det_times.postprocess_time_s.end()
                self.det_times.img_num += len(skeleton)

                cm, gm, gu = get_current_memory_mb()
                self.cpu_mem += cm
                self.gpu_mem += gm
                self.gpu_util += gu
            else:
                # preprocess
                self.det_times.preprocess_time_s.start()
                inputs = self.preprocess(skeleton)
                self.det_times.preprocess_time_s.end()

                # model prediction
                self.det_times.inference_time_s.start()
                result = self.predict()
                self.det_times.inference_time_s.end()

                # postprocess
                self.det_times.postprocess_time_s.start()
                result = self.postprocess(inputs, result)
                self.det_times.postprocess_time_s.end()
                self.det_times.img_num += len(skeleton)

            results.append(result)
        return results
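The benchmark branch above runs each stage once untimed (warmup) and once between start()/end() calls, so one-off initialization cost is excluded from the reported times. A minimal stand-in for that pattern, assuming a simple accumulating stopwatch instead of the det_times objects:

import time

class Stopwatch:
    def __init__(self):
        self.total = 0.0
        self._t0 = None
    def start(self):
        self._t0 = time.time()
    def end(self, repeats=1):
        # average over repeats, mirroring inference_time_s.end(repeats=repeats)
        self.total += (time.time() - self._t0) / repeats

def fake_inference():
    time.sleep(0.01)

timer = Stopwatch()
fake_inference()          # warmup call, not timed
timer.start()
fake_inference()          # timed call
timer.end(repeats=1)
print('inference_time_s ~ {:.3f}s'.format(timer.total))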
Example #8
def predict_image(detector,
                  image_list,
                  threshold,
                  output_dir,
                  save_images=True,
                  run_benchmark=False):
    results = []
    num_classes = detector.num_classes
    data_type = 'mcmot' if num_classes > 1 else 'mot'
    ids2names = detector.pred_config.labels

    image_list.sort()
    for frame_id, img_file in enumerate(image_list):
        frame = cv2.imread(img_file)
        if run_benchmark:
            # warmup
            detector.predict([img_file],
                             threshold,
                             repeats=10,
                             add_timer=False)
            # run benchmark
            detector.predict([img_file], threshold, repeats=10, add_timer=True)
            cm, gm, gu = get_current_memory_mb()
            detector.cpu_mem += cm
            detector.gpu_mem += gm
            detector.gpu_util += gu
            print('Test iter {}, file name:{}'.format(frame_id, img_file))
        else:
            online_tlwhs, online_scores, online_ids = detector.predict(
                [img_file], threshold)
            online_im = plot_tracking_dict(frame,
                                           num_classes,
                                           online_tlwhs,
                                           online_ids,
                                           online_scores,
                                           frame_id=frame_id,
                                           ids2names=ids2names)
            if save_images:
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                img_name = os.path.split(img_file)[-1]
                out_path = os.path.join(output_dir, img_name)
                cv2.imwrite(out_path, online_im)
                print("save result to: " + out_path)
Example #9
def mot_topdown_unite_predict(mot_detector,
                              topdown_keypoint_detector,
                              image_list,
                              keypoint_batch_size=1,
                              save_res=False):
    det_timer = mot_detector.get_timer()
    store_res = []
    image_list.sort()
    num_classes = mot_detector.num_classes
    for i, img_file in enumerate(image_list):
        # Decode image in advance in mot + pose prediction
        det_timer.preprocess_time_s.start()
        image, _ = decode_image(img_file, {})
        det_timer.preprocess_time_s.end()

        if FLAGS.run_benchmark:
            mot_results = mot_detector.predict_image([image],
                                                     run_benchmark=True,
                                                     repeats=10)

            cm, gm, gu = get_current_memory_mb()
            mot_detector.cpu_mem += cm
            mot_detector.gpu_mem += gm
            mot_detector.gpu_util += gu
        else:
            mot_results = mot_detector.predict_image([image], visual=False)

        online_tlwhs, online_scores, online_ids = mot_results[
            0]  # only support bs=1 in MOT model
        results = convert_mot_to_det(
            online_tlwhs[0],
            online_scores[0])  # only support single class for mot + pose
        if results['boxes_num'] == 0:
            continue

        keypoint_res = predict_with_given_det(image, results,
                                              topdown_keypoint_detector,
                                              keypoint_batch_size,
                                              FLAGS.run_benchmark)

        if save_res:
            save_name = img_file if isinstance(img_file, str) else i
            store_res.append([
                save_name, keypoint_res['bbox'],
                [keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]
            ])
        if FLAGS.run_benchmark:
            cm, gm, gu = get_current_memory_mb()
            topdown_keypoint_detector.cpu_mem += cm
            topdown_keypoint_detector.gpu_mem += gm
            topdown_keypoint_detector.gpu_util += gu
        else:
            if not os.path.exists(FLAGS.output_dir):
                os.makedirs(FLAGS.output_dir)
            visualize_pose(img_file,
                           keypoint_res,
                           visual_thresh=FLAGS.keypoint_threshold,
                           save_dir=FLAGS.output_dir)

    if save_res:
        """
        1) store_res: a list of image_data
        2) image_data: [imageid, rects, [keypoints, scores]]
        3) rects: list of rects, each [xmin, ymin, xmax, ymax]
        4) keypoints: 17 joints * [x, y, conf], 51 values per person
        5) scores: mean confidence over all joints
        """
        with open("det_keypoint_unite_image_results.json", 'w') as wf:
            json.dump(store_res, wf, indent=4)
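For reference, a toy store_res entry following the layout documented in the docstring above; all coordinates and scores are placeholder values:

import json

person_keypoints = [[30.0, 40.0, 0.9]] * 17      # 17 joints of [x, y, conf]
store_res = [[
    'demo/000001.jpg',                           # imageid
    [[10.0, 20.0, 110.0, 220.0]],                # rects
    [[person_keypoints], [0.9]]                  # [keypoints, scores]
]]
print(json.dumps(store_res, indent=4))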
Example #10
    def predict_image(self,
                      image_list,
                      run_benchmark=False,
                      repeats=1,
                      visual=True,
                      seq_name=None):
        num_classes = self.num_classes
        image_list.sort()
        ids2names = self.pred_config.labels
        mot_results = []
        for frame_id, img_file in enumerate(image_list):
            batch_image_list = [img_file]  # bs=1 in MOT model
            frame, _ = decode_image(img_file, {})
            if run_benchmark:
                # preprocess
                inputs = self.preprocess(batch_image_list)  # warmup
                self.det_times.preprocess_time_s.start()
                inputs = self.preprocess(batch_image_list)
                self.det_times.preprocess_time_s.end()

                # model prediction
                result_warmup = self.predict(repeats=repeats)  # warmup
                self.det_times.inference_time_s.start()
                result = self.predict(repeats=repeats)
                self.det_times.inference_time_s.end(repeats=repeats)

                # postprocess
                result_warmup = self.postprocess(inputs, result)  # warmup
                self.det_times.postprocess_time_s.start()
                det_result = self.postprocess(inputs, result)
                self.det_times.postprocess_time_s.end()

                # tracking
                if self.use_reid:
                    det_result['frame_id'] = frame_id
                    det_result['seq_name'] = seq_name
                    det_result['ori_image'] = frame
                    det_result = self.reidprocess(det_result)
                result_warmup = self.tracking(det_result)
                self.det_times.tracking_time_s.start()
                if self.use_reid:
                    det_result = self.reidprocess(det_result)
                tracking_outs = self.tracking(det_result)
                self.det_times.tracking_time_s.end()
                self.det_times.img_num += 1

                cm, gm, gu = get_current_memory_mb()
                self.cpu_mem += cm
                self.gpu_mem += gm
                self.gpu_util += gu

            else:
                self.det_times.preprocess_time_s.start()
                inputs = self.preprocess(batch_image_list)
                self.det_times.preprocess_time_s.end()

                self.det_times.inference_time_s.start()
                result = self.predict()
                self.det_times.inference_time_s.end()

                self.det_times.postprocess_time_s.start()
                det_result = self.postprocess(inputs, result)
                self.det_times.postprocess_time_s.end()

                # tracking process
                self.det_times.tracking_time_s.start()
                if self.use_reid:
                    det_result['frame_id'] = frame_id
                    det_result['seq_name'] = seq_name
                    det_result['ori_image'] = frame
                    det_result = self.reidprocess(det_result)
                tracking_outs = self.tracking(det_result)
                self.det_times.tracking_time_s.end()
                self.det_times.img_num += 1

            online_tlwhs = tracking_outs['online_tlwhs']
            online_scores = tracking_outs['online_scores']
            online_ids = tracking_outs['online_ids']

            mot_results.append([online_tlwhs, online_scores, online_ids])

            if visual:
                if len(image_list) > 1 and frame_id % 10 == 0:
                    print('Tracking frame {}'.format(frame_id))
                frame, _ = decode_image(img_file, {})
                if isinstance(online_tlwhs, defaultdict):
                    im = plot_tracking_dict(
                        frame,
                        num_classes,
                        online_tlwhs,
                        online_ids,
                        online_scores,
                        frame_id=frame_id,
                        ids2names=[])
                else:
                    im = plot_tracking(
                        frame,
                        online_tlwhs,
                        online_ids,
                        online_scores,
                        frame_id=frame_id)
                if seq_name is None:
                    seq_name = image_list[0].split('/')[-2]
                save_dir = os.path.join(self.output_dir, seq_name)
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), im)

        return mot_results
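The returned mot_results is a list of [online_tlwhs, online_scores, online_ids] per frame. Below is a sketch of dumping it in the common MOTChallenge text layout, assuming the single-class case where the three entries are plain lists; dump_mot_txt is a hypothetical helper, not part of PaddleDetection:

def dump_mot_txt(filename, mot_results):
    # one line per track box: frame, id, x, y, w, h, score, -1, -1, -1
    with open(filename, 'w') as f:
        for frame_id, (tlwhs, scores, ids) in enumerate(mot_results):
            for tlwh, score, obj_id in zip(tlwhs, scores, ids):
                x, y, w, h = tlwh
                f.write('{},{},{:.2f},{:.2f},{:.2f},{:.2f},{:.4f},-1,-1,-1\n'.format(
                    frame_id + 1, int(obj_id), x, y, w, h, score))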
Example #11
    def predict_image(self,
                      image_list,
                      run_benchmark=False,
                      repeats=1,
                      visual=True):
        batch_loop_cnt = math.ceil(float(len(image_list)) / self.batch_size)
        results = []
        for i in range(batch_loop_cnt):
            start_index = i * self.batch_size
            end_index = min((i + 1) * self.batch_size, len(image_list))
            batch_image_list = image_list[start_index:end_index]
            if run_benchmark:
                # preprocess
                inputs = self.preprocess(batch_image_list)  # warmup
                self.det_times.preprocess_time_s.start()
                inputs = self.preprocess(batch_image_list)
                self.det_times.preprocess_time_s.end()

                # model prediction
                result = self.predict(repeats=repeats)  # warmup
                self.det_times.inference_time_s.start()
                result = self.predict(repeats=repeats)
                self.det_times.inference_time_s.end(repeats=repeats)

                # postprocess
                result_warmup = self.postprocess(inputs, result)  # warmup
                self.det_times.postprocess_time_s.start()
                result = self.postprocess(inputs, result)
                self.det_times.postprocess_time_s.end()
                self.det_times.img_num += len(batch_image_list)

                cm, gm, gu = get_current_memory_mb()
                self.cpu_mem += cm
                self.gpu_mem += gm
                self.gpu_util += gu
            else:
                # preprocess
                self.det_times.preprocess_time_s.start()
                inputs = self.preprocess(batch_image_list)
                self.det_times.preprocess_time_s.end()

                # model prediction
                self.det_times.inference_time_s.start()
                result = self.predict()
                self.det_times.inference_time_s.end()

                # postprocess
                self.det_times.postprocess_time_s.start()
                result = self.postprocess(inputs, result)
                self.det_times.postprocess_time_s.end()
                self.det_times.img_num += len(batch_image_list)

                if visual:
                    visualize(batch_image_list,
                              result,
                              output_dir=self.output_dir)

            results.append(result)
            if visual:
                print('Test iter {}'.format(i))

        results = self.merge_batch_result(results)
        return results
Example #12
def topdown_unite_predict(detector,
                          topdown_keypoint_detector,
                          image_list,
                          keypoint_batch_size=1,
                          save_res=False):
    det_timer = detector.get_timer()
    store_res = []
    for i, img_file in enumerate(image_list):
        # Decode image in advance in det + pose prediction
        det_timer.preprocess_time_s.start()
        image, _ = decode_image(img_file, {})
        det_timer.preprocess_time_s.end()

        if FLAGS.run_benchmark:
            results = detector.predict_image([image],
                                             run_benchmark=True,
                                             repeats=10)

            cm, gm, gu = get_current_memory_mb()
            detector.cpu_mem += cm
            detector.gpu_mem += gm
            detector.gpu_util += gu
        else:
            results = detector.predict_image([image], visual=False)
        results = detector.filter_box(results, FLAGS.det_threshold)
        if results['boxes_num'] > 0:
            keypoint_res = predict_with_given_det(image, results,
                                                  topdown_keypoint_detector,
                                                  keypoint_batch_size,
                                                  FLAGS.run_benchmark)

            if save_res:
                save_name = img_file if isinstance(img_file, str) else i
                store_res.append([
                    save_name, keypoint_res['bbox'],
                    [keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]
                ])
        else:
            results["keypoint"] = [[], []]
            keypoint_res = results
        if FLAGS.run_benchmark:
            cm, gm, gu = get_current_memory_mb()
            topdown_keypoint_detector.cpu_mem += cm
            topdown_keypoint_detector.gpu_mem += gm
            topdown_keypoint_detector.gpu_util += gu
        else:
            if not os.path.exists(FLAGS.output_dir):
                os.makedirs(FLAGS.output_dir)
            visualize_pose(img_file,
                           keypoint_res,
                           visual_thresh=FLAGS.keypoint_threshold,
                           save_dir=FLAGS.output_dir)
    if save_res:
        """
        1) store_res: a list of image_data
        2) image_data: [imageid, rects, [keypoints, scores]]
        3) rects: list of rects, each [xmin, ymin, xmax, ymax]
        4) keypoints: 17 joints * [x, y, conf], 51 values per person
        5) scores: mean confidence over all joints
        """
        with open("det_keypoint_unite_image_results.json", 'w') as wf:
            json.dump(store_res, wf, indent=4)
Example #13
def mot_keypoint_unite_predict_image(mot_model,
                                     keypoint_model,
                                     image_list,
                                     keypoint_batch_size=1):
    num_classes = mot_model.num_classes
    assert num_classes == 1, 'Only one category mot model supported for uniting keypoint deploy.'
    data_type = 'mot'
    image_list.sort()
    for i, img_file in enumerate(image_list):
        frame = cv2.imread(img_file)

        if FLAGS.run_benchmark:
            # warmup
            online_tlwhs, online_scores, online_ids = mot_model.predict(
                [frame], FLAGS.mot_threshold, repeats=10, add_timer=False)
            # run benchmark
            online_tlwhs, online_scores, online_ids = mot_model.predict(
                [frame], FLAGS.mot_threshold, repeats=10, add_timer=True)
            cm, gm, gu = get_current_memory_mb()
            mot_model.cpu_mem += cm
            mot_model.gpu_mem += gm
            mot_model.gpu_util += gu

        else:
            online_tlwhs, online_scores, online_ids = mot_model.predict(
                [frame], FLAGS.mot_threshold)

        keypoint_arch = keypoint_model.pred_config.arch
        if KEYPOINT_SUPPORT_MODELS[keypoint_arch] == 'keypoint_topdown':
            results = convert_mot_to_det(online_tlwhs, online_scores)
            keypoint_results = predict_with_given_det(frame, results,
                                                      keypoint_model,
                                                      keypoint_batch_size,
                                                      FLAGS.mot_threshold,
                                                      FLAGS.keypoint_threshold,
                                                      FLAGS.run_benchmark)

        else:
            if FLAGS.run_benchmark:
                keypoint_results = keypoint_model.predict(
                    [frame],
                    FLAGS.keypoint_threshold,
                    repeats=10,
                    add_timer=False)

            repeats = 10 if FLAGS.run_benchmark else 1
            keypoint_results = keypoint_model.predict([frame],
                                                      FLAGS.keypoint_threshold,
                                                      repeats=repeats)

        if FLAGS.run_benchmark:
            cm, gm, gu = get_current_memory_mb()
            keypoint_model.cpu_mem += cm
            keypoint_model.gpu_mem += gm
            keypoint_model.gpu_util += gu
        else:
            im = draw_pose(
                frame,
                keypoint_results,
                visual_thread=FLAGS.keypoint_threshold,
                returnimg=True,
                ids=online_ids[0] if KEYPOINT_SUPPORT_MODELS[keypoint_arch]
                == 'keypoint_topdown' else None)

            online_im = plot_tracking_dict(im,
                                           num_classes,
                                           online_tlwhs,
                                           online_ids,
                                           online_scores,
                                           frame_id=i)
            if FLAGS.save_images:
                if not os.path.exists(FLAGS.output_dir):
                    os.makedirs(FLAGS.output_dir)
                img_name = os.path.split(img_file)[-1]
                out_path = os.path.join(FLAGS.output_dir, img_name)
                cv2.imwrite(out_path, online_im)
                print("save result to: " + out_path)
Example #14
    def predict_image(self,
                      image_list,
                      run_benchmark=False,
                      repeats=1,
                      visual=True,
                      seq_name=None):
        mot_results = []
        num_classes = self.num_classes
        image_list.sort()
        ids2names = self.pred_config.labels
        data_type = 'mcmot' if num_classes > 1 else 'mot'
        for frame_id, img_file in enumerate(image_list):
            batch_image_list = [img_file]  # bs=1 in MOT model
            if run_benchmark:
                # preprocess
                inputs = self.preprocess(batch_image_list)  # warmup
                self.det_times.preprocess_time_s.start()
                inputs = self.preprocess(batch_image_list)
                self.det_times.preprocess_time_s.end()

                # model prediction
                result_warmup = self.predict(repeats=repeats)  # warmup
                self.det_times.inference_time_s.start()
                result = self.predict(repeats=repeats)
                self.det_times.inference_time_s.end(repeats=repeats)

                # postprocess
                result_warmup = self.postprocess(inputs, result)  # warmup
                self.det_times.postprocess_time_s.start()
                det_result = self.postprocess(inputs, result)
                self.det_times.postprocess_time_s.end()

                # tracking
                result_warmup = self.tracking(det_result)
                self.det_times.tracking_time_s.start()
                online_tlwhs, online_scores, online_ids = self.tracking(
                    det_result)
                self.det_times.tracking_time_s.end()
                self.det_times.img_num += 1

                cm, gm, gu = get_current_memory_mb()
                self.cpu_mem += cm
                self.gpu_mem += gm
                self.gpu_util += gu

            else:
                self.det_times.preprocess_time_s.start()
                inputs = self.preprocess(batch_image_list)
                self.det_times.preprocess_time_s.end()

                self.det_times.inference_time_s.start()
                result = self.predict()
                self.det_times.inference_time_s.end()

                self.det_times.postprocess_time_s.start()
                det_result = self.postprocess(inputs, result)
                self.det_times.postprocess_time_s.end()

                # tracking process
                self.det_times.tracking_time_s.start()
                online_tlwhs, online_scores, online_ids = self.tracking(
                    det_result)
                self.det_times.tracking_time_s.end()
                self.det_times.img_num += 1

            if visual:
                if len(image_list) > 1 and frame_id % 10 == 0:
                    print('Tracking frame {}'.format(frame_id))
                frame, _ = decode_image(img_file, {})

                im = plot_tracking_dict(frame,
                                        num_classes,
                                        online_tlwhs,
                                        online_ids,
                                        online_scores,
                                        frame_id=frame_id,
                                        ids2names=ids2names)
                if seq_name is None:
                    seq_name = image_list[0].split('/')[-2]
                save_dir = os.path.join(self.output_dir, seq_name)
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), im)

            mot_results.append([online_tlwhs, online_scores, online_ids])
        return mot_results
Example #15
def mot_keypoint_unite_predict_image(mot_model,
                                     keypoint_model,
                                     image_list,
                                     keypoint_batch_size=1):
    image_list.sort()
    for i, img_file in enumerate(image_list):
        frame = cv2.imread(img_file)

        if FLAGS.run_benchmark:
            online_tlwhs, online_scores, online_ids = mot_model.predict(
                [frame], FLAGS.mot_threshold, warmup=10, repeats=10)
            cm, gm, gu = get_current_memory_mb()
            mot_model.cpu_mem += cm
            mot_model.gpu_mem += gm
            mot_model.gpu_util += gu

        else:
            online_tlwhs, online_scores, online_ids = mot_model.predict(
                [frame], FLAGS.mot_threshold)

        keypoint_arch = keypoint_model.pred_config.arch
        if KEYPOINT_SUPPORT_MODELS[keypoint_arch] == 'keypoint_topdown':
            results = convert_mot_to_det(online_tlwhs, online_scores)
            keypoint_results = predict_with_given_det(frame, results,
                                                      keypoint_model,
                                                      keypoint_batch_size,
                                                      FLAGS.mot_threshold,
                                                      FLAGS.keypoint_threshold,
                                                      FLAGS.run_benchmark)

        else:
            warmup = 10 if FLAGS.run_benchmark else 0
            repeats = 10 if FLAGS.run_benchmark else 1
            keypoint_results = keypoint_model.predict([frame],
                                                      FLAGS.keypoint_threshold,
                                                      warmup=warmup,
                                                      repeats=repeats)

        if FLAGS.run_benchmark:
            cm, gm, gu = get_current_memory_mb()
            keypoint_model.cpu_mem += cm
            keypoint_model.gpu_mem += gm
            keypoint_model.gpu_util += gu
        else:
            im = draw_pose(
                frame,
                keypoint_results,
                visual_thread=FLAGS.keypoint_threshold,
                returnimg=True,
                ids=online_ids if KEYPOINT_SUPPORT_MODELS[keypoint_arch]
                == 'keypoint_topdown' else None)

            online_im = mot_vis.plot_tracking(im,
                                              online_tlwhs,
                                              online_ids,
                                              online_scores,
                                              frame_id=i)

            if FLAGS.save_images:
                if not os.path.exists(FLAGS.output_dir):
                    os.makedirs(FLAGS.output_dir)
                img_name = os.path.split(img_file)[-1]
                out_path = os.path.join(FLAGS.output_dir, img_name)
                cv2.imwrite(out_path, online_im)
                print("save result to: " + out_path)