def main(): """ Program main entry point. """ args = parse_args() devkit = kitti.Devkit(args.kitti_dir) data_source = devkit.create_data_source(args.sequence, kitti.OBJECT_CLASSES_PEDESTRIANS, min_confidence=args.min_confidence) with open(args.observation_cost_model, "rb") as f: observation_cost_model = pickle.load(f) with open(args.transition_cost_model, "rb") as f: transition_cost_model = pickle.load(f) tracker = min_cost_flow_tracker.MinCostFlowTracker( args.entry_exit_cost, observation_cost_model, transition_cost_model, args.max_num_misses, args.miss_rate, args.cnn_model, optimizer_window_len=args.optimizer_window_len) pymot_adapter = min_cost_flow_pymot.PymotAdapter(tracker) visualization = pymotutils.MonoVisualization( update_ms=kitti.CAMERA_UPDATE_IN_MS, window_shape=kitti.CAMERA_IMAGE_SHAPE, online_tracking_visualization=draw_online_tracking_results) application = pymotutils.Application(data_source) visualization.enable_videowriter("/tmp/detections.avi") application.process_data(pymot_adapter, visualization) application.compute_trajectories(interpolation=True) visualization.enable_videowriter("/tmp/trajectories.avi") application.play_hypotheses(visualization)
def main(): """ Program main entry point. """ args = parse_args() devkit = motchallenge.Devkit(args.mot_dir, args.detection_dir) data_source = devkit.create_data_source(args.sequence) data_source.apply_nonmaxima_suppression(max_bbox_overlap=0.5) with open(args.observation_cost_model, "rb") as f: observation_cost_model = pickle.load(f) with open(args.transition_cost_model, "rb") as f: transition_cost_model = pickle.load(f) tracker = min_cost_flow_tracker.MinCostFlowTracker( args.entry_exit_cost, observation_cost_model, transition_cost_model, args.max_num_misses, args.miss_rate, args.cnn_model, optimizer_window_len=args.optimizer_window_len, observation_cost_bias=args.observation_cost_bias) pymot_adapter = min_cost_flow_pymot.PymotAdapter(tracker) # Compute a suitable window shape. image_shape = data_source.peek_image_shape()[::-1] aspect_ratio = float(image_shape[0]) / image_shape[1] window_shape = int(aspect_ratio * 600), 600 visualization = pymotutils.MonoVisualization( update_ms=25, window_shape=window_shape, online_tracking_visualization=draw_online_tracking_results) application = pymotutils.Application(data_source) application.process_data(pymot_adapter, visualization) application.compute_trajectories(interpolation=True) if args.show_output: visualization.enable_videowriter( os.path.join(args.output_dir, "%s.avi" % args.sequence)) application.play_hypotheses(visualization) if args.output_dir is not None: pymotutils.motchallenge_io.write_hypotheses( os.path.join(args.output_dir, "%s.txt" % args.sequence), application.hypotheses)
def main(): """Main program entry point.""" args = parse_args() devkit = kitti.Devkit(args.kitti_dir) data_source = devkit.create_data_source( args.sequence, kitti.OBJECT_CLASSES_PEDESTRIANS, min_confidence=args.min_confidence) visualization = pymotutils.MonoVisualization( update_ms=kitti.CAMERA_UPDATE_IN_MS, window_shape=kitti.CAMERA_IMAGE_SHAPE) application = pymotutils.Application(data_source) # First, play detections. Then, show ground truth tracks. application.play_detections(visualization) application.play_track_set(data_source.ground_truth, visualization)
def main(): """Main program entry point.""" args = parse_args() devkit = motchallenge.Devkit(args.mot_dir) data_source = devkit.create_data_source(args.sequence, args.min_confidence) # Compute a suitable window shape. image_shape = data_source.peek_image_shape()[::-1] aspect_ratio = float(image_shape[0]) / image_shape[1] window_shape = int(aspect_ratio * 600), 600 visualization = pymotutils.MonoVisualization( update_ms=data_source.update_ms, window_shape=window_shape) application = pymotutils.Application(data_source) # First, play detections. Then, show ground truth tracks. application.play_detections(visualization) application.play_track_set(data_source.ground_truth, visualization)