def main():
    """Command-line entry point: print summary statistics for a labels file.

    Reads the labels file path from ``sys.argv``, loads it (searching the
    file's own directory for relocated videos), and prints per-video and
    total counts of labeled frames, tracks, and instances.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("data_path", help="Path to labels json file")
    args = parser.parse_args()

    # Look for moved video files next to the labels file itself.
    video_callback = Labels.make_video_callback([os.path.dirname(args.data_path)])
    labels = Labels.load_file(args.data_path, video_search=video_callback)

    print(f"Labeled frames: {len(labels)}")
    print(f"Tracks: {len(labels.tracks)}")
    print(f"Video files:")

    total_user_frames = 0

    for vid in labels.videos:
        lfs = labels.find(vid)

        # Fix: min()/max() below raise ValueError on an empty sequence, so a
        # video with no labeled frames would crash the report. Skip such
        # videos — there is nothing to summarize for them.
        if not lfs:
            continue

        first_idx = min((lf.frame_idx for lf in lfs))
        last_idx = max((lf.frame_idx for lf in lfs))

        tracks = {inst.track for lf in lfs for inst in lf}
        concurrent_count = max((len(lf.instances) for lf in lfs))
        user_frames = labels.get_labeled_frame_count(vid, "user")
        total_user_frames += user_frames

        print(f"  {vid.filename}")
        print(f"    labeled frames from {first_idx} to {last_idx}")
        print(f"    labeled frames: {len(lfs)}")
        print(f"    user labeled frames: {user_frames}")
        print(f"    tracks: {len(tracks)}")
        print(f"    max instances in frame: {concurrent_count}")

    print(f"Total user labeled frames: {total_user_frames}")

    # Provenance metadata (how/when the file was produced), if present.
    if labels.provenance:
        print()
        print(f"Provenance:")
        for key, value in labels.provenance.items():
            print(f"  {key}: {value}")
def main(f, dir):
    """Sweep every tracker/matcher/similarity combination over a fixture.

    Loads the predictions fixture, then for each combination builds a
    tracker plus an output filename (rooted at ``dir``) and invokes the
    callback ``f(frames, tracker, gt_filename)``.
    """
    filename = "tests/data/json_format_v2/centered_pair_predictions.json"

    labels = Labels.load_file(
        filename,
        video_search=Labels.make_video_callback([os.path.dirname(filename)]),
    )

    # Registries of the components to sweep over, keyed by short names that
    # are also embedded in the output filenames.
    trackers = dict(
        simple=sleap.nn.tracker.simple.SimpleTracker,
        flow=sleap.nn.tracker.flow.FlowTracker,
    )
    matchers = dict(
        hungarian=sleap.nn.tracker.components.hungarian_matching,
        greedy=sleap.nn.tracker.components.greedy_matching,
    )
    similarities = dict(
        instance=sleap.nn.tracker.components.instance_similarity,
        centroid=sleap.nn.tracker.components.centroid_distance,
        iou=sleap.nn.tracker.components.instance_iou,
    )
    scales = (
        1,
        0.25,
    )

    def make_tracker(tracker_name, matcher_name, sim_name, scale=0):
        # Build a tracker instance; a truthy scale also configures the
        # candidate maker's image scale.
        tracker = trackers[tracker_name](
            matching_function=matchers[matcher_name],
            similarity_function=similarities[sim_name],
        )
        if scale:
            tracker.candidate_maker.img_scale = scale
        return tracker

    def make_filename(tracker_name, matcher_name, sim_name, scale=0):
        # Output path encodes every swept parameter (scale as a percentage).
        return f"{dir}{tracker_name}_{int(scale * 100)}_{matcher_name}_{sim_name}.h5"

    def make_tracker_and_filename(*args, **kwargs):
        return make_tracker(*args, **kwargs), make_filename(*args, **kwargs)

    # Frames sorted by frame index so trackers see them in temporal order.
    frames = sorted(labels.labeled_frames, key=operator.attrgetter("frame_idx"))

    for tracker_name in trackers:
        for matcher_name in matchers:
            for sim_name in similarities:
                # Only the flow tracker supports image scaling; all other
                # trackers run once with scaling disabled (scale=0).
                scale_options = scales if tracker_name == "flow" else (0,)
                for scale in scale_options:
                    tracker, gt_filename = make_tracker_and_filename(
                        tracker_name=tracker_name,
                        matcher_name=matcher_name,
                        sim_name=sim_name,
                        scale=scale,
                    )
                    f(frames, tracker, gt_filename)
def main():
    """Command-line entry point: print summary statistics for a labels file.

    Loads the labels file given on the command line (searching its own
    directory for relocated videos) and prints per-video and total counts of
    labeled frames, tracks, and instances. With ``--verbose``, also prints
    per-frame instance bounding-box corners.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("data_path", help="Path to labels json file")
    parser.add_argument("--verbose", default=False, action="store_true")
    args = parser.parse_args()

    # Look for moved video files next to the labels file itself.
    video_callback = Labels.make_video_callback([os.path.dirname(args.data_path)])
    labels = Labels.load_file(args.data_path, video_search=video_callback)

    print(f"Labeled frames: {len(labels)}")
    print(f"Tracks: {len(labels.tracks)}")
    print(f"Video files:")

    total_user_frames = 0

    for vid in labels.videos:
        print(f"  {vid.filename}")
        lfs = labels.find(vid)
        print(f"    labeled frames: {len(lfs)}")

        # Guard: min()/max() below would raise ValueError on an empty
        # sequence, so videos without labeled frames are skipped here.
        if not lfs:
            continue

        first_idx = min((lf.frame_idx for lf in lfs))
        last_idx = max((lf.frame_idx for lf in lfs))

        tracks = {inst.track for lf in lfs for inst in lf}
        concurrent_count = max((len(lf.instances) for lf in lfs))
        user_frames = labels.get_labeled_frame_count(vid, "user")
        total_user_frames += user_frames

        print(f"    labeled frames from {first_idx} to {last_idx}")
        print(f"    user labeled frames: {user_frames}")
        print(f"    tracks: {len(tracks)}")
        print(f"    max instances in frame: {concurrent_count}")

        if args.verbose:
            print()
            print(
                "    labeled frames: bounding box top left (x, y)"
            )
            # One line per frame: each instance's bounding-box top-left
            # corner; a trailing '^' marks predicted instances (those that
            # carry a 'score' attribute).
            for lf in lfs:
                bb_cords = [
                    f"({inst.bounding_box[0]:.2f}, {inst.bounding_box[1]:.2f}){'^' if hasattr(inst, 'score') else ''}"
                    for inst in lf.instances
                ]
                pt_str = " ".join(bb_cords)
                print(
                    f"    frame {lf.frame_idx}: {len(lf.instances)} instances -> {pt_str}"
                )
            print()

    print(f"Total user labeled frames: {total_user_frames}")

    # Provenance metadata (how/when the file was produced), if present.
    if labels.provenance:
        print()
        print(f"Provenance:")
        for key, value in labels.provenance.items():
            print(f"  {key}: {value}")
node_names=get_nodes_as_np_strings(labels), tracks=locations_matrix, track_occupancy=occupancy_matrix, ) write_occupancy_file(output_path, data_dict, transpose=True) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("data_path", help="Path to labels json file") parser.add_argument( "--all-frames", dest="all_frames", action="store_const", const=True, default=False, help="include all frames without predictions", ) args = parser.parse_args() video_callback = Labels.make_video_callback([os.path.dirname(args.data_path)]) labels = Labels.load_file(args.data_path, video_search=video_callback) output_path = re.sub("(\.json(\.zip)?|\.h5|\.slp)$", "", args.data_path) output_path = output_path + ".tracking.h5" main(labels, output_path=output_path, all_frames=args.all_frames)