def test_save_labels_with_images(min_labels_slp, tmpdir):
    """Saving with embedded images should embed exactly the requested frames.

    Frame 0 carries user labels, frame 1 is a labeled frame with no user
    instances, and frame 2 is a suggestion-only frame, so each combination
    of embed flags selects a distinct set of embedded frame indices.
    """
    labels = Labels(min_labels_slp.labeled_frames)
    labels.append(LabeledFrame(video=labels.video, frame_idx=1))
    labels.suggestions.append(SuggestionFrame(video=labels.video, frame_idx=2))

    # (filename, embed_all_labeled, embed_suggested, expected embedded inds)
    cases = [
        ("test_user_only.slp", False, False, [0]),
        ("test_all_labeled.slp", True, False, [0, 1]),
        ("test_suggested.slp", False, True, [0, 2]),
        ("test_all.slp", True, True, [0, 1, 2]),
    ]
    for name, embed_all_labeled, embed_suggested, expected in cases:
        fn = os.path.join(tmpdir, name)
        labels.save(
            fn,
            with_images=True,
            embed_all_labeled=embed_all_labeled,
            embed_suggested=embed_suggested,
        )
        assert Labels.load_file(fn).video.embedded_frame_inds == expected
def loadProjectFile(self, filename: Optional[str] = None):
    """Loads given labels file into GUI.

    Args:
        filename: The path to the saved labels dataset, or an already
            loaded `Labels` object. If None or an empty path, then don't
            do anything.

    Returns:
        None.
    """
    # Guard first: the documented default of None would otherwise raise
    # TypeError on len(None) below.
    if filename is None:
        return

    # Callers may pass an in-memory Labels object instead of a path; check
    # this before any string handling (previously `len(filename) == 0` ran
    # first, so an empty Labels object would wrongly early-return, and the
    # check used `type(...) ==` instead of isinstance).
    if isinstance(filename, Labels):
        self.loadLabelsObject(filename, None)
        return

    if len(filename) == 0:
        return

    # Search for videos relative to the labels file's own directory.
    gui_video_callback = Labels.make_gui_video_callback(
        search_paths=[os.path.dirname(filename)]
    )

    try:
        labels = Labels.load_file(filename, video_search=gui_video_callback)
    except ValueError as e:
        print(e)
        QMessageBox(text=f"Unable to load (unknown).").exec_()
        return

    self.loadLabelsObject(labels, filename)
def test_labels_json(tmpdir, multi_skel_vid_labels):
    """Round-trip labels through JSON and check object identity semantics."""
    json_file_path = os.path.join(tmpdir, "dataset.json")
    if os.path.isfile(json_file_path):
        os.remove(json_file_path)

    # Serialize to JSON and confirm the file landed on disk.
    Labels.save_json(labels=multi_skel_vid_labels, filename=json_file_path)
    assert os.path.isfile(json_file_path)

    # A plain reload must match in content...
    reloaded = Labels.load_json(json_file_path)
    _check_labels_match(multi_skel_vid_labels, reloaded)

    # ...but must consist of freshly constructed objects, not the originals.
    assert reloaded.skeletons[0] is not multi_skel_vid_labels.skeletons[0]
    assert multi_skel_vid_labels.nodes[3] not in reloaded.nodes
    assert reloaded.videos[0] is not multi_skel_vid_labels.videos[0]

    # Reloading with match_to (exercised through load_file) should reuse
    # the original objects instead of creating new ones.
    reloaded = Labels.load_file(json_file_path, match_to=multi_skel_vid_labels)
    assert multi_skel_vid_labels.skeletons[0] in reloaded.skeletons
    assert multi_skel_vid_labels.nodes[3] in reloaded.nodes
    assert multi_skel_vid_labels.videos[0] in reloaded.videos
def test_provenance(tmpdir):
    """Provenance metadata should survive a save/load round trip."""
    out_path = os.path.join(tmpdir, "test.slp")
    labels = Labels(provenance=dict(source="test_provenance"))

    # A video with a bare (relative) filename is enough for serialization.
    labels.add_video(Video.from_filename("small_robot.mp4"))

    Labels.save_file(filename=out_path, labels=labels)
    reloaded = Labels.load_file(out_path)
    print(reloaded.provenance)
    assert reloaded.provenance["source"] == "test_provenance"
def test_path_fix(tmpdir):
    """A broken video path should be recovered via a video_search directory."""
    out_path = os.path.join(tmpdir, "test.h5")
    labels = Labels()

    # The bare filename will not resolve until a search path is supplied.
    labels.add_video(Video.from_filename("small_robot.mp4"))
    Labels.save_hdf5(filename=out_path, labels=labels)

    # Point the loader at the directory that actually holds the video.
    reloaded = Labels.load_file(out_path, video_search="tests/data/videos/")

    # The search should have produced the real on-disk path.
    assert len(reloaded.videos) == 1
    assert reloaded.videos[0].filename == "tests/data/videos/small_robot.mp4"
def test_path_fix_with_new_full_path(tmpdir):
    """An explicit list of replacement video paths should override bad ones."""
    out_path = os.path.join(tmpdir, "test.h5")
    labels = Labels()

    # Deliberately save a video whose filename does not exist.
    labels.add_video(Video.from_filename("foo.mp4"))
    Labels.save_hdf5(filename=out_path, labels=labels)

    # Supply the full replacement path directly instead of a search dir.
    reloaded = Labels.load_file(
        out_path, video_search=["tests/data/videos/small_robot.mp4"]
    )

    assert len(reloaded.videos) == 1
    assert reloaded.videos[0].filename == "tests/data/videos/small_robot.mp4"
def main():
    """Print summary statistics for a labels file given on the command line."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("data_path", help="Path to labels json file")
    args = parser.parse_args()

    # Resolve videos relative to the labels file's own directory.
    video_callback = Labels.make_video_callback([os.path.dirname(args.data_path)])
    labels = Labels.load_file(args.data_path, video_search=video_callback)

    print(f"Labeled frames: {len(labels)}")
    print(f"Tracks: {len(labels.tracks)}")
    print(f"Video files:")

    total_user_frames = 0
    for vid in labels.videos:
        lfs = labels.find(vid)

        # Guard against a video with no labeled frames: min()/max() over an
        # empty sequence would raise ValueError (matches the sibling
        # verbose reporter, which already guards this case).
        if not lfs:
            print(f" {vid.filename}")
            print(f" labeled frames: {len(lfs)}")
            continue

        first_idx = min((lf.frame_idx for lf in lfs))
        last_idx = max((lf.frame_idx for lf in lfs))
        tracks = {inst.track for lf in lfs for inst in lf}
        concurrent_count = max((len(lf.instances) for lf in lfs))
        user_frames = labels.get_labeled_frame_count(vid, "user")
        total_user_frames += user_frames

        print(f" {vid.filename}")
        print(f" labeled frames from {first_idx} to {last_idx}")
        print(f" labeled frames: {len(lfs)}")
        print(f" user labeled frames: {user_frames}")
        print(f" tracks: {len(tracks)}")
        print(f" max instances in frame: {concurrent_count}")

    print(f"Total user labeled frames: {total_user_frames}")

    if labels.provenance:
        print()
        print(f"Provenance:")
        for key, value in labels.provenance.items():
            print(f" {key}: {value}")
def check_tracks(labels, gt_filename, limit=None):
    """Compare track assignments in `labels` against a ground-truth file.

    Args:
        labels: Labels whose instance tracks will be checked.
        gt_filename: Path to a labels file holding the ground-truth tracks.
        limit: If given, only compare the first `limit` labeled frames.

    Returns:
        True if every compared instance pair agrees on its track name (or
        both have no track); False at the first mismatch.
    """
    gt_lfs = Labels.load_file(gt_filename).labeled_frames
    lfs = labels.labeled_frames

    if limit:
        # Bug fix: `lfs[limit]` indexed a single frame; a slice is required
        # to truncate the comparison to the first `limit` frames.
        gt_lfs = gt_lfs[:limit]
        lfs = lfs[:limit]

    for lf, gt_lf in zip(lfs, gt_lfs):
        for inst, gt_inst in zip(lf, gt_lf):
            if inst.track is None and gt_inst.track is None:
                continue
            elif inst.track is None or gt_inst.track is None:
                print(lf.frame_idx, "None mismatch")
                return False
            elif inst.track.name != gt_inst.track.name:
                print(lf.frame_idx, inst.track.name, gt_inst.track.name)
                return False

    return True
def test_deserialize_suggestions(small_robot_mp4_vid, tmpdir):
    """Suggestions should round-trip through save_file/load_file intact."""
    skeleton = Skeleton()
    frame = LabeledFrame(
        small_robot_mp4_vid, frame_idx=0, instances=[Instance(skeleton)]
    )
    labels = Labels()
    labels.append(frame)

    # Generate sampled suggestions and attach them to the labels.
    suggestions = VideoFrameSuggestions.suggest(
        labels=labels, params=dict(method="sample", per_video=13)
    )
    labels.set_suggestions(suggestions)

    out_path = os.path.join(tmpdir, "new_suggestions.h5")
    Labels.save_file(filename=out_path, labels=labels)
    reloaded = Labels.load_file(out_path)

    # Same number of suggestions, in the same frame order.
    assert len(suggestions) == len(reloaded.suggestions)
    assert [s.frame_idx for s in suggestions] == [
        s.frame_idx for s in reloaded.suggestions
    ]
def centered_pair_labels():
    """Labels loaded from the centered-pair JSON dataset."""
    labels = Labels.load_file(TEST_JSON_LABELS)
    return labels
node_names=get_nodes_as_np_strings(labels), tracks=locations_matrix, track_occupancy=occupancy_matrix, ) write_occupancy_file(output_path, data_dict, transpose=True) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("data_path", help="Path to labels json file") parser.add_argument( "--all-frames", dest="all_frames", action="store_const", const=True, default=False, help="include all frames without predictions", ) args = parser.parse_args() video_callback = Labels.make_video_callback([os.path.dirname(args.data_path)]) labels = Labels.load_file(args.data_path, video_search=video_callback) output_path = re.sub("(\.json(\.zip)?|\.h5|\.slp)$", "", args.data_path) output_path = output_path + ".tracking.h5" main(labels, output_path=output_path, all_frames=args.all_frames)
def main(f, dir):
    """Run every tracker/matcher/similarity (and scale) combination over the
    centered-pair predictions, calling ``f(frames, tracker, output_filename)``
    once per combination.
    """
    filename = "tests/data/json_format_v2/centered_pair_predictions.json"
    labels = Labels.load_file(
        filename,
        video_search=Labels.make_video_callback([os.path.dirname(filename)]),
    )

    trackers = dict(
        simple=sleap.nn.tracker.simple.SimpleTracker,
        flow=sleap.nn.tracker.flow.FlowTracker,
    )
    matchers = dict(
        hungarian=sleap.nn.tracker.components.hungarian_matching,
        greedy=sleap.nn.tracker.components.greedy_matching,
    )
    similarities = dict(
        instance=sleap.nn.tracker.components.instance_similarity,
        centroid=sleap.nn.tracker.components.centroid_distance,
        iou=sleap.nn.tracker.components.instance_iou,
    )
    scales = (1, 0.25)

    def make_tracker(tracker_name, matcher_name, sim_name, scale=0):
        # Build a tracker; a nonzero scale configures its candidate maker.
        tracker = trackers[tracker_name](
            matching_function=matchers[matcher_name],
            similarity_function=similarities[sim_name],
        )
        if scale:
            tracker.candidate_maker.img_scale = scale
        return tracker

    def make_filename(tracker_name, matcher_name, sim_name, scale=0):
        return f"{dir}{tracker_name}_{int(scale * 100)}_{matcher_name}_{sim_name}.h5"

    def make_tracker_and_filename(*args, **kwargs):
        return make_tracker(*args, **kwargs), make_filename(*args, **kwargs)

    frames = sorted(labels.labeled_frames, key=operator.attrgetter("frame_idx"))

    for tracker_name in trackers:
        for matcher_name in matchers:
            for sim_name in similarities:
                # Only the flow tracker supports multiple image scales;
                # everything else runs once with scale disabled (0).
                scale_options = scales if tracker_name == "flow" else (0,)
                for scale in scale_options:
                    tracker, gt_filename = make_tracker_and_filename(
                        tracker_name=tracker_name,
                        matcher_name=matcher_name,
                        sim_name=sim_name,
                        scale=scale,
                    )
                    f(frames, tracker, gt_filename)
def midpoint_grid_labels():
    """Labels for the midpoint-grid dataset.

    NOTE(review): video_search is passed the labels path itself — presumably
    so the video is searched for next to the labels file; verify upstream.
    """
    labels = Labels.load_file(
        TEST_MIDPOINT_GRID_LABELS, video_search=TEST_MIDPOINT_GRID_LABELS
    )
    return labels
def legacy_grid_labels():
    """Labels for the legacy-grid dataset.

    NOTE(review): video_search is passed the labels path itself — presumably
    so the video is searched for next to the labels file; verify upstream.
    """
    labels = Labels.load_file(
        TEST_LEGACY_GRID_LABELS, video_search=TEST_LEGACY_GRID_LABELS
    )
    return labels
def min_labels_slp():
    """Minimal labels dataset loaded from the SLP test file."""
    labels = Labels.load_file(TEST_SLP_MIN_LABELS)
    return labels
def min_labels():
    """Minimal labels dataset loaded from the JSON test file."""
    labels = Labels.load_file(TEST_JSON_MIN_LABELS)
    return labels
def min_labels_robot():
    """Minimal robot labels dataset loaded from the SLP test file."""
    labels = Labels.load_file(TEST_SLP_MIN_LABELS_ROBOT)
    return labels
def main():
    """Print per-video summary statistics for a labels file given on the
    command line, with optional per-frame bounding-box detail (--verbose).
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("data_path", help="Path to labels json file")
    parser.add_argument("--verbose", default=False, action="store_true")
    args = parser.parse_args()

    # Resolve videos relative to the labels file's own directory.
    video_callback = Labels.make_video_callback([os.path.dirname(args.data_path)])
    labels = Labels.load_file(args.data_path, video_search=video_callback)

    print(f"Labeled frames: {len(labels)}")
    print(f"Tracks: {len(labels.tracks)}")
    print(f"Video files:")

    total_user_frames = 0
    for video in labels.videos:
        print(f" {video.filename}")
        video_lfs = labels.find(video)
        print(f" labeled frames: {len(video_lfs)}")
        if not video_lfs:
            # Nothing labeled in this video; skip the per-video stats.
            continue

        frame_inds = [lf.frame_idx for lf in video_lfs]
        first_idx = min(frame_inds)
        last_idx = max(frame_inds)
        tracks = {inst.track for lf in video_lfs for inst in lf}
        concurrent_count = max(len(lf.instances) for lf in video_lfs)
        user_frames = labels.get_labeled_frame_count(video, "user")
        total_user_frames += user_frames

        print(f" labeled frames from {first_idx} to {last_idx}")
        print(f" user labeled frames: {user_frames}")
        print(f" tracks: {len(tracks)}")
        print(f" max instances in frame: {concurrent_count}")

        if args.verbose:
            print()
            print(" labeled frames: bounding box top left (x, y)")
            for lf in video_lfs:
                # '^' marks predicted instances (those carrying a score).
                corner_strs = [
                    f"({inst.bounding_box[0]:.2f}, {inst.bounding_box[1]:.2f}){'^' if hasattr(inst, 'score') else ''}"
                    for inst in lf.instances
                ]
                pt_str = " ".join(corner_strs)
                print(
                    f" frame {lf.frame_idx}: {len(lf.instances)} instances -> {pt_str}"
                )
            print()

    print(f"Total user labeled frames: {total_user_frames}")

    if labels.provenance:
        print()
        print(f"Provenance:")
        for key, value in labels.provenance.items():
            print(f" {key}: {value}")
def main_cli():
    """Command-line entry point: render a labeled clip from a labels file.

    Parses CLI arguments (labels path, output name, fps, scale, crop,
    frame list, video index), loads the labels, and writes out the video.
    """
    import argparse

    from sleap.util import frame_list

    parser = argparse.ArgumentParser()
    parser.add_argument("data_path", help="Path to labels json file")
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="The output filename for the video",
    )
    parser.add_argument("-f", "--fps", type=int, default=15, help="Frames per second")
    parser.add_argument("--scale", type=float, default=1.0, help="Output image scale")
    parser.add_argument(
        "--crop", type=str, default="", help="Crop size as <width>,<height>"
    )
    parser.add_argument(
        "--frames",
        type=frame_list,
        default="",
        help="list of frames to predict. Either comma separated list (e.g. 1,2,3) or "
        "a range separated by hyphen (e.g. 1-3). (default is entire video)",
    )
    parser.add_argument(
        "--video-index", type=int, default=0, help="Index of video in labels dataset"
    )
    args = parser.parse_args()

    labels = Labels.load_file(
        args.data_path, video_search=[os.path.dirname(args.data_path)]
    )

    if args.video_index >= len(labels.videos):
        raise IndexError(f"There is no video with index {args.video_index}.")
    vid = labels.videos[args.video_index]

    if args.frames is None:
        # Default to every frame that has at least one instance.
        frames = sorted([lf.frame_idx for lf in labels if len(lf.instances)])
    else:
        frames = args.frames

    filename = args.output or args.data_path + ".avi"

    try:
        crop_size_xy = list(map(int, args.crop.split(",")))
    except ValueError:
        # Bug fix: the previous bare `except:` swallowed every exception.
        # Only a failed int conversion (e.g. the empty default "") should
        # fall back to no cropping.
        crop_size_xy = None

    save_labeled_video(
        filename=filename,
        labels=labels,
        video=vid,
        frames=frames,
        fps=args.fps,
        scale=args.scale,
        crop_size_xy=crop_size_xy,
    )

    print(f"Video saved as: (unknown)")
def centered_pair_predictions():
    """Predicted labels loaded from the centered-pair JSON predictions file."""
    labels = Labels.load_file(TEST_JSON_PREDICTIONS)
    return labels
""" Returns string of instance counts to show in table. Args: instance_list: The list of instances to count. Returns: String with numbers of user/predicted instances. """ prediction_count = len( list(filter(lambda inst: hasattr(inst, "score"), instance_list)) ) user_count = len(instance_list) - prediction_count return f"{user_count}/{prediction_count}" if __name__ == "__main__": # file_a = "tests/data/json_format_v1/centered_pair.json" # file_b = "tests/data/json_format_v2/centered_pair_predictions.json" file_a = "files/merge/a.h5" file_b = "files/merge/b.h5" base_labels = Labels.load_file(file_a) new_labels = Labels.load_file(file_b) app = QtWidgets.QApplication() win = MergeDialog(base_labels, new_labels) win.show() app.exec_()