def fit_tracks(filename: Text, instance_count: int):
    """Wraps `TrackCleaner` for easier cli api.

    Loads a SLEAP labels file, runs track cleaning on the frames of its first
    video, rebuilds the track list, and saves the result alongside the input
    as `<name>.cleaned.<ext>`.

    Args:
        filename: Path to a SLEAP labels file (".slp", ".h5", or ".json").
        instance_count: Target number of instances passed to `TrackCleaner`.

    Returns:
        None. The cleaned labels are written to disk as a side effect.
    """
    labels = Labels.load_file(filename)
    video = labels.videos[0]
    frames = labels.find(video)

    TrackCleaner(instance_count=instance_count).run(frames=frames)

    # Rebuild list of tracks from the instances that survived cleaning.
    labels.tracks = list({
        instance.track
        for frame in labels
        for instance in frame.instances
        if instance.track
    })
    labels.tracks.sort(key=operator.attrgetter("spawned_on", "name"))

    # Insert ".cleaned" before the file extension. Anchoring the match at the
    # *end* of the path fixes the old blanket `str.replace`, which would also
    # rewrite ".slp"/".h5"/".json" occurring mid-path (e.g. in a directory
    # name) and could apply more than one replacement to a single filename.
    save_filename = filename
    for ext in (".slp", ".h5", ".json"):
        if save_filename.endswith(ext):
            save_filename = save_filename[: -len(ext)] + ".cleaned" + ext
            break

    Labels.save_file(labels, save_filename)
    print(f"Saved: {save_filename}")
def retrack():
    """CLI entry point: re-run tracking on the predictions in a SLEAP file.

    Parses command-line arguments (data path, optional output path, plus the
    tracker options contributed by `Tracker.add_cli_parser_args`), builds a
    tracker, runs it over all labeled frames sorted by frame index, and saves
    the re-tracked labels.

    Returns:
        None. Writes the new labels file as a side effect.
    """
    import argparse
    import operator
    import os
    import time

    from sleap import Labels

    parser = argparse.ArgumentParser()
    parser.add_argument("data_path", help="Path to SLEAP project file")
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="The output filename to use for the predicted data.",
    )

    Tracker.add_cli_parser_args(parser)

    args = parser.parse_args()

    # Drop unset options so the tracker factory only sees explicit choices.
    tracker_args = {key: val for key, val in vars(args).items() if val is not None}

    tracker = Tracker.make_tracker_by_name(**tracker_args)

    print(tracker)

    print("Loading predictions...")
    t0 = time.time()
    # Second argument is the video search path; look next to the labels file.
    labels = Labels.load_file(args.data_path, args.data_path)
    frames = sorted(labels.labeled_frames, key=operator.attrgetter("frame_idx"))
    print(f"Done loading predictions in {time.time() - t0} seconds.")

    print("Starting tracker...")
    frames = run_tracker(frames=frames, tracker=tracker)
    tracker.final_pass(frames)

    new_labels = Labels(labeled_frames=frames)

    if args.output:
        output_path = args.output
    else:
        out_dir = os.path.dirname(args.data_path)
        out_name = os.path.basename(args.data_path) + f".{tracker.get_name()}.slp"
        output_path = os.path.join(out_dir, out_name)

    print(f"Saving: {output_path}")
    Labels.save_file(new_labels, output_path)
def save_predictions_from_cli(args, predicted_frames, prediction_metadata=None):
    """Save predicted frames to a labels file derived from CLI arguments.

    The output path is, in order of preference: the explicit `--output`
    argument, then `<video_path>.predictions.slp`, then
    `<labels>.predictions.slp`, and finally `predictions.slp` in the working
    directory as a last resort.
    """
    from sleap import Labels

    if args.output:
        output_path = args.output
    else:
        # Derive "<input>.predictions.slp" next to whichever input was given
        # (video path takes precedence over the labels path).
        source = args.video_path or args.labels
        if source:
            output_path = os.path.join(
                os.path.dirname(source),
                os.path.basename(source) + ".predictions.slp",
            )
        else:
            # We shouldn't ever get here but if we do, just save in working dir.
            output_path = "predictions.slp"

    labels = Labels(labeled_frames=predicted_frames, provenance=prediction_metadata)

    print(f"Saving: {output_path}")
    Labels.save_file(labels, output_path)
# Collect every (frame, instance) pair whose score falls below the threshold.
for frame in labels:
    for instance in frame:
        # Instances without a score (or with score=None) are kept as-is.
        score = getattr(instance, "score", None)
        if score is not None and score < SCORE_THRESHOLD:
            lf_inst_list.append((frame, instance))

if lf_inst_list:
    print(f"Removing {len(lf_inst_list)} instances...")
    # Remove each collected instance from its frame.
    for frame, instance in lf_inst_list:
        labels.remove_instance(frame, instance, in_transaction=True)
else:
    print("No instances to remove.")

# Rename the first track after the first 11 characters of the filename.
track_name = filename[0:11]
print("Saving track as " + str(track_name))
labels.tracks[0].name = track_name

# Save the updated project file
Labels.save_file(labels, out_filename)
print(out_filename)
def evaluate_model(
    cfg: TrainingJobConfig,
    labels_reader: LabelsReader,
    model: Model,
    save: bool = True,
    split_name: Text = "test",
) -> Tuple[Labels, Dict[Text, Any]]:
    """Evaluate a trained model and save metrics and predictions.

    Args:
        cfg: The `TrainingJobConfig` associated with the model.
        labels_reader: A `LabelsReader` pipeline generator that reads the
            ground truth data to evaluate.
        model: The `sleap.nn.model.Model` instance to evaluate.
        save: If True, save the predictions and metrics to the model folder.
        split_name: String name to append to the saved filenames.

    Returns:
        A tuple of `(labels_pr, metrics)`. `labels_pr` will contain the
        predicted labels. `metrics` will contain the evaluated metrics given
        the predictions, or None if the metrics failed to be computed.

    Raises:
        ValueError: If the head config type is not one of the recognized
            model types.
    """
    # Setup predictor for evaluation based on which model head is configured.
    head_config = cfg.model.heads.which_oneof()
    if isinstance(head_config, CentroidsHeadConfig):
        predictor = TopDownPredictor(
            centroid_config=cfg,
            centroid_model=model,
            confmap_config=None,
            confmap_model=None,
        )
    elif isinstance(head_config, CenteredInstanceConfmapsHeadConfig):
        predictor = TopDownPredictor(
            centroid_config=None,
            centroid_model=None,
            confmap_config=cfg,
            confmap_model=model,
        )
    elif isinstance(head_config, MultiInstanceConfig):
        predictor = sleap.nn.inference.BottomUpPredictor(
            bottomup_config=cfg, bottomup_model=model
        )
    elif isinstance(head_config, SingleInstanceConfmapsHeadConfig):
        predictor = sleap.nn.inference.SingleInstancePredictor(
            confmap_config=cfg, confmap_model=model
        )
    else:
        raise ValueError("Unrecognized model type:", head_config)

    # Predict.
    labels_pr = predictor.predict(labels_reader, make_labels=True)

    # Compute metrics. Narrowed from a bare `except:` so KeyboardInterrupt
    # and SystemExit still propagate instead of being treated as a metric
    # failure.
    try:
        metrics = evaluate(labels_reader.labels, labels_pr)
    except Exception:
        logger.warning("Failed to compute metrics.")
        metrics = None

    # Save predictions (and metrics, if computed) into the run folder.
    if save:
        labels_pr_path = os.path.join(
            cfg.outputs.run_path, f"labels_pr.{split_name}.slp"
        )
        Labels.save_file(labels_pr, labels_pr_path)
        logger.info("Saved predictions: %s", labels_pr_path)

        if metrics is not None:
            metrics_path = os.path.join(
                cfg.outputs.run_path, f"metrics.{split_name}.npz"
            )
            np.savez_compressed(metrics_path, **{"metrics": metrics})
            logger.info("Saved metrics: %s", metrics_path)

    if metrics is not None:
        logger.info("OKS mAP: %f", metrics["oks_voc.mAP"])

    return labels_pr, metrics
def main():
    """CLI entry point for converting between SLEAP dataset formats.

    Loads the input file (falling back to the generic format importers if it
    is not a native SLEAP dataset), then writes it out according to
    `--format`: "analysis" produces an analysis `.h5`; "slp"/"h5"/"json"
    produce a SLEAP dataset in that container; an explicit `--output` path is
    saved directly as a SLEAP dataset.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input_path", help="Path to input file.")
    parser.add_argument(
        "-o", "--output", default="", help="Path to output file (optional)."
    )
    parser.add_argument(
        "--format",
        default="slp",
        help="Output format. Default ('slp') is SLEAP dataset; "
        "'analysis' results in analysis.h5 file; "
        "'h5' or 'json' results in SLEAP dataset "
        "with specified file format.",
    )
    parser.add_argument(
        "--video", default="", help="Path to video (if needed for conversion)."
    )

    args = parser.parse_args()

    # Search for missing videos next to the input file.
    video_callback = Labels.make_video_callback([os.path.dirname(args.input_path)])
    try:
        labels = Labels.load_file(args.input_path, video_search=video_callback)
    except TypeError:
        print("Input file isn't SLEAP dataset so attempting other importers...")
        from sleap.io.format import read

        video_path = args.video if args.video else None

        labels = read(
            args.input_path,
            for_object="labels",
            as_format="*",
            video_search=video_callback,
            video=video_path,
        )

    if args.format == "analysis":
        from sleap.info.write_tracking_h5 import main as write_analysis

        if args.output:
            output_path = args.output
        else:
            output_path = args.input_path
            # Raw string: the old non-raw pattern relied on invalid "\."
            # escape sequences (a DeprecationWarning, and a SyntaxError in
            # future Python versions).
            output_path = re.sub(r"(\.json(\.zip)?|\.h5|\.slp)$", "", output_path)
            output_path = output_path + ".analysis.h5"

        write_analysis(labels, output_path=output_path, all_frames=True)

    elif args.output:
        print(f"Output SLEAP dataset: {args.output}")
        Labels.save_file(labels, args.output)

    elif args.format in ("slp", "h5", "json"):
        output_path = f"{args.input_path}.{args.format}"
        print(f"Output SLEAP dataset: {output_path}")
        Labels.save_file(labels, output_path)

    else:
        print("You didn't specify how to convert the file.")
        print(args)