def test_tracker_by_name(tracker, similarity, match, count):
    """Smoke-test tracker construction and the tracking entry points.

    Bug fix: the parametrized arguments were previously ignored in favor of
    hard-coded values (``"flow"``, ``"instance"``, ``"greedy"``,
    ``clean_instance_count=2``), so every parametrization exercised the same
    single configuration. Pass the arguments through so each parametrization
    actually builds a distinct tracker.

    Args:
        tracker: Candidate-generation method name (e.g. "flow").
        similarity: Instance similarity metric name (e.g. "instance").
        match: Matching policy name (e.g. "greedy").
        count: Value forwarded as ``clean_instance_count``.
    """
    t = Tracker.make_tracker_by_name(
        tracker, similarity, match, clean_instance_count=count
    )
    # Exercise both entry points on empty input; should not raise.
    t.track([])
    t.final_pass([])
def make_cli_parser():
    """Build and return the argparse parser for the inference CLI.

    The parser covers the whole inference pipeline: input video / labels
    selection, frame selection, output location, device placement,
    per-predictor tuning options (added dynamically from ``CLI_PREDICTORS``),
    and tracking options (delegated to ``Tracker.add_cli_parser_args``).

    Returns:
        argparse.ArgumentParser: The fully configured parser.
    """
    import argparse

    from sleap.util import frame_list

    parser = argparse.ArgumentParser()

    # Args for the overall pipeline: inputs, frame selection, output.
    parser.add_argument(
        "video_path", type=str, nargs="?", default="", help="Path to video file"
    )
    parser.add_argument(
        "-m",
        "--model",
        dest="models",
        action="append",
        help="Path to trained model directory (with training_config.json). "
        "Multiple models can be specified, each preceded by --model.",
    )
    parser.add_argument(
        "--frames",
        type=frame_list,
        default="",
        help="List of frames to predict. Either comma separated list (e.g. 1,2,3) or "
        "a range separated by hyphen (e.g. 1-3, for 1,2,3). (default is entire video)",
    )
    parser.add_argument(
        "--only-labeled-frames",
        action="store_true",
        default=False,
        help="Only run inference on labeled frames (when running on labels dataset file).",
    )
    parser.add_argument(
        "--only-suggested-frames",
        action="store_true",
        default=False,
        help="Only run inference on suggested frames (when running on labels dataset file).",
    )
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="The output filename to use for the predicted data.",
    )
    parser.add_argument(
        "--labels",
        type=str,
        default=None,
        help="Path to labels dataset file (for inference on multiple videos or for re-tracking pre-existing predictions).",
    )

    # TODO: better video parameters
    parser.add_argument(
        "--video.dataset", type=str, default="", help="The dataset for HDF5 videos."
    )
    parser.add_argument(
        "--video.input_format",
        type=str,
        default="",
        help="The input_format for HDF5 videos.",
    )

    # Device placement: at most one of these flags may be given.
    device_group = parser.add_mutually_exclusive_group(required=False)
    device_group.add_argument(
        "--cpu",
        action="store_true",
        # Fixed: help text previously contained a stray embedded line break.
        help="Run inference only on CPU. If not specified, will use available GPU.",
    )
    device_group.add_argument(
        "--first-gpu",
        action="store_true",
        help="Run inference on the first GPU, if available.",
    )
    device_group.add_argument(
        "--last-gpu",
        action="store_true",
        help="Run inference on the last GPU, if available.",
    )
    device_group.add_argument(
        "--gpu", type=int, default=0, help="Run inference on the i-th GPU specified."
    )

    # Args for each predictor class. Defaults stay None so that unspecified
    # values are not passed on to the predictor builder; the class default is
    # surfaced in the help string only.
    for predictor_name, predictor_class in CLI_PREDICTORS.items():
        # Hoisted: attr.fields_dict() was previously re-evaluated up to four
        # times per predictor class.
        fields = attr.fields_dict(predictor_class)
        if "peak_threshold" in fields:
            default_val = fields["peak_threshold"].default
            parser.add_argument(
                f"--{predictor_name}.peak_threshold",
                type=float,
                default=None,
                help=f"Threshold to use when finding peaks in {predictor_class.__name__} (default: {default_val}).",
            )
        if "batch_size" in fields:
            default_val = fields["batch_size"].default
            parser.add_argument(
                f"--{predictor_name}.batch_size",
                type=int,
                default=None,
                help=f"Batch size to use for model inference in {predictor_class.__name__} (default: {default_val}).",
            )

    # Args for tracking (scoped under "tracking.*" by the Tracker class).
    Tracker.add_cli_parser_args(parser, arg_scope="tracking")

    parser.add_argument(
        "--test-pipeline",
        default=False,
        action="store_true",
        help="Test pipeline construction without running anything.",
    )

    return parser
def make_tracker_from_cli(policy_args):
    """Construct a tracker from parsed, scoped CLI arguments.

    Args:
        policy_args: Mapping of argument scopes to keyword-argument dicts;
            the "tracking" scope (when present) is forwarded verbatim to
            ``Tracker.make_tracker_by_name``.

    Returns:
        The tracker built from the "tracking" options, or None when no
        "tracking" scope was supplied.
    """
    # Guard clause: nothing to build without tracking options.
    if "tracking" not in policy_args:
        return None
    return Tracker.make_tracker_by_name(**policy_args["tracking"])