def fit_tracks(filename: Text, instance_count: int):
    """Wraps `TrackCleaner` for easier cli api.

    Loads the dataset, runs `TrackCleaner` over the frames of the first
    video, rebuilds the track list from surviving instances, and saves the
    result to a new file with ".cleaned" inserted before the extension.

    Args:
        filename: Path to a SLEAP dataset (.slp, .h5, or .json).
        instance_count: Expected number of instances per frame; passed
            through to `TrackCleaner`.

    Returns:
        None. Side effect: writes the cleaned dataset to disk.
    """
    labels = Labels.load_file(filename)
    video = labels.videos[0]
    frames = labels.find(video)

    TrackCleaner(instance_count=instance_count).run(frames=frames)

    # Rebuild list of tracks from the instances that survived cleaning.
    labels.tracks = list(
        {
            instance.track
            for frame in labels
            for instance in frame.instances
            if instance.track
        }
    )
    labels.tracks.sort(key=operator.attrgetter("spawned_on", "name"))

    # Derive output path by tagging the extension, e.g. "x.slp" -> "x.cleaned.slp".
    save_filename = filename
    save_filename = save_filename.replace(".slp", ".cleaned.slp")
    save_filename = save_filename.replace(".h5", ".cleaned.h5")
    save_filename = save_filename.replace(".json", ".cleaned.json")

    # Fix: previously, a filename with none of the recognized extensions left
    # save_filename == filename and silently overwrote the input file.
    if save_filename == filename:
        save_filename = filename + ".cleaned.slp"

    Labels.save_file(labels, save_filename)

    print(f"Saved: {save_filename}")
def demo_gui():
    """Interactive demo: open the suggestions form against a sample dataset."""
    from sleap.gui.dialogs.formbuilder import YamlFormWidget
    from sleap import Labels
    from PySide2.QtWidgets import QApplication

    labels = Labels.load_file(
        "tests/data/json_format_v2/centered_pair_predictions.json"
    )

    # Populate node dropdowns in the form from the first skeleton.
    field_options = dict(node=labels.skeletons[0].node_names)

    app = QApplication()
    window = YamlFormWidget.from_name(
        "suggestions",
        title="Generate Suggestions",
        field_options_lists=field_options,
    )

    def on_generate(params):
        # Echo the raw form params, then one line per suggested frame.
        print(params)
        suggestions = VideoFrameSuggestions.suggest(params=params, labels=labels)
        for suggestion in suggestions:
            print(
                suggestion.video.backend.filename,
                suggestion.frame_idx,
                suggestion.group,
            )

    window.mainAction.connect(on_generate)
    window.show()
    app.exec_()
def predict_subprocess(
    self,
    item_for_inference: ItemForInference,
    append_results: bool = False,
    waiting_callback: Optional[Callable] = None,
) -> Tuple[Text, bool]:
    """Runs inference in a subprocess.

    Builds the CLI call, launches it, and polls until it exits. The
    optional `waiting_callback` is invoked each poll cycle; returning -1
    from it cancels the run.

    Returns:
        Tuple of (output path, success flag). On cancellation the path is
        "" and success is False.
    """
    cli_args, output_path = self.make_predict_cli_call(item_for_inference)

    print("Command line call:")
    print(" \\\n".join(cli_args))
    print()

    with sub.Popen(cli_args) as proc:
        while proc.poll() is None:
            # -1 from the callback signals user cancellation.
            if waiting_callback is not None and waiting_callback() == -1:
                return "", False
            time.sleep(0.1)

        print(f"Process return code: {proc.returncode}")
        succeeded = proc.returncode == 0

    if succeeded and append_results:
        # Load frames from inference into results list
        inference_labels = Labels.load_file(output_path, match_to=self.labels)
        self.results.extend(inference_labels.labeled_frames)

    return output_path, succeeded
def retrack():
    """CLI entry point: re-run tracking over predictions in a SLEAP file."""
    import argparse
    import operator
    import os
    import time

    from sleap import Labels

    parser = argparse.ArgumentParser()
    parser.add_argument("data_path", help="Path to SLEAP project file")
    parser.add_argument(
        "-o",
        "--output",
        type=str,
        default=None,
        help="The output filename to use for the predicted data.",
    )

    Tracker.add_cli_parser_args(parser)

    args = parser.parse_args()

    # Only forward options the user actually set (None means "unset").
    tracker_args = {name: value for name, value in vars(args).items() if value is not None}

    tracker = Tracker.make_tracker_by_name(**tracker_args)
    print(tracker)

    print("Loading predictions...")
    t0 = time.time()
    labels = Labels.load_file(args.data_path, args.data_path)
    frames = sorted(labels.labeled_frames, key=operator.attrgetter("frame_idx"))
    print(f"Done loading predictions in {time.time() - t0} seconds.")

    print("Starting tracker...")
    frames = run_tracker(frames=frames, tracker=tracker)
    tracker.final_pass(frames)

    new_labels = Labels(labeled_frames=frames)

    if args.output:
        output_path = args.output
    else:
        # Default: save next to the input with the tracker name tagged on.
        out_dir = os.path.dirname(args.data_path)
        out_name = os.path.basename(args.data_path) + f".{tracker.get_name()}.slp"
        output_path = os.path.join(out_dir, out_name)

    print(f"Saving: {output_path}")
    Labels.save_file(new_labels, output_path)
def predict_subprocess(
    self,
    item_for_inference: ItemForInference,
    append_results: bool = False,
    waiting_callback: Optional[Callable] = None,
    gui: bool = True,
) -> "Tuple[Text, Union[Text, int]]":
    """Runs inference in a subprocess.

    Launches the inference CLI, streaming its stdout. Lines starting with
    "{" are parsed as JSON progress records and passed as keyword args to
    `waiting_callback`; other lines are echoed. The callback may return
    "cancel" to kill the subprocess.

    Args:
        item_for_inference: Item to build the CLI call for.
        append_results: If True, load the resulting predictions and extend
            `self.results` with their labeled frames.
        waiting_callback: Optional progress callback; called with the
            fields of each parsed JSON line (empty kwargs otherwise).
        gui: Forwarded to `make_predict_cli_call`.

    Returns:
        Tuple of (output path, status). Status is "success", "canceled"
        (with "" as the path), or the nonzero process return code.
        Fix: the annotation previously claimed `Tuple[Text, bool]`, but
        the function has always returned these status values instead.
    """
    cli_args, output_path = self.make_predict_cli_call(item_for_inference, gui=gui)

    print("Command line call:")
    print(" ".join(cli_args))
    print()

    # Run inference CLI capturing output.
    with subprocess.Popen(cli_args, stdout=subprocess.PIPE) as proc:
        # Poll until finished.
        while proc.poll() is None:
            # Read line.
            line = proc.stdout.readline()
            line = line.decode().rstrip()

            if line.startswith("{"):
                # Parse line as a JSON progress record.
                line_data = json.loads(line)
            else:
                # Pass through non-json output.
                print(line)
                line_data = {}

            if waiting_callback is not None:
                # Pass line data to callback.
                ret = waiting_callback(**line_data)

                if ret == "cancel":
                    # Stop if callback returned cancel signal.
                    kill_process(proc.pid)
                    print(f"Killed PID: {proc.pid}")
                    return "", "canceled"

            time.sleep(0.05)

        print(f"Process return code: {proc.returncode}")
        success = proc.returncode == 0

    if success and append_results:
        # Load frames from inference into results list
        new_inference_labels = Labels.load_file(output_path, match_to=self.labels)
        self.results.extend(new_inference_labels.labeled_frames)

    # Return "success" or return code if failed.
    ret = "success" if success else proc.returncode
    return output_path, ret
def demo_training_dialog():
    """Interactive demo: open the LearningDialog in inference mode."""
    app = QtWidgets.QApplication([])

    filename = "tests/data/json_format_v1/centered_pair.json"
    dataset = Labels.load_file(filename)

    dialog = LearningDialog("inference", labels_filename=filename, labels=dataset)
    # Preselect a clip on the first video so the dialog has a frame range.
    dialog.frame_selection = {"clip": {dataset.videos[0]: (1, 2, 3, 4)}}

    dialog.show()
    app.exec_()
def skeleton(self):
    """Returns the skeleton for this config, searching for it lazily.

    Resolution order: a skeleton saved in the training config, then the
    first skeleton of the validation labels file (much slower to load).
    The outcome — even a failed search — is cached via
    `self._tried_finding_skeleton` so we only search once.

    Returns:
        The skeleton, or None if none could be found.
    """
    # cache skeleton so we only search once
    if self._skeleton is None and not self._tried_finding_skeleton:
        # if skeleton was saved in config, great!
        if self.config.data.labels.skeletons:
            self._skeleton = self.config.data.labels.skeletons[0]
        # otherwise try loading it from validation labels (much slower!)
        else:
            # Fix: was an f-string with no placeholders; plain literal is identical.
            filename = self._get_file_path("labels_gt.val.slp")
            if filename is not None:
                val_labels = Labels.load_file(filename)
                if val_labels.skeletons:
                    self._skeleton = val_labels.skeletons[0]

        # don't try loading again (needed in case it's still None)
        self._tried_finding_skeleton = True

    return self._skeleton
    """Returns mean of aligned points for instances."""
    # NOTE(review): this is the tail of a function defined above this chunk;
    # `instances` comes from that function's signature.
    points = get_instances_points(instances)
    # Choose the most stable node pair to define the alignment axis.
    node_a, node_b = get_most_stable_node_pair(points, min_dist=4.0)
    aligned = align_instances(points, node_a=node_a, node_b=node_b)
    # The std is computed alongside the mean but only the mean is returned.
    points_mean, points_std = get_mean_and_std_for_points(aligned)
    return points_mean


if __name__ == "__main__":
    # Ad-hoc script entry: report stable node pairs for a labels file.
    # filename = "tests/data/json_format_v2/centered_pair_predictions.json"
    # filename = "/Volumes/fileset-mmurthy/shruthi/code/sleap_expts/preds/screen_all.5pts_tmp_augment_200122/191210_102108_18159112_rig3_2.preds.h5"
    filename = "/Volumes/fileset-mmurthy/talmo/wt_gold_labeling/100919.sleap_wt_gold.13pt_init.n=288.junyu.h5"
    labels = Labels.load_file(filename)
    points = get_instances_points(labels.instances())
    get_stable_node_pairs(points, np.array(labels.skeletons[0].node_names))
    # import time
    #
    # t0 = time.time()
    # labels.add_instance(
    #     frame=labels.find_first(video=labels.videos[0]),
    #     instance=make_mean_instance(align_instances(points, 12, 0))
    # )
    # print(labels.find_first(video=labels.videos[0]))
    # print("time", time.time() - t0)
    #
    # Labels.save_file(labels, "mean.h5")
            self.context.labels.remove_instance(lf, inst, in_transaction=True)
            # Drop the frame entirely once its last instance is gone.
            if not lf.instances:
                self.context.labels.remove(lf)

        # Update caches since we skipped doing this after each deletion
        self.context.labels.update_cache()

        # Log update
        self.context.changestack_push("delete instances")


if __name__ == "__main__":
    # Ad-hoc demo: show the DeleteDialog against a sample dataset.
    app = QtWidgets.QApplication([])

    from sleap import Labels
    from sleap.gui.commands import CommandContext

    labels = Labels.load_file(
        "tests/data/json_format_v2/centered_pair_predictions.json"
    )
    context = CommandContext.from_labels(labels)
    # Seed GUI state the dialog reads: current frame/video and frame range.
    context.state["frame_idx"] = 123
    context.state["video"] = labels.videos[0]
    context.state["has_frame_range"] = True
    context.state["frame_range"] = (10, 20)

    win = DeleteDialog(context=context)
    win.show()

    app.exec_()
def __init__(
    self,
    mode: Text,
    labels_filename: Text,
    labels: Optional[Labels] = None,
    skeleton: Optional["Skeleton"] = None,
    *args,
    **kwargs,
):
    """Builds the learning dialog for configuring training or inference.

    Args:
        mode: Either "training" or "inference"; anything else raises
            ValueError. Selects the pipeline tab label.
        labels_filename: Path to the labels file; also used to locate
            training configs.
        labels: Loaded labels; loaded from `labels_filename` if None.
        skeleton: Skeleton to use; defaults to the first skeleton in
            `labels` if not given.

    Raises:
        ValueError: If `mode` is not "training" or "inference".
    """
    super(LearningDialog, self).__init__()

    if labels is None:
        labels = Labels.load_file(labels_filename)

    if skeleton is None and labels.skeletons:
        skeleton = labels.skeletons[0]

    self.mode = mode
    self.labels_filename = labels_filename
    self.labels = labels
    self.skeleton = skeleton

    # Frame selection (set later by the caller) drives which frames are used.
    self._frame_selection = None

    self.current_pipeline = ""

    self.tabs = dict()
    self.shown_tab_names = []

    # Finds saved training configs associated with this labels file.
    self._cfg_getter = configs.TrainingConfigsGetter.make_from_labels_filename(
        labels_filename=self.labels_filename
    )

    # Layout for buttons
    buttons = QtWidgets.QDialogButtonBox()
    self.cancel_button = buttons.addButton(QtWidgets.QDialogButtonBox.Cancel)
    self.save_button = buttons.addButton(
        "Save configuration files...", QtWidgets.QDialogButtonBox.ApplyRole
    )
    self.run_button = buttons.addButton("Run", QtWidgets.QDialogButtonBox.AcceptRole)

    buttons_layout = QtWidgets.QHBoxLayout()
    buttons_layout.addWidget(buttons, alignment=QtCore.Qt.AlignTop)

    buttons_layout_widget = QtWidgets.QWidget()
    buttons_layout_widget.setLayout(buttons_layout)

    self.pipeline_form_widget = TrainingPipelineWidget(mode=mode, skeleton=skeleton)
    if mode == "training":
        tab_label = "Training Pipeline"
    elif mode == "inference":
        # self.pipeline_form_widget = InferencePipelineWidget()
        tab_label = "Inference Pipeline"
    else:
        raise ValueError(f"Invalid LearningDialog mode: {mode}")

    self.tab_widget = QtWidgets.QTabWidget()

    self.tab_widget.addTab(self.pipeline_form_widget, tab_label)
    self.make_tabs()

    # Label used for validation/status messages below the tabs.
    self.message_widget = QtWidgets.QLabel("")

    # Layout for entire dialog
    layout = QtWidgets.QVBoxLayout()
    layout.addWidget(self.tab_widget)
    layout.addWidget(self.message_widget)
    layout.addWidget(buttons_layout_widget)

    self.setLayout(layout)

    # Default to most recently trained pipeline (if there is one)
    self.set_pipeline_from_most_recent()

    # Connect functions to update pipeline tabs when pipeline changes
    self.pipeline_form_widget.updatePipeline.connect(self.set_pipeline)
    # Emit once so tabs sync to the initial pipeline selection.
    self.pipeline_form_widget.emitPipeline()

    self.connect_signals()

    # Connect actions for buttons
    buttons.accepted.connect(self.run)
    buttons.rejected.connect(self.reject)
    buttons.clicked.connect(self.on_button_click)

    # Connect button for previewing the training data
    if "_view_datagen" in self.pipeline_form_widget.buttons:
        self.pipeline_form_widget.buttons["_view_datagen"].clicked.connect(
            self.view_datagen
        )
def main():
    """CLI entry point: convert between SLEAP dataset and other formats.

    Loads the input as a SLEAP dataset (falling back to the generic format
    importers if that fails), then writes it out per ``--format``:
    "analysis" produces an analysis .h5 file; "slp"/"h5"/"json" produce a
    SLEAP dataset; an explicit ``-o/--output`` path always wins for
    dataset output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input_path", help="Path to input file.")
    parser.add_argument(
        "-o", "--output", default="", help="Path to output file (optional)."
    )
    parser.add_argument(
        "--format",
        default="slp",
        help="Output format. Default ('slp') is SLEAP dataset; "
        "'analysis' results in analysis.h5 file; "
        "'h5' or 'json' results in SLEAP dataset "
        "with specified file format.",
    )
    parser.add_argument(
        "--video", default="", help="Path to video (if needed for conversion)."
    )

    args = parser.parse_args()

    # Search for videos relative to the input file's directory.
    video_callback = Labels.make_video_callback([os.path.dirname(args.input_path)])
    try:
        labels = Labels.load_file(args.input_path, video_search=video_callback)
    except TypeError:
        print("Input file isn't SLEAP dataset so attempting other importers...")
        from sleap.io.format import read

        video_path = args.video if args.video else None

        labels = read(
            args.input_path,
            for_object="labels",
            as_format="*",
            video_search=video_callback,
            video=video_path,
        )

    if args.format == "analysis":
        from sleap.info.write_tracking_h5 import main as write_analysis

        if args.output:
            output_path = args.output
        else:
            output_path = args.input_path
            # Fix: regex was a non-raw string, so "\." were invalid escape
            # sequences (SyntaxWarning on modern Python). Raw string is the
            # correct, equivalent form.
            output_path = re.sub(r"(\.json(\.zip)?|\.h5|\.slp)$", "", output_path)
            output_path = output_path + ".analysis.h5"

        write_analysis(labels, output_path=output_path, all_frames=True)

    elif args.output:
        print(f"Output SLEAP dataset: {args.output}")
        Labels.save_file(labels, args.output)

    elif args.format in ("slp", "h5", "json"):
        output_path = f"{args.input_path}.{args.format}"
        print(f"Output SLEAP dataset: {output_path}")
        Labels.save_file(labels, output_path)

    else:
        print("You didn't specify how to convert the file.")
        print(args)