def make_cmdline_parser(cls, starting_parser=None):
    """
    Extend the parent applet's command-line parser with tracking-export options.

    Returns the parser produced by DataExportApplet.make_cmdline_parser()
    (optionally building on ``starting_parser``) with one extra, optional
    argument: --export_plugin.
    """
    parser = DataExportApplet.make_cmdline_parser(starting_parser)
    parser.add_argument(
        '--export_plugin',
        help='Plugin name for exporting tracking results',
        required=False,
        default=None,
    )
    return parser
def make_cmdline_parser(cls, starting_parser=None):
    """
    Build this applet's command-line parser.

    Starts from the parser returned by DataExportApplet.make_cmdline_parser()
    (which may extend ``starting_parser``) and registers the optional
    --export_plugin flag on top of it.
    """
    cmdline_parser = DataExportApplet.make_cmdline_parser(starting_parser)
    cmdline_parser.add_argument(
        '--export_plugin',
        required=False,
        default=None,
        help='Plugin name for exporting tracking results',
    )
    return cmdline_parser
def make_cmdline_parser(cls, starting_parser=None):
    """
    Return a command line parser that includes all parameters from the
    parent applet, plus the tracking-specific export options
    (--export_plugin and --big_data_viewer_xml_file).
    """
    parser = DataExportApplet.make_cmdline_parser(starting_parser)
    # Both tracking-export flags are optional and default to None.
    tracking_options = (
        ('--export_plugin',
         'Plugin name for exporting tracking results'),
        ('--big_data_viewer_xml_file',
         'Path to BigDataViewer XML file. Required if export_plugin=Fiji-MaMuT'),
    )
    for flag, description in tracking_options:
        parser.add_argument(flag, help=description, required=False, default=None)
    return parser
Return a list of all the internal datasets in an hdf5 file. """ allkeys = [] f.visit(allkeys.append) dataset_keys = [key for key in allkeys if isinstance(f[key], h5py.Dataset)] return dataset_keys if __name__ == "__main__": import sys import argparse # sys.argv += "/tmp/example_slice.h5/data /tmp/example_slice2.h5/data --export_drange=(0,255) --output_format=png --pipeline_result_drange=(1,2)".split() # Construct a parser with all the 'normal' export options, and add arg for prediction_image_paths. parser = DataExportApplet.make_cmdline_parser(argparse.ArgumentParser()) parser.add_argument("prediction_image_paths", nargs="+", help="Path(s) to your exported predictions.") parsed_args = parser.parse_args() parsed_args, unused_args = DataExportApplet.parse_known_cmdline_args( sys.argv[1:], parsed_args) # As a convenience, auto-determine the internal dataset path if possible. for index, input_path in enumerate(parsed_args.prediction_image_paths): path_comp = PathComponents(input_path, os.getcwd()) if not parsed_args.output_internal_path: parsed_args.output_internal_path = "segmentation" if path_comp.extension in PathComponents.HDF5_EXTS and path_comp.internalDatasetName == "": with h5py.File(path_comp.externalPath, "r") as f: all_internal_paths = all_dataset_internal_paths(f)
def all_dataset_internal_paths(f):
    """
    Return a list of all the internal datasets in an hdf5 file.

    :param f: An open ``h5py.File`` (or any h5py group supporting ``visit``).
    :return: List of internal path strings, one per dataset in the file.
    """
    allkeys = []
    f.visit(allkeys.append)
    # Materialize a real list: in Python 3, filter() returns a lazy iterator,
    # but the __main__ script below calls len() on this result and indexes it.
    dataset_keys = [key for key in allkeys if isinstance(f[key], h5py.Dataset)]
    return dataset_keys


if __name__ == "__main__":
    import sys
    import argparse

    # Construct a parser with all the 'normal' export options, and add arg for input_path.
    parser = DataExportApplet.make_cmdline_parser(argparse.ArgumentParser())
    parser.add_argument("input_path", help="Path to your exported predictions.")
    parsed_args = parser.parse_args()

    # As a convenience, auto-determine the internal dataset path if possible.
    path_comp = PathComponents(parsed_args.input_path, os.getcwd())
    if path_comp.extension in PathComponents.HDF5_EXTS and path_comp.internalDatasetName == "":
        # File given without an internal path: use the dataset if it's unambiguous.
        with h5py.File(path_comp.externalPath, 'r') as f:
            all_internal_paths = all_dataset_internal_paths(f)

        if len(all_internal_paths) == 1:
            path_comp.internalPath = all_internal_paths[0]
            parsed_args.input_path = path_comp.totalPath()
        elif len(all_internal_paths) == 0:
            sys.stderr.write("Could not find any datasets in your input file.")