def post_process(seg_config_filename, clas_config_filename):
    """Post-process averaged segmentation results using classification averages.

    Loads both configuration files, configures logging from the
    segmentation config, and passes the relevant HDF5 paths to
    ``h5.PostProcessor``.
    """
    seg_config = load_config(seg_config_filename)
    clas_config = load_config(clas_config_filename)
    setup_logging(seg_config)

    seg_out = seg_config["output"]
    processor = h5.PostProcessor(seg_out["N"], verbose=2)
    processor.process(
        seg_out["avg"],
        clas_config["output"]["avg"],
        seg_out["img"],
        seg_out["sub"],
    )
def average(config_filename):
    """Average raw predictions into a single output dataset.

    The averager class is looked up dynamically on the ``h5`` module by
    the name stored under the config's ``average`` key.
    """
    config = load_config(config_filename)
    setup_logging(config)

    outputs = config["output"]
    averager_cls = getattr(h5, config["average"])
    averager = averager_cls(outputs["N"], verbose=config["verbose"])
    averager.average(outputs["raw"], outputs["avg"], config["group_weights"])
def predict_all(folder, config_filename):
    """Run inference for every ``model_best.pth`` checkpoint under *folder*.

    Checkpoints are processed in sorted order. A failure on one checkpoint
    is reported and the loop continues with the next one (best-effort), so
    a single broken checkpoint does not abort the whole batch.
    """
    config = load_config(config_filename)
    # sorted() accepts any iterable directly; wrapping the glob in list()
    # first was a redundant materialization (ruff C414).
    checkpoints = sorted(Path(folder).glob("**/model_best.pth"))
    print(f"Performing predictions for {checkpoints}")
    for checkpoint in checkpoints:
        try:
            InferenceManager(config).run(checkpoint)
        except Exception as ex:
            # Name the failing checkpoint so the log is actionable;
            # keep the broad catch — this loop is deliberately best-effort.
            print(f"Caught exception for {checkpoint}: {ex}")
def __init__(self, base_config):
    """Parse the given base configuration file and keep it for later use."""
    parsed = load_config(base_config)
    self._base = parsed
def predict(config_filename, model_checkpoint):
    """Perform inference using saved model weights, and save to HDF5 database."""
    manager = InferenceManager(load_config(config_filename))
    manager.run(model_checkpoint)
def train_ensemble(config_filename, num_models):
    """Train an ensemble of *num_models* models from the given configuration."""
    manager = EnsembleManager(load_config(config_filename))
    manager.start(num_models)
def analyse(config_filename):
    """Analyse averaged segmentation results and write the analysis output."""
    config = load_config(config_filename)
    setup_logging(config)

    outputs = config["output"]
    analyser = h5.HDF5SegAnalyser(outputs["N"], verbose=2)
    analyser.analyse(outputs["avg"], outputs["ana"])