# Prepare the dataset and resolve an optional checkpoint to restore from.
data_module = DataModule(args)
data_module.prepare_data()
data_module.setup()
ckpt_path = verify_ckpt_path(args)

callbacks = None
model_ckpt = None
if args.benchmark:
    # Benchmark mode: only measure throughput and log it via LoggingCallback.
    model = NNUnet(args)
    batch_size = args.batch_size if args.exec_mode == "train" else args.val_batch_size
    log_dir = os.path.join(args.results, args.logname if args.logname is not None else "perf.json")
    callbacks = [
        LoggingCallback(
            log_dir=log_dir,
            global_batch_size=batch_size * args.gpus,
            mode=args.exec_mode,
            warmup=args.warmup,
            dim=args.dim,
        )
    ]
elif args.exec_mode == "train":
    # Training mode: early stopping on mean Dice, optional checkpointing of the best model.
    model = NNUnet(args)
    early_stopping = EarlyStopping(monitor="dice_mean", patience=args.patience, verbose=True, mode="max")
    callbacks = [early_stopping]
    if args.save_ckpt:
        model_ckpt = ModelCheckpoint(
            filename="{epoch}-{dice_mean:.2f}", monitor="dice_mean", mode="max", save_last=True
        )
        callbacks.append(model_ckpt)
else:  # Evaluation or inference
    if ckpt_path is not None:
        model = NNUnet.load_from_checkpoint(ckpt_path)
    else:
        model = NNUnet(args)
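# The excerpt above only prepares `model`, `callbacks`, and `data_module`; it does not
# show how they reach a Trainer. The sketch below is an illustrative continuation, not
# the exact code from this script: the `max_epochs`/`devices` argument names and the
# "evaluate"/"predict" exec_mode values are assumptions.
from pytorch_lightning import Trainer

trainer = Trainer(
    callbacks=callbacks,
    max_epochs=args.epochs,  # assumed argparse flag
    accelerator="gpu",
    devices=args.gpus,
)
if args.exec_mode == "train":
    trainer.fit(model, datamodule=data_module)
elif args.exec_mode == "evaluate":
    trainer.validate(model, datamodule=data_module)
else:  # "predict"
    trainer.predict(model, datamodule=data_module)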