Code example #1
0
# StatsHandler prints per-iteration losses and per-epoch metrics; the
# evaluator has no loss to report, so output_transform returns None to
# disable the per-iteration output entirely.
val_stats_handler = StatsHandler(
    name='evaluator',
    output_transform=lambda x: None,
)
val_stats_handler.attach(evaluator)

# Dict data format: pull the image filename metadata out of each batch so
# the saver can associate predictions with their input files.
prediction_saver = ClassificationSaver(
    output_dir='tempdir',
    name='evaluator',
    batch_transform=lambda batch: {'filename_or_obj': batch['img.filename_or_obj']},
    output_transform=lambda output: output[0].argmax(1),
)
prediction_saver.attach(evaluator)

# Restore the network weights produced by the "densenet_training_dict" example.
CheckpointLoader(
    load_path='./runs/net_checkpoint_40.pth',
    load_dict={'net': net},
).attach(evaluator)

# Create a validation data loader.
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
val_loader = DataLoader(
    val_ds,
    batch_size=2,
    num_workers=4,
    pin_memory=torch.cuda.is_available(),
)

state = evaluator.run(val_loader)
Code example #2
0
# StatsHandler prints per-iteration losses and per-epoch metrics; the
# evaluator has no loss to report, so output_transform returns None to
# disable per-iteration output (print functions remain customizable).
val_stats_handler = StatsHandler(
    name='evaluator',
    output_transform=lambda x: None,
)
val_stats_handler.attach(evaluator)

# Array data format: the 3rd item of each batch is assumed to be the
# meta data, which the saver uses when writing segmentation files.
file_saver = SegmentationSaver(
    output_path='tempdir',
    output_ext='.nii.gz',
    output_postfix='seg',
    name='evaluator',
    batch_transform=lambda x: x[2],
    output_transform=lambda output: predict_segmentation(output[0]),
)
file_saver.attach(evaluator)

# Restore the network weights produced by the "unet_training_array" example.
ckpt_saver = CheckpointLoader(
    load_path='./runs/net_checkpoint_50.pth',
    load_dict={'net': net},
)
ckpt_saver.attach(evaluator)

# Sliding-window inference expects a single image per iteration.
loader = DataLoader(
    ds,
    batch_size=1,
    num_workers=1,
    pin_memory=torch.cuda.is_available(),
)
state = evaluator.run(loader)
Code example #3
0
)
net.to(device)  # move the model parameters to the selected device


def _sliding_window_processor(_engine, batch):
    """Run one inference iteration in eval mode using sliding-window inference.

    Unpacks (image, label, meta data) from the batch, stitches window-wise
    network outputs into full-volume probabilities with gradients disabled,
    and returns the discrete segmentation derived from them.
    """
    net.eval()
    images, labels, meta = batch
    with torch.no_grad():
        # net(x) returns a tuple here; only its first element is the prediction.
        probs = sliding_window_inference(images, roi_size, sw_batch_size,
                                         lambda window: net(window)[0],
                                         device)
        return predict_segmentation(probs)


infer_engine = Engine(_sliding_window_processor)

# Optional: save checkpoints during inference runs (disabled here).
# checkpoint_handler = ModelCheckpoint('./', 'net', n_saved=10, save_interval=3, require_empty=False)
# infer_engine.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=checkpoint_handler, to_save={'net': net})

# Write each predicted segmentation to tempdir as a NIfTI file.
SegmentationSaver(
    output_path='tempdir',
    output_ext='.nii.gz',
    output_postfix='seg',
).attach(infer_engine)

# Restore the trained network weights before running inference.
CheckpointLoader(
    load_path='./net_checkpoint_9.pth',
    load_dict={'net': net},
).attach(infer_engine)

loader = DataLoader(
    ds,
    batch_size=1,
    num_workers=1,
    pin_memory=torch.cuda.is_available(),
)
state = infer_engine.run(loader)