Example #1
import torch
from ivadomed import models as imed_model  # assumed import path, matching the alias used below

def test_model_3d_att():
    # Verify that the 3D attention U-Net can be instantiated and run a forward pass
    # The nested comprehension builds a zero-filled input of shape (batch=1, channel=1, 16, 48, 48)
    a = [[[[[0 for i in range(48)] for j in range(48)] for k in range(16)]]]
    inp = torch.tensor(a).float()
    model = imed_model.Modified3DUNet(in_channel=1, out_channel=1, attention=True)
    inf = model(inp)
    assert isinstance(inf, torch.Tensor)
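
The nested list comprehension above only builds a zero-filled dummy volume; for clarity, a more direct sketch that produces the same (1, 1, 16, 48, 48) input:

import torch

# Same zero-filled dummy input: (batch, channel, depth, height, width)
inp = torch.zeros(1, 1, 16, 48, 48)
assert inp.shape == (1, 1, 16, 48, 48)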
Example #2
import os
import subprocess

import torch
from ivadomed import models as imed_models  # assumed import path, matching the alias used below

def test_create_model_unet3d():
    # Build a small attention 3D U-Net, save it, and copy it as best_model.pt for later use
    model = imed_models.Modified3DUNet(in_channel=1,
                                       out_channel=1,
                                       n_filters=8,
                                       attention=True)
    torch.save(model, "model_unet_3d_test.pt")
    os.makedirs("3d_test")
    command = "cp model_unet_3d_test.pt 3d_test/best_model.pt"
    subprocess.check_output(command, shell=True)
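
The shell `cp` call is POSIX-only; if only the file copy itself matters, a portable sketch using the standard library would be:

import os
import shutil

# Create the target directory (idempotent) and copy the saved model into it
os.makedirs("3d_test", exist_ok=True)
shutil.copy("model_unet_3d_test.pt", os.path.join("3d_test", "best_model.pt"))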
Example #3
import shutil

import nibabel as nib
import numpy as np
import torch
from ivadomed import models as imed_models        # assumed import paths,
from ivadomed import utils as imed_utils          # matching the aliases used below
from ivadomed import inference as imed_inference

# PATH_MODEL, PATH_MODEL_PT, PATH_MODEL_ONNX and IMAGE_PATH are constants defined elsewhere in the test module.

def test_onnx(download_data_testing_test_files):
    model = imed_models.Modified3DUNet(1, 1)
    if not PATH_MODEL.exists():
        PATH_MODEL.mkdir()
    torch.save(model, PATH_MODEL_PT)
    # Crop a small sub-volume so the forward pass stays cheap
    img = nib.load(IMAGE_PATH).get_fdata().astype('float32')[:16, :64, :32]
    # Add batch and channel dimensions
    img_tensor = torch.tensor(img).unsqueeze(0).unsqueeze(0)
    # Export to ONNX using a fixed-size dummy input
    dummy_input = torch.randn(1, 1, 32, 32, 32)
    imed_utils.save_onnx_model(model, dummy_input, str(PATH_MODEL_ONNX))

    model = torch.load(PATH_MODEL_PT)
    model.eval()
    out_pt = model(img_tensor).detach().numpy()

    out_onnx = imed_inference.onnx_inference(str(PATH_MODEL_ONNX),
                                             img_tensor).numpy()
    shutil.rmtree(PATH_MODEL)
    # PyTorch and ONNX outputs should agree within a small relative tolerance
    assert np.allclose(out_pt, out_onnx, rtol=1e-3)
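
For comparison, the exported model can also be run directly with onnxruntime instead of the imed_inference.onnx_inference helper; a minimal sketch, assuming onnxruntime is installed, the model has a single input, and PATH_MODEL_ONNX / img_tensor are reused from the snippet above:

import onnxruntime as ort

# Load the exported graph and run the cropped volume through it
sess = ort.InferenceSession(str(PATH_MODEL_ONNX))
input_name = sess.get_inputs()[0].name
out_onnx = sess.run(None, {input_name: img_tensor.numpy()})[0]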
Example #4
import json
import os
import shutil

import nibabel as nib
import numpy as np
import torch
from ivadomed import models as imed_models        # assumed import paths,
from ivadomed import inference as imed_inference  # matching the aliases used below

# PATH_MODEL, IMAGE_PATH, LENGTH_3D and BATCH_SIZE are constants defined elsewhere in the test module.

def test_segment_volume_3d(download_functional_test_files, center_crop):
    model = imed_models.Modified3DUNet(in_channel=1,
                                       out_channel=1,
                                       base_n_filter=1)

    if not os.path.exists(PATH_MODEL):
        os.mkdir(PATH_MODEL)

    # Save the model next to the config that segment_volume will read
    torch.save(model, os.path.join(PATH_MODEL, "model_test.pt"))
    config = {
        "Modified3DUNet": {
            "applied": True,
            "length_3D": LENGTH_3D,
            "stride_3D": LENGTH_3D,
            "attention": False
        },
        "loader_parameters": {
            "slice_filter_params": {
                "filter_empty_mask": False,
                "filter_empty_input": False
            },
            "roi_params": {
                "suffix": None,
                "slice_filter_roi": None
            },
            "slice_axis": "sagittal"
        },
        "transformation": {
            "Resample": {
                "wspace": 1,
                "hspace": 1,
                "dspace": 2
            },
            "CenterCrop": {
                "size": center_crop
            },
            "RandomTranslation": {
                "translate": [0.03, 0.03],
                "applied_to": ["im", "gt"],
                "dataset_type": ["training"]
            },
            "NumpyToTensor": {},
            "NormalizeInstance": {
                "applied_to": ["im"]
            }
        },
        "postprocessing": {},
        "training_parameters": {
            "batch_size": BATCH_SIZE
        }
    }

    PATH_CONFIG = os.path.join(PATH_MODEL, 'model_test.json')
    with open(PATH_CONFIG, 'w') as fp:
        json.dump(config, fp)

    nib_lst, _ = imed_inference.segment_volume(PATH_MODEL, [IMAGE_PATH])
    nib_img = nib_lst[0]
    # The prediction must match the input volume's shape, stay within [0, 1] and be float32
    assert np.squeeze(nib_img.get_fdata()).shape == nib.load(IMAGE_PATH).shape
    assert (nib_img.dataobj.max() <= 1.0) and (nib_img.dataobj.min() >= 0.0)
    assert nib_img.dataobj.dtype == 'float32'

    shutil.rmtree(PATH_MODEL)