Example #1
def eval_model_zip(model_zip: ZipFile):
    # unpack the zipped bioimage.io model package into a temporary directory
    with TemporaryDirectory() as tempdir:
        temp_path = Path(tempdir)
        model_zip.extractall(temp_path)
        # locate the model spec file among the extracted files
        spec_file_str = guess_model_path(
            [str(file_name) for file_name in temp_path.glob("*")])
        bioimageio_model = load_model(spec_file_str)

        # build the neural network instance described by the spec
        return get_nn_instance(bioimageio_model)
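A minimal usage sketch for the helper above, assuming a zipped bioimage.io model package; the archive name model.zip is a placeholder:

from zipfile import ZipFile

with ZipFile("model.zip") as model_zip:  # placeholder path to a packaged model
    net = eval_model_zip(model_zip)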
Example #2
    def __init__(self, *, bioimageio_model: nodes.Model, devices: Sequence[str]):
        self._internal_output_axes = bioimageio_model.outputs[0].axes
        spec = bioimageio_model
        self.model = get_nn_instance(bioimageio_model)
        self.devices = [torch.device(d) for d in devices]
        self.model.to(self.devices[0])
        assert isinstance(self.model, torch.nn.Module)
        # load the pytorch state dict onto the first requested device, if weights are given
        weights = spec.weights.get("pytorch_state_dict")
        if weights is not None and weights.source:
            state = torch.load(weights.source, map_location=self.devices[0])
            self.model.load_state_dict(state)
Example #3
    def __init__(self, *, bioimageio_model: nodes.Model, devices: List[str]):
        spec = bioimageio_model
        self.name = spec.name

        _input = spec.inputs[0]  # the input spec is not used further in this snippet
        _output = spec.outputs[0]
        # FIXME: TF probably uses different axis names
        self._internal_output_axes = _output.axes

        self.model = get_nn_instance(bioimageio_model)
        self.devices = []
        # load the keras model from the tensorflow saved-model bundle and attach it
        tf_model = tf.keras.models.load_model(
            spec.weights["tensorflow_saved_model_bundle"].source)
        self.model.set_model(tf_model)
Example #4
def convert_weights_to_onnx(model_yaml: Union[str, Path],
                            output_path: Union[str, Path],
                            opset_version: Union[int, None] = 12,
                            use_tracing: bool = True,
                            verbose: bool = True):
    """ Convert model weights from format 'pytorch_state_dict' to 'onnx'.

    Arguments:
        model_yaml: location of the model.yaml file with bioimage.io spec
        output_path: where to save the onnx weights
        opset_version: onnx opset version
        use_tracing: whether to use tracing or scripting to export the onnx format
        verbose: be verbose during the onnx export
    """
    spec = load_model(model_yaml)

    with torch.no_grad():
        # load input and expected output data
        input_data = np.load(spec.test_inputs[0]).astype('float32')
        input_tensor = torch.from_numpy(input_data)

        # instantiate and generate the expected output
        model = get_nn_instance(spec)
        state = torch.load(spec.weights['pytorch_state_dict'].source)
        model.load_state_dict(state)
        expected_output = model(input_tensor).numpy()

        if use_tracing:
            torch.onnx.export(model,
                              input_tensor,
                              output_path,
                              verbose=verbose,
                              opset_version=opset_version)
        else:
            raise NotImplementedError

        # check the onnx model
        sess = rt.InferenceSession(output_path)
        input_name = sess.get_inputs()[0].name
        output = sess.run(None, {input_name: input_data})[0]

        try:
            assert_array_almost_equal(expected_output, output, decimal=4)
            return 0
        except AssertionError as e:
            msg = f"The onnx weights were exported, but results before and after conversion do not agree:\n {str(e)}"
            warnings.warn(msg)
            return 1
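A usage sketch for the converter above; the paths are placeholders and assume the spec's test input and 'pytorch_state_dict' weights resolve locally:

ret = convert_weights_to_onnx("model.yaml", "weights.onnx", opset_version=12)
if ret != 0:
    print("onnx export finished, but outputs disagree with the pytorch reference")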
Example #5
def convert_weights_to_torchscript(
    model_yaml: Union[str, Path],
    output_path: Union[str, Path],
    use_tracing: bool = True
):
    """ Convert model weights from format 'pytorch_state_dict' to 'torchscript'.
    """
    spec = load_model(model_yaml)

    with torch.no_grad():
        # load input and expected output data
        input_data = np.load(spec.test_inputs[0]).astype('float32')
        input_data = torch.from_numpy(input_data)

        # instantiate model and get reference output
        model = get_nn_instance(spec)
        state = torch.load(spec.weights['pytorch_state_dict'].source)
        model.load_state_dict(state)

        # get the expected output to validate the torchscript weights
        expected_output = model(input_data)

        # make scripted model
        if use_tracing:
            scripted_model = torch.jit.trace(model, input_data)
        else:
            scripted_model = torch.jit.script(model)

        # check the scripted model
        output = scripted_model(input_data).numpy()

    # save the torchscript model
    scripted_model.save(output_path)

    try:
        assert_array_almost_equal(expected_output, output, decimal=4)
        return 0
    except AssertionError as e:
        msg = f"The onnx weights were exported, but results before and after conversion do not agree:\n {str(e)}"
        warnings.warn(msg)
        return 1
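A usage sketch for the torchscript converter; the paths are again placeholders:

ret = convert_weights_to_torchscript("model.yaml", "weights_torchscript.pt", use_tracing=True)

torch.jit.trace records the operations executed for the given example input, while torch.jit.script compiles the model's Python code directly; this function defaults to tracing.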
Example #6
def simple_training(
    bioimageio_model: ModelWithKwargs,
    n_iterations: int,
    batch_size: int,
    num_workers: int,
    out_file: Union[str, Path, IO[bytes]],
) -> torch.nn.Module:
    """ Simplified training loop.
    """
    if isinstance(out_file, (str, Path)):
        out_file = Path(out_file)
        out_file.parent.mkdir(exist_ok=True)

    model = get_nn_instance(bioimageio_model)

    # instantiate all training parameters from the training config
    setup = bioimageio_model.spec.training.setup

    sampler = get_nn_instance(setup.sampler)

    preprocess = [get_nn_instance(prep) for prep in setup.preprocess]
    postprocess = [get_nn_instance(post) for post in setup.postprocess]

    losses = [get_nn_instance(loss_prep) for loss_prep in setup.losses]
    optimizer = get_nn_instance(setup.optimizer, params=model.parameters())

    # build the data-loader from our sampler
    loader = DataLoader(sampler, shuffle=True, num_workers=num_workers, batch_size=batch_size)

    # run the training loop
    for ii in trange(n_iterations):
        x, y = next(iter(loader))
        optimizer.zero_grad()

        x, y = apply_transformations(preprocess, x, y)
        out = model(x)
        out, y = apply_transformations(postprocess, out, y)
        # keep the loss instances separate from the computed values,
        # otherwise the list of loss functions is overwritten after the first iteration
        loss_values = apply_transformations(losses, out, y)
        ll = sum(loss_values)
        ll.backward()

        optimizer.step()

    # save model weights
    torch.save(model.state_dict(), out_file)
    return model
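A hedged call sketch for the training helper; bioimageio_model is assumed to be a loaded spec whose training.setup provides the sampler, pre/postprocessing, losses, and optimizer, and weights.pt is a placeholder output path:

trained = simple_training(
    bioimageio_model, n_iterations=500, batch_size=4, num_workers=2, out_file="weights.pt"
)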