Example #1
def load_model(model_uri, ctx):
    """
    Load a Gluon model from a local file or a run.

    :param model_uri: The location, in URI format, of the MLflow model. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param ctx: The MXNet context to load the model parameters into,
                e.g. ``mx.cpu()`` or ``mx.gpu(0)``.

    :return: A Gluon model instance.

    >>> # Load persisted model as a Gluon model, make inferences against an NDArray
    >>> model = mlflow.gluon.load_model("runs:/" + gluon_random_data_run.info.run_id + "/model")
    >>> model(nd.array(np.random.rand(1000, 1, 32)))
    """
    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)

    model_arch_path = os.path.join(local_model_path, "data",
                                   _MODEL_SAVE_PATH) + "-symbol.json"
    model_params_path = os.path.join(local_model_path, "data",
                                     _MODEL_SAVE_PATH) + "-0000.params"
    symbol = sym.load(model_arch_path)
    inputs = sym.var('data', dtype='float32')
    net = gluon.SymbolBlock(symbol, inputs)
    net.collect_params().load(model_params_path, ctx)
    return net
Example #2
def load_model(model_uri, ctx, dst_path=None):
    """
    Load a Gluon model from a local file or a run.

    :param model_uri: The location, in URI format, of the MLflow model. For example:

                      - ``/Users/me/path/to/local/model``
                      - ``relative/path/to/local/model``
                      - ``s3://my_bucket/path/to/model``
                      - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
                      - ``models:/<model_name>/<model_version>``
                      - ``models:/<model_name>/<stage>``

                      For more information about supported URI schemes, see
                      `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
                      artifact-locations>`_.
    :param ctx: The MXNet context to load the model parameters into,
                e.g. ``mx.cpu()`` or ``mx.gpu(0)``.
    :param dst_path: The local filesystem path to which to download the model artifact.
                     This directory must already exist. If unspecified, a local output
                     path will be created.

    :return: A Gluon model instance.

    .. code-block:: python
        :caption: Example

        # Load persisted model as a Gluon model, make inferences against an NDArray
        model = mlflow.gluon.load_model("runs:/" + gluon_random_data_run.info.run_id + "/model")
        model(nd.array(np.random.rand(1000, 1, 32)))
    """
    import mxnet as mx
    from mxnet import gluon
    from mxnet import sym

    local_model_path = _download_artifact_from_uri(artifact_uri=model_uri,
                                                   output_path=dst_path)
    flavor_conf = _get_flavor_configuration(model_path=local_model_path,
                                            flavor_name=FLAVOR_NAME)
    _add_code_from_conf_to_system_path(local_model_path, flavor_conf)

    model_arch_path = os.path.join(local_model_path, "data",
                                   _MODEL_SAVE_PATH) + "-symbol.json"
    model_params_path = os.path.join(local_model_path, "data",
                                     _MODEL_SAVE_PATH) + "-0000.params"

    if Version(mx.__version__) >= Version("2.0.0"):
        return gluon.SymbolBlock.imports(model_arch_path,
                                         input_names=["data"],
                                         param_file=model_params_path,
                                         ctx=ctx)
    else:
        symbol = sym.load(model_arch_path)
        inputs = sym.var("data", dtype="float32")
        net = gluon.SymbolBlock(symbol, inputs)
        net.collect_params().load(model_params_path, ctx)
    return net
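
A minimal usage sketch for the function above (the run id placeholder is illustrative; a context is chosen with ``mx.context.num_gpus``):

import mxnet as mx
import mlflow.gluon

# Choose the context the model parameters will be loaded into.
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()

# Substitute a real MLflow run id for <mlflow_run_id>.
model = mlflow.gluon.load_model("runs:/<mlflow_run_id>/model", ctx)
output = model(mx.nd.random.uniform(shape=(1, 1, 32)))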
Example #3
def __init__(self,
             inputs=('C3', 'C4', 'C5'),
             version="v2",
             repeats=4,
             channels=256,
             pre_conv=False,
             cbam=False,
             cbam_reduction=16,
             cbam_expand_dilate=False,
             expand_channels=False,
             weighted_add=False):
    ipts_var = [sym.var(n, dtype='float32') for n in inputs]
    if pre_conv:
        outputs = [
            _conv(x,
                  f'{name}_pre_conv',
                  int(channels * 2**i) if expand_channels else channels,
                  kernel=1,
                  stride=1,
                  pad=0,
                  no_bias=True,
                  norm_layer=sym.BatchNorm,
                  norm_kwargs={
                      'momentum': 0.99,
                      'eps': 1e-5
                  }) for i, (x, name) in enumerate(zip(ipts_var, inputs))
        ]
    else:
        outputs = ipts_var
    for idx in range(repeats):
        if version == "v1":
            outputs = _build_single_bifpn(idx, outputs, inputs, channels,
                                          weighted_add, expand_channels)
        elif version == "v2":
            outputs = _build_single_bifpn_v2(idx, outputs, inputs,
                                             channels, weighted_add,
                                             expand_channels)
        else:
            raise ValueError(f"Unknown version: {version}")
    if cbam:
        shortcut = outputs
        outputs = [
            _cbam(x,
                  f'{name}_cbam',
                  int(channels * 2**i) if expand_channels else channels,
                  reduction=cbam_reduction,
                  act_type='relu',
                  spatial_dilate=0 if not cbam_expand_dilate else i + 1)
            for i, (x, name) in enumerate(zip(outputs, inputs))
        ]
        outputs = [o + s for o, s in zip(outputs, shortcut)]
    super(RecalibreatedBiFPNSymbol, self).__init__(outputs, ipts_var)
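
A hypothetical instantiation of the class above; the argument values are illustrative and simply exercise the constructor's options:

# Build a BiFPN-v2 over C3-C5 backbone features with CBAM
# recalibration and weighted feature fusion (illustrative values).
fpn = RecalibreatedBiFPNSymbol(inputs=('C3', 'C4', 'C5'),
                               version='v2',
                               repeats=4,
                               channels=256,
                               cbam=True,
                               weighted_add=True)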
Example #4
def _weighted_add(inputs: Tuple, name, weighted_add=False, epsilon=1e-4):
    if weighted_add:
        weight = sym.var(f'{name}.add.weight',
                         shape=(len(inputs), ),
                         dtype=mx.np.float32,
                         init=mx.init.One())
        weight = _activate(weight, act_type='relu')
        weight = sym.broadcast_div(weight,
                                   sym.sum(weight, keepdims=False) + epsilon)
        out = sym.add_n(*[
            sym.broadcast_mul(w, ipt)
            for w, ipt in zip(sym.split(weight, len(inputs), axis=0), inputs)
        ],
                        name=f'{name}_add')
    else:
        out = sym.add_n(*inputs, name=f'{name}_add')
    return out
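
To make the weighted branch concrete, here is a small NumPy sketch of the same fast normalized fusion idea (as popularized by EfficientDet): the learnable weights are passed through ReLU, normalized to sum to roughly one, and then used to scale each input before summation.

import numpy as np

w = np.maximum(np.array([0.7, -0.2, 1.3]), 0.0)  # ReLU keeps weights non-negative
w = w / (w.sum() + 1e-4)                         # normalize so the weights sum to ~1
feature_maps = [np.ones((2, 2)) * v for v in (1.0, 2.0, 3.0)]
fused = sum(wi * fm for wi, fm in zip(w, feature_maps))
print(fused)  # every element is the normalized weighted sum of the inputs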
Example #5
from mxnet import sym
from model import Net

# Feeding a Symbol through the network traces it symbolically:
# the result is a Symbol graph rather than an NDArray.
x = sym.var('data')
net = Net()
y = net(x)

# Serialize the traced graph; this saves the architecture only, not the weights.
y.save('model.json')
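
A minimal sketch of reading the saved graph back; note that model.json stores only the architecture, so parameters must still be initialized (or loaded from a separate .params file):

from mxnet import gluon, sym

symbol = sym.load('model.json')                     # restore the serialized graph
net = gluon.SymbolBlock(symbol, [sym.var('data')])  # wrap it for imperative use
net.initialize()  # random weights; use collect_params().load(...) for trained ones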

Example #6
def get_onnx_model(model):
    if isinstance(model, keras.models.Model):
        # Create a session to avoid problems with names
        session = tf.Session()
        backend.set_session(session)

        with session.as_default():
            with session.graph.as_default():
                # If the model is sequential, it must be executed once and converted
                # to a functional one prior to exporting the ONNX model
                if isinstance(model, keras.models.Sequential):
                    model.compile('Adam')

                    # Generate a random input just to execute the model once
                    dummy_input = numpy.random.rand(2, 2)
                    model.predict(dummy_input)

                    # Find the input and output layers to construct the functional model
                    input_layer = layers.Input(batch_shape=model.layers[0].input_shape)
                    prev_layer = input_layer
                    for layer in model.layers:
                        prev_layer = layer(prev_layer)

                    # Create a functional model equivalent to the sequential model
                    model = models.Model([input_layer], [prev_layer])

                # Export the functional keras model
                onnx_model = onnxmltools.convert_keras(model, target_opset=7)
    elif isinstance(model, torch.nn.Module):
        input_shape_found = False

        # Try to find the input shape and export the model
        for i in range(1, 5000):
            try:
                dummy_input = torch.randn(i, i, dtype=torch.float)
                torch.onnx.export(model, dummy_input, ONNX_MODEL_PATH)
                input_shape_found = True
            except RuntimeError:
                pass

            # There was no error, so the input shape has been correctly guessed
            # and the ONNX model was exported so we can stop iterating
            if input_shape_found:
                break

        # If the input shape could not be guessed, return None
        # and an error message will be displayed to the user
        if not input_shape_found:
            return None

        # Load the exported ONNX model file and remove the left-over file
        onnx_model = onnx.load_model(ONNX_MODEL_PATH)
        os.remove(ONNX_MODEL_PATH)
    elif isinstance(model, mx.gluon.nn.HybridBlock):
        # Initialize the MXNet model and create some dummy input
        model.collect_params().initialize(mx.init.Normal())
        dummy_input = numpy.random.rand(2, 2)

        # Propagate the input forward so the model can be fully initialized
        with mx.autograd.record():
            model(mx.nd.array(dummy_input))

        # Once initialized, export the ONNX equivalent of the model
        onnx_mxnet.export_model(
            sym=model(sym.var('data')),
            params={k: v._reduce() for k, v in model.collect_params().items()},
            input_shape=[(64, 2)],
            onnx_file_path=ONNX_MODEL_PATH)

        # Load the exported ONNX model file and remove the left-over file
        onnx_model = onnx.load_model(ONNX_MODEL_PATH)
        os.remove(ONNX_MODEL_PATH)
    elif isinstance(model, onnx.ModelProto):
        # The model is already an ONNX one
        onnx_model = model
    else:
        # The model was not produced by Keras, PyTorch, MXNet or ONNX and cannot be visualized
        # This point should not be reachable, as retrieval of the flow reinitializes the model
        # and that ensures it can be handled by MXNetExtension, OnnxExtension, PytorchExtension
        # or KerasExtension and therefore the model was produced by one of the libraries.
        return None

    return onnx_model
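
A brief, hypothetical usage sketch for the function above (the model here is illustrative; any Keras, PyTorch, MXNet Gluon or ONNX model the function handles would do):

import keras

model = keras.models.Sequential([keras.layers.Dense(4, input_shape=(2,))])

onnx_model = get_onnx_model(model)
if onnx_model is None:
    print("The model could not be converted to ONNX")
else:
    print(onnx_model.graph)  # inspect the converted ONNX graph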