Example #1
    def _build_graph(self):
        if self._use_static_shape:
            self._graph_builder.build(self._input_info.shape)
        else:
            self._graph_builder.build()

        self._onnx_models.optimized_model = onnx.load_model_from_string(
            self._graph_builder.get_model())

        self._onnx_models.optimized_pre_grad_model = onnx.load_model_from_string(
            self._graph_builder.get_inference_optimized_model())

        self._graph_info = self._graph_builder.get_graph_info()

        # Map each input/initializer to its gradient index in the graph output, or -1 if gradient is not required.
        self._gradient_map = []
        num_user_input_grads = len(self._input_info.require_grad_names)
        require_grad_names_set = set(self._input_info.require_grad_names)
        require_grad_names_index = 0
        for input_name in self._graph_info.user_input_names:
            if input_name in require_grad_names_set:
                self._gradient_map.append(require_grad_names_index)
                require_grad_names_index += 1
            else:
                self._gradient_map.append(-1)

        initializer_index = num_user_input_grads
        for initializer_name in self._graph_info.initializer_names:
            if initializer_name in self._graph_initializer_names_to_train:
                self._gradient_map.append(initializer_index)
                initializer_index += 1
            else:
                self._gradient_map.append(-1)
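
The gradient map built above pairs each user input and initializer with its index among the graph's gradient outputs, using -1 for entries that do not require a gradient. A minimal standalone sketch of the same index-mapping pattern (hypothetical names, not part of the example above):

def build_gradient_map(all_names, names_requiring_grad, start_index=0):
    # Names that require gradients receive consecutive indices; the rest get -1.
    requiring = set(names_requiring_grad)
    gradient_map = []
    next_index = start_index
    for name in all_names:
        if name in requiring:
            gradient_map.append(next_index)
            next_index += 1
        else:
            gradient_map.append(-1)
    return gradient_map

# build_gradient_map(["x", "y", "w"], ["x", "w"]) -> [0, -1, 1]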
Example #2
    def _build_graph(self):
        if self._use_static_shape:
            self._graph_builder.build(self._input_info.shape)
        else:
            self._graph_builder.build()

        self._onnx_models.optimized_model = onnx.load_model_from_string(
            self._graph_builder.get_model())

        self._onnx_models.optimized_pre_grad_model = onnx.load_model_from_string(
            self._graph_builder.get_inference_optimized_model())

        self._graph_info = self._graph_builder.get_graph_info()
Example #3
    def _build_training_graph(self, *inputs, **kwargs):
        if self._use_static_shape:
            self._module_gradient_graph_builder.build(
                self._current_input_shape)
        else:
            self._module_gradient_graph_builder.build()
        self._onnx_training = onnx.load_model_from_string(
            self._module_gradient_graph_builder.get_training_model())
        self._onnx_graphs_info = self._module_gradient_graph_builder.get_training_graph_info()

        if self._save_onnx:
            inference_optimized_model = onnx.load_model_from_string(
                self._module_gradient_graph_builder.get_inference_optimized_model())
            onnx.save(inference_optimized_model, self._save_onnx_prefix + '_inference_optimized.onnx')
            onnx.save(self._onnx_training, self._save_onnx_prefix + '_training.onnx')
Example #4
    def test_save_and_load_model(self):
        proto = self._simple_model()
        cls = ModelProto
        proto_string = onnx._serialize(proto)

        # Test if input is string
        loaded_proto = onnx.load_model_from_string(proto_string)
        self.assertTrue(proto == loaded_proto)

        # Test if input has a read function
        f = io.BytesIO()
        onnx.save_model(proto_string, f)
        f = io.BytesIO(f.getvalue())
        loaded_proto = onnx.load_model(f, cls)
        self.assertTrue(proto == loaded_proto)

        # Test if input is a file name
        try:
            f = tempfile.NamedTemporaryFile(delete=False)
            onnx.save_model(proto, f)
            f.close()

            loaded_proto = onnx.load_model(f.name, cls)
            self.assertTrue(proto == loaded_proto)
        finally:
            os.remove(f.name)
Example #5
def _load_onnx_model(onnx_string_or_file):
    """Loads onnx model from file or string"""
    # if input is file, read string
    if hasattr(onnx_string_or_file, "seek"):
        onnx_string_or_file.seek(0)
        return onnx.load(onnx_string_or_file)
    return onnx.load_model_from_string(onnx_string_or_file)
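
A short usage sketch for the helper above; it accepts either a serialized protobuf byte string or a file-like object. The tiny Identity model below is constructed only for illustration:

import io

import onnx
from onnx import TensorProto, helper

# Build a trivial one-node model purely for demonstration.
node = helper.make_node("Identity", ["x"], ["y"])
graph = helper.make_graph(
    [node], "demo_graph",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [1])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [1])])
demo_model = helper.make_model(graph)

# Both call styles should return an equivalent ModelProto.
from_bytes = _load_onnx_model(demo_model.SerializeToString())
from_file = _load_onnx_model(io.BytesIO(demo_model.SerializeToString()))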
Example #6
def from_onnx_bytes_opaque_func(xgraph, arg):
    """ Expose the ONNX model import function as an opaque function
        so it can be called from both Python and C++ """

    onnx_model = onnx.load_model_from_string(arg)

    _from_onnx(onnx_model, xgraph=xgraph)
Example #7
def fwd_graph(popart_model, torch_model, mapping=None, transform=None):
    #  ------------------- PopART --------------------
    config = popart_model.config
    builder = popart_model.builder

    sequence_info = popart.TensorInfo(
        "INT32", [config.batch_size * config.sequence_length])
    indices = builder.addInputTensor(sequence_info)
    positions = builder.addInputTensor(sequence_info)
    segments = builder.addInputTensor(sequence_info)
    data = {
        indices:
        np.random.randint(0, config.vocab_length,
                          (config.batch_size * config.sequence_length)).astype(
                              np.int32),
        positions:
        np.random.randint(0, config.sequence_length,
                          (config.batch_size * config.sequence_length)).astype(
                              np.int32),
        segments:
        np.random.randint(0, 2,
                          (config.batch_size * config.sequence_length)).astype(
                              np.int32)
    }

    output = popart_model.build_graph(indices, positions, segments)
    proto = builder.getModelProto()

    outputs, post_proto = run_py(
        proto,
        data,
        output,
        ipus=math.ceil(config.num_layers / config.layers_per_ipu) +
        popart_model.layer_offset)

    # ----------------- PopART -> PyTorch ----------------
    proto = onnx.load_model_from_string(proto)

    inputs = {
        "input_ids":
        data[indices].reshape(config.batch_size, config.sequence_length),
        "position_ids":
        data[positions].reshape(config.batch_size, config.sequence_length),
        "token_type_ids":
        data[segments].reshape(config.batch_size, config.sequence_length)
    }

    torch_to_onnx = get_mapping(config, init=mapping)

    transform_weights = get_transform(config, init=transform)

    #  ------------------- PyTorch -------------------------
    # Turn off dropout
    torch_model.eval()

    copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform_weights)

    torch_outputs = run_fwd_model(inputs, torch_model)

    check_tensors(torch_outputs, outputs)
Example #8
def test_np_memory_layout_add_initialized_input_tensor2():
    """ Test that when we create a parameter input with a non-contiguous array
        it is correctly represented in the computational graph.
    """
    np.random.seed(1)

    # Build a computational graph. Add two input parameters, one contiguous and one transposed
    # and hence non-contiguous.
    builder = popart.Builder()
    input1Value = np.random.randint(0, 100, size=(3, 5), dtype='int32')
    input2Value = np.random.randint(0, 100, size=(20, 30), dtype='int32')
    input2Value = np.transpose(input2Value)
    _ = builder.addInitializedInputTensor(input1Value, "contiguous_input")
    _ = builder.addInitializedInputTensor(input2Value, "transposed_input")

    # Get data back from computational graph.
    proto = builder.getModelProto()
    onnxProto = onnx.load_model_from_string(proto)
    input1Init = next(arr for arr in onnxProto.graph.initializer
                      if 'contiguous_input' in arr.name)
    input2Init = next(arr for arr in onnxProto.graph.initializer
                      if 'transposed_input' in arr.name)
    input1ValueInGraph = numpy_helper.to_array(input1Init)
    input2ValueInGraph = numpy_helper.to_array(input2Init)

    # Test the data matches the initialised arrays.
    assert (input1ValueInGraph == input1Value
            ).all(), f"Expected {input1ValueInGraph} to match {input1Value}"
    assert (input2ValueInGraph == input2Value
            ).all(), f"Expected {input2ValueInGraph} to match {input2Value}"
Example #9
def pytorch_result_and_model(torch_config, inputs, popart_proto, is_bwd=False):
    # Conversion of the popart model to onnx
    proto = onnx.load_model_from_string(popart_proto)

    torch_model = BertFCN(torch_config)
    # Turn off dropout
    torch_model.eval()

    copy_weights_to_torch(torch_model,
                          proto,
                          TORCH_TO_ONNX,
                          transform=TRANSPOSE_WEIGHTS)

    result = run_fwd_model(inputs, torch_model)

    if is_bwd:
        l1_lambda = 0.1
        optim = torch.optim.SGD(torch_model.parameters(),
                                0.01,
                                weight_decay=0.0,
                                momentum=0.0)

        result = torch_model(*[torch.from_numpy(t).float() for t in inputs])[0]
        torch_loss = l1_lambda * torch.norm(result, 1)
        torch_loss.backward()
        optim.step()
        result = result.detach().numpy()

    return result, torch_model
Example #10
    def test_save_and_load_model(self):  # type: () -> None
        proto = self._simple_model()
        cls = ModelProto
        proto_string = onnx._serialize(proto)

        # Test if input is string
        loaded_proto = onnx.load_model_from_string(proto_string)
        self.assertTrue(proto == loaded_proto)

        # Test if input has a read function
        f = io.BytesIO()
        onnx.save_model(proto_string, f)
        f = io.BytesIO(f.getvalue())
        loaded_proto = onnx.load_model(f, cls)
        self.assertTrue(proto == loaded_proto)

        # Test if input is a file name
        try:
            fi = tempfile.NamedTemporaryFile(delete=False)
            onnx.save_model(proto, fi)
            fi.close()

            loaded_proto = onnx.load_model(fi.name, cls)
            self.assertTrue(proto == loaded_proto)
        finally:
            os.remove(fi.name)
Example #11
def from_onnx(onnx_string_or_file):
    """
    Constructs a CrypTen model or module from an ONNX Protobuf string or file.
    """

    # if input is file, read string:
    if hasattr(onnx_string_or_file, "seek"):  # input is file-like
        onnx_string_or_file.seek(0)
        onnx_model = onnx.load(onnx_string_or_file)
    else:
        onnx_model = onnx.load_model_from_string(onnx_string_or_file)

    # create dict of all parameters, inputs, and outputs:
    all_parameters = {
        t.name: torch.from_numpy(numpy_helper.to_array(t))
        for t in onnx_model.graph.initializer
    }
    input_names = [input.name for input in onnx_model.graph.input]
    output_names = [output.name for output in onnx_model.graph.output]
    input_names = [
        name for name in input_names if name not in all_parameters.keys()
    ]  # parameters are not inputs
    assert len(input_names) == 1, "number of inputs should be 1"
    assert len(output_names) == 1, "number of outputs should be 1"

    # create graph by looping over nodes:
    crypten_model = Graph(input_names[0], output_names[0])
    for node in onnx_model.graph.node:
        # get operator type:
        if node.op_type not in ONNX_TO_CRYPTEN:
            raise ValueError("CrypTen does not support op %s." % node.op_type)
        cls = ONNX_TO_CRYPTEN[node.op_type]

        # retrieve inputs, outputs, attributes, and parameters for this node:
        node_output_name = [name for name in node.output][0]
        node_input_names = [name for name in node.input]  # includes parameters
        parameters = {
            get_parameter_name(name): all_parameters[name]
            for name in node_input_names
            if name in all_parameters and name not in input_names
        }  # all the parameters for the current module
        node_input_names = [
            name
            for name in node_input_names
            if get_parameter_name(name) not in parameters
        ]
        attributes = {attr.name: get_attribute_value(attr) for attr in node.attribute}

        # add CrypTen module to graph:
        crypten_module = cls.from_onnx(parameters=parameters, attributes=attributes)
        crypten_model.add_module(node_output_name, crypten_module, node_input_names)

    # return model (or module when there is only one module):
    num_modules = len([_ for _ in crypten_model.modules()])
    if num_modules == 1:
        for crypten_module in crypten_model.modules():
            return crypten_module
    else:
        return crypten_model
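
A minimal usage sketch for from_onnx, assuming a PyTorch module has already been serialized to ONNX bytes (the export below is illustrative, and conversion only succeeds for ops present in ONNX_TO_CRYPTEN):

import io

import torch

# Export a tiny module to ONNX in memory, then hand the raw bytes to from_onnx.
f = io.BytesIO()
torch.onnx.export(torch.nn.ReLU(), (torch.randn(1, 4),), f)
crypten_module = from_onnx(f.getvalue())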
Example #12
    def _get_exported_model(self, *inputs, **kwargs):
        '''Exports PyTorch `self._flattened_module` to ONNX for inferencing or training, using `*inputs` as input

        TODO: How to support dynamic axes? Dimensions are determined by samples
        '''

        # Setup dynamic axes for onnx model
        self._input_info = _io.parse_inputs_for_onnx_export(
            self._module_parameters, None, inputs, kwargs)
        output_names, output_dynamic_axes, self._module_output_schema = \
            _io.parse_outputs_for_onnx_export_and_extract_schema(self._original_module, inputs, kwargs)
        self._input_info.dynamic_axes.update(output_dynamic_axes)

        # FlattenedModule needs _InputInfo to expand user input from *args to *args + **kwargs
        self._flattened_module._input_info = self._input_info

        # Export torch.nn.Module to ONNX
        f = io.BytesIO()

        # Deepcopy inputs, since input values may change after model run.
        # NOTE: Inputs may contain tensors that have attributes preventing their deepcopy (example grad_fn).
        # Therefore, deepcopy only the data component of the input tensors for export.
        sample_inputs_copy, sample_kwargs_copy = _io.deepcopy_model_input(
            *inputs, **kwargs)
        # NOTE: Flattening the input will change the 'input schema', resulting in a re-export
        sample_inputs_as_tuple = tuple(
            self._input_info.flatten(sample_inputs_copy, sample_kwargs_copy,
                                     self._device))
        # Ops behaving differently under train/eval mode need to be exported with the
        # correct training flag to reflect the expected behavior.
        # For example, the Dropout node in a model is dropped under eval mode.
        assert self._export_mode is not None, "Please use a concrete instance of ExecutionManager"

        try:
            with torch.set_grad_enabled(self._enable_custom_autograd_function), \
                    _logger.suppress_os_stream_output(log_level=self._debug_options.logging.log_level):
                torch.onnx.export(self._flattened_module,
                                  sample_inputs_as_tuple,
                                  f,
                                  input_names=self._input_info.names,
                                  output_names=output_names,
                                  opset_version=ONNX_OPSET_VERSION,
                                  do_constant_folding=False,
                                  training=self._export_mode,
                                  dynamic_axes=self._input_info.dynamic_axes,
                                  verbose=self._debug_options.logging.log_level
                                  < LogLevel.WARNING,
                                  export_params=False,
                                  keep_initializers_as_inputs=True)
        except RuntimeError as e:
            raise RuntimeError(
                'There was an error while exporting the PyTorch model to ONNX: {}'
                .format(e))
        exported_model = onnx.load_model_from_string(f.getvalue())

        exported_model = _post_process_after_export(
            exported_model, self._enable_custom_autograd_function)

        return exported_model
Example #13
def _load_onnx_model(onnx_string_or_file):
    """
    Loads ONNX model from file or string.
    """
    if hasattr(onnx_string_or_file, "seek"):
        onnx_string_or_file.seek(0)
        return onnx.load(onnx_string_or_file)
    return onnx.load_model_from_string(onnx_string_or_file)
Example #14
def build_engine(onnx_path,
                 seq_len=192,
                 max_seq_len=256,
                 batch_size=8,
                 max_batch_size=64,
                 trt_fp16=True,
                 verbose=True,
                 max_workspace_size=None,
                 encoder=True):
    """Builds TRT engine from an ONNX file
    Note that network output 1 is unmarked so that the engine will not use
    vestigial length calculations associated with masked_fill
    """
    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) if verbose else trt.Logger(
        trt.Logger.WARNING)
    builder = trt.Builder(TRT_LOGGER)
    builder.max_batch_size = max_batch_size

    with open(onnx_path, 'rb') as model_fh:
        model = model_fh.read()

    model_onnx = onnx.load_model_from_string(model)
    input_feats = model_onnx.graph.input[0].type.tensor_type.shape.dim[
        1].dim_value

    if trt_fp16:
        builder.fp16_mode = True
        print("Optimizing for FP16")
        config_flags = 1 << int(
            trt.BuilderFlag.FP16)  # | 1 << int(trt.BuilderFlag.STRICT_TYPES)
    else:
        config_flags = 0
    builder.max_workspace_size = max_workspace_size if max_workspace_size \
        else (
                4 * 1024 * 1024 * 1024)

    config = builder.create_builder_config()
    config.flags = config_flags

    profile = builder.create_optimization_profile()
    profile.set_shape("audio_signal" if encoder else "encoder_output",
                      min=(1, input_feats, seq_len),
                      opt=(batch_size, input_feats, seq_len),
                      max=(max_batch_size, input_feats, max_seq_len))
    # if encoder:
    #     profile.set_shape("encoded_lengths",
    #                         min=(1,), opt=(batch_size,),
    #                         max=(max_batch_size,))
    config.add_optimization_profile(profile)

    explicit_batch = 1 << (int)(
        trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    network = builder.create_network(explicit_batch)

    with trt.OnnxParser(network, TRT_LOGGER) as parser:
        parsed = parser.parse(model)
        print("Parsing returned ", parsed)
        return builder.build_engine(network, config=config)
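
A hedged usage sketch for build_engine; the ONNX path and shape parameters below are placeholders, and the call assumes the same older TensorRT release whose builder APIs (fp16_mode, max_workspace_size) are used above:

# Hypothetical path and sizes, for illustration only.
engine = build_engine("encoder.onnx",
                      seq_len=192,
                      max_seq_len=256,
                      batch_size=8,
                      max_batch_size=64,
                      trt_fp16=True,
                      encoder=True)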
Example #15
    def _build_graph(self):
        """Build an optimized gradient graph using the module_graph_builder"""

        super()._build_graph()

        if self._save_onnx:
            onnx.save(self._optimized_onnx_model, self._save_onnx_prefix + '_training.onnx')
            inference_optimized_model = onnx.load_model_from_string(self._graph_builder.get_inference_optimized_model())
            onnx.save(inference_optimized_model, self._save_onnx_prefix + '_inference_optimized.onnx')
Example #16
def get_model_proto(config, initializers=None):
    model = Bert(config, pipeline=True, initializers=initializers)

    sequence_info = popart.TensorInfo("UINT32", [config.micro_batch_size * config.sequence_length])
    indices = model.builder.addInputTensor(sequence_info)
    positions = model.builder.addInputTensor(sequence_info)
    segments = model.builder.addInputTensor(sequence_info)

    output = model.build_graph(indices, positions, segments)

    return onnx.load_model_from_string(model.builder.getModelProto())
Example #17
def run_models(config, proto, indices, positions, segments, output,
               popart_model, torch_model):
    onnx_proto = onnx.load_model_from_string(proto)
    check_model(torch_model, onnx_proto, get_mapping(config),
                get_transform(config))

    # Run the models
    popart_inputs = {
        indices:
        np.random.randint(0, config.vocab_length,
                          (config.batch_size * config.sequence_length)).astype(
                              np.uint32),
        positions:
        np.random.randint(
            0,
            config.sequence_length,
            (config.batch_size * config.sequence_length),
        ).astype(np.uint32),
        segments:
        np.random.randint(
            0,
            2,
            (config.batch_size * config.sequence_length),
        ).astype(np.uint32),
    }

    popart_outputs, post_proto = run_py(
        proto,
        popart_inputs,
        output,
        ipus=popart_model.total_ipus,
    )

    torch_inputs = {
        "input_ids":
        popart_inputs[indices].reshape(config.batch_size,
                                       config.sequence_length),
        "position_ids":
        popart_inputs[positions].reshape(config.batch_size,
                                         config.sequence_length),
        "token_type_ids":
        popart_inputs[segments].reshape(config.batch_size,
                                        config.sequence_length),
    }

    torch_model.eval()
    torch_outputs = run_fwd_model(torch_inputs, torch_model)

    check_model(torch_model, post_proto, get_mapping(config),
                get_transform(config))
    check_tensors(torch_outputs, popart_outputs)
    print("Test succeeded")
Example #18
def save_model(model, model_path, serialization=None):
    if serialization is None:
        metadata = get_model_metadata(model)
        serialization = metadata['serialization']

    if serialization not in SUPPORTED_SERIALIZATIONS:  # pragma: no cover
        raise ValueError("serialization should be one of %s, %s was given" %
                         (SUPPORTED_SERIALIZATIONS, serialization))

    raw_model = model
    if serialization == 'joblib':
        try:
            import joblib
        except ImportError:
            from sklearn.externals import joblib
        joblib.dump(model, model_path)
    elif serialization == 'pickle':
        import pickle
        with open(model_path, 'wb') as f:
            pickle.dump(model, f)
    elif serialization == 'xgboost':
        model.save_model(model_path)
    elif serialization == 'hdf5':
        model.save(model_path)
    elif serialization == 'pt':
        import torch
        torch.save(model.state_dict(), model_path)
    elif serialization == 'spark':
        from pyspark.ml import PipelineModel
        model.write().overwrite().save(model_path)
    elif serialization == 'pmml':
        if hasattr(model, 'read') and callable(model.read):
            model = model.read()
        if os.path.exists(model):
            with open(model, mode='rb') as f:
                model = f.read()
        mode = 'wb' if isinstance(model, (bytes, bytearray)) else 'w'
        with open(model_path, mode) as file:
            file.write(model)
    elif serialization == 'onnx':
        import onnx
        if isinstance(model, onnx.ModelProto):
            onnx_model = model
        elif isinstance(model, (bytes, bytearray)):
            onnx_model = onnx.load_model_from_string(model)
        else:
            onnx_model = onnx.load_model(model)
        onnx.save(onnx_model, model_path)
    elif serialization == 'lightgbm':
        model.save_model(model_path)

    return raw_model
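
A short usage sketch for the 'onnx' branch of save_model above; the input and output paths are placeholders, and serialization is passed explicitly so no metadata lookup is needed:

import onnx

onnx_model = onnx.load("model.onnx")  # hypothetical source model
save_model(onnx_model, "/tmp/model_copy.onnx", serialization='onnx')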
Example #19
    def _build_graph(self):
        if self._use_static_shape:
            self._graph_builder.build(self._input_info.shape)
        else:
            self._graph_builder.build()

        self._optimized_onnx_model = onnx.load_model_from_string(self._graph_builder.get_model())
        self._graph_info = self._graph_builder.get_graph_info()

        # TODO: Explore ways to make self._graph_info.initializer_names and self._graph_info.initializer_names_to_train
        #       a set (unordered_set in the backend) that does not require a copy on each reference.
        self._graph_initializer_names = set(self._graph_info.initializer_names)
        self._graph_initializer_names_to_train = set(self._graph_info.initializer_names_to_train)
Example #20
def convert_to_onnx_object(model, export_parameters=None, **kwargs):
    """
    Convert given CatBoost model to ONNX-ML model.
    Categorical Features are not supported.

    Parameters
    ----------
    model : CatBoost trained model
    export_parameters : dict [default=None]
        Parameters for ONNX-ML export:
            * onnx_graph_name : string
                The name property of onnx Graph
            * onnx_domain : string
                The domain component of onnx Model
            * onnx_model_version : int
                The model_version component of onnx Model
            * onnx_doc_string : string
                The doc_string component of onnx Model
    Returns
    -------
    onnx_object : ModelProto
        The model in ONNX format
    """
    try:
        import onnx
    except ImportError as e:
        warnings.warn("To get working onnx model you should install onnx.")
        raise ImportError(str(e))

    import json
    if not model.is_fitted():
        raise CatBoostError(
            "There is no trained model to use save_model(). Use fit() to train model. Then use this method."
        )

    for name, value in kwargs.items():
        if name == 'target_opset' and value not in [None, 2]:
            warnings.warn(
                'target_opset argument is not supported. Default target_opset is 2 (ai.onnx.ml domain)'
            )
        elif name == 'initial_types' and value is not None:
            warnings.warn('initial_types argument is not supported')

    params_string = ""
    if export_parameters:
        params_string = json.dumps(export_parameters, cls=_NumpyAwareEncoder)

    model_str = _get_onnx_model(model._object, params_string)
    onnx_model = onnx.load_model_from_string(model_str)
    return onnx_model
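
A minimal usage sketch for convert_to_onnx_object, assuming a fitted CatBoost model trained on numeric features only (the toy data below is illustrative):

import numpy as np
import onnx
from catboost import CatBoostRegressor

# Toy numeric-only data; categorical features are not supported by the converter.
X = np.random.rand(100, 4)
y = np.random.rand(100)
cb_model = CatBoostRegressor(iterations=10, verbose=False)
cb_model.fit(X, y)

onnx_model = convert_to_onnx_object(
    cb_model, export_parameters={'onnx_graph_name': 'catboost_demo'})
onnx.save(onnx_model, 'catboost_demo.onnx')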
Example #21
    def execute(self, output_path):
        """ Execute the freeze process and dump model to disk.

        Args:
            output_path: frozen ONNX format model path.
        """
        f = io.BytesIO()
        convert_model_to_onnx(self.model, self.model_desc, "cpu", f)
        self.onnx_model = onnx.load_model_from_string(f.getvalue())
        if not self._check_control_flow(self.onnx_model):
            print("Control flow not yet supported")
        if not self._check_op_availability(self.onnx_model):
            print("Model ops not fully supported")
        onnx.save(self.onnx_model, output_path)
Example #22
def pytorch_result_and_model(config,
                             inputs,
                             popart_proto,
                             weight_transposed,
                             is_bwd=False):
    """Run pytorch model based on config.

    Args:
        config (BertConfig): Popart config.
        inputs (np.ndarray): Input np array.
        popart_proto (onnx.proto):  Onnx protobuf.
        weight_transposed (bool): If True, onnx weights are constructed transposed.
        is_bwd (bool, optional): True if bwd_pass. Defaults to False.

    Returns:
        Tuple: Output np.array and Torch model.
    """
    torch_config = TorchBertConfig(config.vocab_length,
                                   config.hidden_size,
                                   config.num_layers,
                                   config.attention_heads,
                                   layer_norm_eps=config.layer_norm_eps)
    torch_model = nn.Embedding(torch_config.vocab_size,
                               torch_config.hidden_size,
                               padding_idx=0)
    # Turn off dropout
    torch_model.eval()

    # Conversion of the popart model to onnx
    proto = onnx.load_model_from_string(popart_proto)
    initializers = get_initializers(proto, weight_transposed)

    for name, weight in torch_model.named_parameters():
        weight.data.copy_(torch.from_numpy(initializers[name]).float())

    result = run_fwd_model(inputs, torch_model)

    if is_bwd:
        optim = torch.optim.SGD(torch_model.parameters(),
                                0.01,
                                weight_decay=0.0,
                                momentum=0.0)

        result = torch_model(*[torch.from_numpy(t).long() for t in inputs])[0]
        torch_loss = 0.1 * torch.norm(result, 1)
        torch_loss.backward()
        optim.step()
        result = [result.detach().numpy()]

    return result, torch_model
Example #23
def get_model_proto(config, mode, initializers=None):
    model = get_model(config, mode, initializers=initializers)

    sequence_info = popart.TensorInfo(
        "UINT32", [config.micro_batch_size * config.sequence_length])
    indices = model.builder.addInputTensor(sequence_info)
    positions = model.builder.addInputTensor(sequence_info)
    segments = model.builder.addInputTensor(sequence_info)

    if mode == ExecutionMode.PHASED:
        output = model(indices, positions, segments)
    else:
        output = model.build_graph(indices, positions, segments)
    return onnx.load_model_from_string(model.builder.getModelProto())
Example #24
def testToyBERTSaveAsONNX():
    device = 'cuda'
    onnx_file_name = '_____temp_toy_bert_onnx_model.onnx'
    if os.path.exists(onnx_file_name):
        os.remove(onnx_file_name)
    assert not os.path.exists(onnx_file_name)

    # Load trainer
    model_desc = bert_model_description()
    model = load_bert_onnx_model()

    optim_config = optim.LambConfig()
    opts = orttrainer.ORTTrainerOptions({
        'debug': {
            'deterministic_compute': True
        },
        'device': {
            'id': device,
        },
    })

    trainer = orttrainer.ORTTrainer(model,
                                    model_desc,
                                    optim_config,
                                    options=opts)

    trainer.save_as_onnx(onnx_file_name)
    assert os.path.exists(onnx_file_name)

    with open(onnx_file_name, "rb") as f:
        bin_str = f.read()
        reload_onnx_model = onnx.load_model_from_string(bin_str)
    os.remove(onnx_file_name)

    # Create a new trainer from persisted ONNX model and compare with original ONNX model
    trainer_from_onnx = orttrainer.ORTTrainer(reload_onnx_model,
                                              model_desc,
                                              optim_config,
                                              options=opts)
    assert trainer_from_onnx._onnx_model is not None
    assert (id(trainer_from_onnx._onnx_model) != id(trainer._onnx_model))
    for initializer, loaded_initializer in zip(
            trainer._onnx_model.graph.initializer,
            trainer_from_onnx._onnx_model.graph.initializer):
        assert initializer.name == loaded_initializer.name
    assert (onnx.helper.printable_graph(
        trainer_from_onnx._onnx_model.graph) == onnx.helper.printable_graph(
            trainer._onnx_model.graph))
    _test_helpers.assert_onnx_weights(trainer, trainer_from_onnx)
Example #25
    def _duplicate_model_for_tuning(self):
        if not os.path.isdir(self.result_path):
            os.mkdir(self.result_path)
        model_dir = os.path.dirname(self.model_path)

        s = _load_bytes(self.model_path)
        model_proto = load_model_from_string(s)
        tensors = _get_all_tensors(model_proto)
        for tensor in tensors:
            info = ExternalDataInfo(tensor)
            file_location = _sanitize_path(info.location)
            if file_location:
                copy(os.path.join(model_dir, file_location), self.result_path)

        optimized_model_path = os.path.join(self.result_path, "optimized_model.onnx")
        copy(self.model_path, optimized_model_path)
        self.model_path = optimized_model_path
Example #26
def pytorch_result_and_model(torch_config,
                             inputs,
                             popart_proto,
                             mode,
                             is_bwd=False,
                             momentum=0.0):
    # Conversion of the popart model to onnx
    proto = onnx.load_model_from_string(popart_proto)

    torch_model = BertFCN(torch_config)
    # Turn off dropout
    torch_model.eval()

    copy_weights_to_torch(torch_model,
                          proto,
                          TORCH_TO_ONNX[mode],
                          transform=TRANSPOSE_WEIGHTS)

    result = run_fwd_model(inputs, torch_model)

    if is_bwd:
        l1_lambda = 0.1
        optim = torch.optim.SGD(torch_model.parameters(),
                                lr,
                                weight_decay=0.0,
                                momentum=momentum)

        if momentum > 0.0:
            for group in optim.param_groups:
                for p in group['params']:
                    optim.state[p]['momentum_buffer'] = p.data * 0.
                    optim.state[p]['exp_avg'] = p.data * 0.
                    optim.state[p]['exp_avg_sq'] = p.data * 0.
                    optim.state[p]['step'] = 0

        for _ in range(num_reps_bwd):
            result = torch_model(
                *[torch.from_numpy(t).float() for t in inputs])[0]
            torch_loss = l1_lambda * torch.norm(result, 1)
            torch_loss.backward()
            optim.step()
            optim.zero_grad()
        result = [result.detach().numpy()]

    return result, torch_model
Example #27
    def _get_inference_graph(self, *inputs, **kwargs):
        '''Exports PyTorch `module` to ONNX with training flag, using `*inputs` as input

        TODO: How to support dynamic axes? Dimensions are determined by samples
        '''

        # Setup dynamic axes for onnx model
        input_names, dynamic_axes, self._input_names_require_grad, _ = \
            _ortmodule_io.parse_inputs_for_onnx_export(
                self._original_module_parameters, None, *inputs, **kwargs)
        output_names, output_dynamic_axes, self._original_module_output_schema = \
            _ortmodule_io.parse_outputs_for_onnx_export_and_extract_output_schema(
                self._original_module, inputs, kwargs)
        dynamic_axes.update(output_dynamic_axes)

        # Export torch.nn.Module to ONNX
        f = io.BytesIO()

        # Deepcopy inputs, since input values may change after model run.
        # NOTE: Inputs may contain tensors that have attributes preventing their deepcopy (example grad_fn).
        # Therefore, deepcopy only the data component of the input tensors for export.
        sample_inputs_copy, sample_kwargs_copy = \
            _ortmodule_io.deepcopy_model_input(
                *inputs, **kwargs)

        try:
            with torch.no_grad():
                torch.onnx.export(self._flattened_output_module,
                                  sample_inputs_copy + (sample_kwargs_copy, ),
                                  f,
                                  input_names=input_names,
                                  output_names=output_names,
                                  opset_version=ONNX_OPSET_VERSION,
                                  do_constant_folding=False,
                                  training=torch.onnx.TrainingMode.TRAINING,
                                  dynamic_axes=dynamic_axes,
                                  verbose=self._verbosity < Verbosity.WARNING,
                                  export_params=False,
                                  keep_initializers_as_inputs=True)
        except RuntimeError as e:
            raise RuntimeError(
                'There was an error while exporting the PyTorch model to ONNX: {}'
                .format(e))

        return onnx.load_model_from_string(f.getvalue())
Example #28
    def test_pyop_hooking(self):  # type: () -> None
        model = torchvision.models.mobilenet_v2(pretrained=False)
        x = torch.rand(1, 3, 224, 224)
        with io.BytesIO() as f:
            torch.onnx.export(model, (x, ), f)
            model = onnx.load_model_from_string(f.getvalue())

            self.assertTrue(model.graph.node[5].op_type == 'Conv')
            hkd_model = hook_model_op(model, model.graph.node[5].name,
                                      TestPyTorchCustomOp.on_hook,
                                      [PyOp.dt_float] * 3)

            so = _ort.SessionOptions()
            so.register_custom_ops_library(_get_library_path())
            sess = _ort.InferenceSession(hkd_model.SerializeToString(), so)
            TestPyTorchCustomOp._hooked = False
            sess.run(None, {'input.1': x.numpy()})
            self.assertTrue(TestPyTorchCustomOp._hooked)
Example #29
    def is_support(self):
        try:
            import onnx

            if isinstance(self.model, onnx.ModelProto):
                self.onnx_model = self.model
                return True

            if isinstance(self.model, (bytes, bytearray)):
                onnx_model = onnx.load_model_from_string(self.model)
            else:
                # could be either readable or a file path
                onnx_model = onnx.load_model(self.model)

            onnx.checker.check_model(onnx_model)
            self.onnx_model = onnx_model
            return True
        except Exception:
            return False
Example #30
def pytorch_result_and_model(torch_config,
                             inputs,
                             popart_proto,
                             weight_decay=0.0,
                             lr=0.0,
                             l1_lambda=0.0):

    proto = onnx.load_model_from_string(popart_proto)
    torch_model = BertFCN(torch_config)
    torch_model.eval()  # Turn off dropout
    copy_weights_to_torch(torch_model,
                          proto,
                          TORCH_TO_ONNX,
                          transform=TRANSPOSE_WEIGHTS)
    run_fwd_model(inputs, torch_model)

    decay = []
    no_decay = []
    for name, param in torch_model.named_parameters():
        if "bias" in name or "LayerNorm" in name:
            no_decay.append(param)
        else:
            decay.append(param)

    params = [{
        'params': no_decay,
        'weight_decay': 0.
    }, {
        'params': decay,
        'weight_decay': weight_decay
    }]

    optim = torch.optim.SGD(params, lr, momentum=0.0)

    result = torch_model(*[torch.from_numpy(t).float() for t in inputs])[0]
    torch_loss = l1_lambda * torch.norm(result, 1)
    torch_loss.backward()
    optim.step()
    result = result.detach().numpy()

    return result, torch_model
Example #31
def _get_onnx_model(torch_model, model_inputs):
    model_outputs = torch_model(*model_inputs)
    if isinstance(model_outputs, torch.Tensor):
        model_outputs = [model_outputs]
    dynamic_axes = {}
    input_names = []
    output_names = []
    for i, model_input in enumerate(model_inputs):
        input_name = f"input-{i}"
        input_names.append(input_name)
        dynamic_axes[input_name] = {}
        for dim_idx in range(len(model_input.shape)):
            dynamic_axes[input_name].update(
                {dim_idx: f"{input_name}_dim{dim_idx}"})

    for i, model_output in enumerate(model_outputs):
        output_name = f"output-{i}"
        output_names.append(output_name)
        dynamic_axes[output_name] = {}
        for dim_idx in range(len(model_output.shape)):
            dynamic_axes[output_name].update(
                {dim_idx: f"{output_name}_dim{dim_idx}"})

    f = io.BytesIO()
    torch.onnx.export(
        torch_model,
        model_inputs,
        f,
        input_names=input_names,
        output_names=output_names,
        opset_version=14,
        do_constant_folding=False,
        training=torch.onnx.TrainingMode.TRAINING,
        dynamic_axes=dynamic_axes,
        export_params=True,
        keep_initializers_as_inputs=False,
    )
    return onnx.load_model_from_string(f.getvalue())
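
A short usage sketch for _get_onnx_model above; the toy module and inputs are illustrative, and the inputs must be passed as a tuple of tensors:

import onnx
import torch

toy_model = torch.nn.Linear(4, 2)
sample_inputs = (torch.randn(3, 4),)
exported = _get_onnx_model(toy_model, sample_inputs)
print(onnx.helper.printable_graph(exported.graph))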