Example #1
    def test_save_and_load_model(self):  # type: () -> None
        proto = self._simple_model()
        cls = ModelProto
        proto_string = onnx._serialize(proto)

        # Test if input is string
        loaded_proto = onnx.load_model_from_string(proto_string)
        self.assertTrue(proto == loaded_proto)

        # Test if input has a read function
        f = io.BytesIO()
        onnx.save_model(proto_string, f)
        f = io.BytesIO(f.getvalue())
        loaded_proto = onnx.load_model(f, cls)
        self.assertTrue(proto == loaded_proto)

        # Test if input is a file name
        try:
            fi = tempfile.NamedTemporaryFile(delete=False)
            onnx.save_model(proto, fi)
            fi.close()

            loaded_proto = onnx.load_model(fi.name, cls)
            self.assertTrue(proto == loaded_proto)
        finally:
            os.remove(fi.name)
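
For reference, a minimal self-contained round trip through onnx.save_model and onnx.load_model, using a tiny identity model built with onnx.helper (the model is illustrative, not taken from the test above):

import onnx
from onnx import TensorProto, helper

# Build a trivial single-node model (hypothetical, for illustration only).
x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [1])
y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [1])
graph = helper.make_graph([helper.make_node("Identity", ["x"], ["y"])],
                          "tiny_graph", [x], [y])
model = helper.make_model(graph)
onnx.checker.check_model(model)

onnx.save_model(model, "tiny.onnx")    # accepts a path or a writable file object
loaded = onnx.load_model("tiny.onnx")  # returns a ModelProto
assert model == loaded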
Example #2
 def save_model_to_file(self, output_path, use_external_data_format=False):
    '''
    Save model to external data, which is needed for model size > 2GB
    '''
    if use_external_data_format:
        onnx.external_data_helper.convert_model_to_external_data(self.model,
                                                                 all_tensors_to_one_file=True,
                                                                 location=Path(output_path).name + ".data")
    onnx.save_model(self.model, output_path)
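
On newer onnx releases the external-data conversion can be folded into the save itself, since onnx.save_model accepts the same keywords used in Examples #20 and #29 below; a sketch, with model and output_path as placeholders:

from pathlib import Path
import onnx

onnx.save_model(model, output_path,
                save_as_external_data=True,
                all_tensors_to_one_file=True,
                location=Path(output_path).name + ".data",
                size_threshold=1024)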
Example #3
    def test_convert_model_to_external_data_one_file_per_tensor_with_attribute(self):  # type: () -> None
        model_file_path = self.get_temp_model_filename()

        convert_model_to_external_data(self.model, size_threshold=0, all_tensors_to_one_file=False, convert_attribute=True)
        onnx.save_model(self.model, model_file_path)

        self.assertTrue(os.path.isfile(model_file_path))
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, "input_value")))
        self.assertTrue(os.path.isfile(os.path.join(self.temp_dir, "attribute_value")))
Example #4
def onnx_convert(keras_model_file, output_file):
    custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file, custom_objects=custom_object_dict)

    # convert to onnx model
    onnx_model = keras2onnx.convert_keras(model, model.name)

    # save converted onnx model
    onnx.save_model(onnx_model, output_file)
Example #5
    def test_convert_model_to_external_data_with_size_threshold(self) -> None:
        model_file_path = self.get_temp_model_filename()

        convert_model_to_external_data(self.model, size_threshold=1024)
        onnx.save_model(self.model, model_file_path)

        model = onnx.load_model(model_file_path)
        initializer_tensor = model.graph.initializer[0]
        self.assertFalse(initializer_tensor.HasField("data_location"))
Example #6
def quantize_dynamic(model_input: Path,
                     model_output: Path,
                     op_types_to_quantize=[],
                     per_channel=False,
                     reduce_range=False,
                     activation_type=QuantType.QUInt8,
                     weight_type=QuantType.QUInt8,
                     nodes_to_quantize=[],
                     nodes_to_exclude=[]):
    '''
        Given an onnx model, create a quantized onnx model and save it into a file
    :param model_input: file path of model to quantize
    :param model_output: file path of quantized model
    :param op_types_to_quantize: specify the types of operators to quantize, like ['Conv'] to quantize Conv only. It quantizes all supported operators by default
    :param per_channel: quantize weights per channel
    :param reduce_range: quantize weights with 7-bits. It may improve the accuracy for some models running on non-VNNI machine, especially for per-channel mode
    :param activation_type: quantization data type of activation
    :param weight_type: quantization data type of weight
    :param nodes_to_quantize:
        List of nodes names to quantize. When this list is not None only the nodes in this list
        are quantized.
        example:
        [
            'Conv__224',
            'Conv__252'
        ]
    :param nodes_to_exclude:
        List of nodes names to exclude. The nodes in this list will be excluded from quantization
        when it is not None.
    '''

    input_qType = onnx_proto.TensorProto.INT8 if activation_type == QuantType.QInt8 else onnx_proto.TensorProto.UINT8
    weight_qType = onnx_proto.TensorProto.INT8 if weight_type == QuantType.QInt8 else onnx_proto.TensorProto.UINT8
    mode = QuantizationMode.IntegerOps

    #optimize the original model
    optimized_model = optimize_model(Path(model_input))

    if not op_types_to_quantize or len(op_types_to_quantize) == 0:
        op_types_to_quantize = list(IntegerOpsRegistry.keys())

    quantizer = ONNXQuantizer(
        optimized_model,
        per_channel,
        reduce_range,
        mode,
        False,  #static
        weight_qType,
        input_qType,
        None,
        nodes_to_quantize,
        nodes_to_exclude,
        op_types_to_quantize)

    quantizer.quantize_model()
    onnx.save_model(quantizer.model.model, model_output)
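
onnxruntime also exposes this flow as a public entry point, so most callers never build an ONNXQuantizer by hand; a typical invocation (file names are placeholders, assuming an onnxruntime release that ships quantize_dynamic) would be:

from onnxruntime.quantization import QuantType, quantize_dynamic

quantize_dynamic("model.onnx", "model.quant.onnx",
                 weight_type=QuantType.QUInt8)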
Example #7
def onnx_convert(keras_model_file, output_file, op_set):
    #custom_object_dict = get_custom_objects()
    model = load_model(keras_model_file)

    # convert to onnx model
    onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=op_set)

    # save converted onnx model
    onnx.save_model(onnx_model, output_file)
Example #8
    def cpu_infer(self, case_dir: str, model_file: bytes, type: str,
                  mode: str):
        # create session
        try:
            print('[onnx]: using simplified model')
            sess = ort.InferenceSession(model_file)
        except Exception as e:
            print(e)
            try:
                print('[onnx]: using origin model')
                model_file = os.path.join(case_dir, 'test.onnx')
                sess = ort.InferenceSession(model_file)
            except Exception as e:
                print(e)
                print('[onnx]: using converted model')
                onnx_model = onnx.load(model_file)
                onnx_model = version_converter.convert_version(onnx_model, 8)
                model_file = os.path.join(case_dir, 'converted.onnx')
                onnx.save_model(onnx_model, model_file)
                sess = ort.InferenceSession(model_file)

        if mode == "dataset":
            for input in self.inputs:
                input_dict = {}
                for in_data in input['data']:
                    topk = []
                    input_dict[input['name']] = self.transform_input(
                        self.data_pre_process(in_data[0]), "float32", "CPU")
                    outputs = sess.run(None, input_dict)
                    for output in outputs:
                        topk.append((in_data[1], get_topK('cpu', 1,
                                                          output)[0]))
                    if os.path.exists(os.path.join(case_dir,
                                                   "cpu_dataset.txt")):
                        os.remove(os.path.join(case_dir, "cpu_dataset.txt"))
                    self.output_paths.append(
                        (os.path.join(case_dir, 'cpu_result_0.bin'),
                         os.path.join(case_dir, 'cpu_result_0.txt')))
                    with open(self.output_paths[-1][1], 'a') as f:
                        for i in range(len(topk)):
                            f.write(topk[i][0].split('/')[-1] + " " +
                                    str(topk[i][1]) + '\n')
        else:
            input_dict = {}
            for input in self.inputs:
                input_dict[input['name']] = self.transform_input(
                    self.data_pre_process(input['data']), "float32", "CPU")

            outputs = sess.run(None, input_dict)
            i = 0
            for output in outputs:
                bin_file = os.path.join(case_dir, f'cpu_result_{i}.bin')
                text_file = os.path.join(case_dir, f'cpu_result_{i}.txt')
                self.output_paths.append((bin_file, text_file))
                output.tofile(bin_file)
                self.totxtfile(text_file, output)
                i += 1
Example #9
    def save_model_to_file(self, output_path):
        logger.info(f"Output model to {output_path}")

        if output_path.endswith(".json"):
            assert isinstance(self.model, ModelProto)
            with open(output_path, "w") as out:
                out.write(str(self.model))
        else:
            save_model(self.model, output_path, format=None)
Example #10
def setup_module():
    onnx.save_model(create_onnx_model(), onnx_model_filename)
    onnx.save_model(create_onnx_model_2(), onnx_model_2_filename)
    onnx.save_model(create_onnx_model_with_custom_attributes(),
                    onnx_model_with_custom_attributes_filename)
    onnx.save_model(create_onnx_model_with_subgraphs(),
                    onnx_model_with_subgraphs_filename)
    onnx.save_model(create_onnx_model_for_op_extension(),
                    onnx_model_for_op_extension_test)
Example #11
    def test_convert_model_to_external_data_without_size_threshold(self):  # type: () -> None
        model_file_path = self.get_temp_model_filename()
        convert_model_to_external_data(self.model, size_threshold=0)
        onnx.save_model(self.model, model_file_path)

        model = onnx.load_model(model_file_path)
        initializer_tensor = model.graph.initializer[0]
        self.assertTrue(initializer_tensor.HasField("data_location"))
        self.assertTrue(np.allclose(to_array(initializer_tensor), self.initializer_value))
Example #12
def quantize(onnx_model_path: Path) -> Path:
    """
    Quantize the weights of the model from float32 to int8 to allow very efficient inference on modern CPU

    Args:
        onnx_model_path: Path to location the exported ONNX model is stored

    Returns: The Path to the generated quantized model
    """
    import onnx
    from onnx.onnx_pb import ModelProto
    from onnxruntime.quantization import QuantizationMode
    from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer
    from onnxruntime.quantization.registry import IntegerOpsRegistry

    # Load the ONNX model
    onnx_model = onnx.load(onnx_model_path.as_posix())

    if parse(onnx.__version__) < parse("1.5.0"):
        print(
            "Models larger than 2GB will fail to quantize due to protobuf constraint.\n"
            "Please upgrade to onnxruntime >= 1.5.0.")

    # Copy it
    copy_model = ModelProto()
    copy_model.CopyFrom(onnx_model)

    # Construct quantizer
    quantizer = ONNXQuantizer(
        model=copy_model,
        per_channel=False,
        reduce_range=False,
        mode=QuantizationMode.IntegerOps,
        static=False,
        weight_qType=True,
        input_qType=False,
        tensors_range=None,
        nodes_to_quantize=None,
        nodes_to_exclude=None,
        op_types_to_quantize=list(IntegerOpsRegistry),
    )

    # Quantize and export
    quantizer.quantize_model()

    # Append "-quantized" at the end of the model's name
    quantized_model_path = generate_identified_filename(
        onnx_model_path, "-quantized")

    # Save model
    print(
        f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}"
    )
    onnx.save_model(quantizer.model.model, quantized_model_path.as_posix())

    return quantized_model_path
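
A call site for this helper is a one-liner; the input path below is a placeholder:

from pathlib import Path

quantized_path = quantize(Path("model.onnx"))
print(quantized_path)  # e.g. model-quantized.onnx, per generate_identified_filename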
Example #13
def save_model(onnx_model, path, conda_env=None, mlflow_model=Model()):
    """
    Save an ONNX model to a path on the local file system.

    :param onnx_model: ONNX model to be saved.
    :param path: Local path where the model is to be saved.
    :param conda_env: Either a dictionary representation of a Conda environment or the path to a
                      Conda environment yaml file. If provided, this describes the environment
                      this model should be run in. At minimum, it should specify the dependencies
                      contained in :func:`get_default_conda_env()`. If `None`, the default
                      :func:`get_default_conda_env()` environment is added to the model.
                      The following is an *example* dictionary representation of a Conda
                      environment::

                        {
                            'name': 'mlflow-env',
                            'channels': ['defaults'],
                            'dependencies': [
                                'python=3.6.0',
                                'onnx=1.4.1',
                                'onnxruntime=0.3.0'
                            ]
                        }

    :param mlflow_model: :py:mod:`mlflow.models.Model` this flavor is being added to.
    """
    import onnx

    path = os.path.abspath(path)
    if os.path.exists(path):
        raise MlflowException(message="Path '{}' already exists".format(path),
                              error_code=RESOURCE_ALREADY_EXISTS)
    os.makedirs(path)
    model_data_subpath = "model.onnx"
    model_data_path = os.path.join(path, model_data_subpath)

    # Save onnx-model
    onnx.save_model(onnx_model, model_data_path)

    conda_env_subpath = "conda.yaml"
    if conda_env is None:
        conda_env = get_default_conda_env()
    elif not isinstance(conda_env, dict):
        with open(conda_env, "r") as f:
            conda_env = yaml.safe_load(f)
    with open(os.path.join(path, conda_env_subpath), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    pyfunc.add_to_model(mlflow_model,
                        loader_module="mlflow.onnx",
                        data=model_data_subpath,
                        env=conda_env_subpath)
    mlflow_model.add_flavor(FLAVOR_NAME,
                            onnx_version=onnx.__version__,
                            data=model_data_subpath)
    mlflow_model.save(os.path.join(path, "MLmodel"))
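
Assuming this is mlflow's ONNX flavor, a caller can hand it a ModelProto and later reload through the pyfunc flavor registered above; a sketch with a placeholder directory (mlflow.pyfunc.load_model exists on recent mlflow releases):

import mlflow.pyfunc

save_model(onnx_model, path="onnx_model_dir")        # onnx_model: a ModelProto
loaded = mlflow.pyfunc.load_model("onnx_model_dir")  # generic pyfunc wrapper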
Example #14
    def test_compile_with_no_input_formats(self):
        with NamedTemporaryFile(suffix='.onnx') as model_file:
            onnx.save_model(_make_onnx_model().model_proto, model_file.name)

            config = Config.from_json({})
            compiled = onnx_compiler.compile_source(source=ONNXModelFile(
                model_file.name),
                                                    config=config)

        self.assertEqual(compiled.input_data_formats, [None, None])
Example #15
def export_and_recurse(node, attribute, output_dir, level):
    name = node.name
    name = name.replace('/', '_')
    sub_model = onnx.ModelProto()
    sub_model.graph.MergeFrom(attribute.g)
    filename = 'L' + str(
        level
    ) + '_' + node.op_type + '_' + attribute.name + '_' + name + '.onnx'
    onnx.save_model(sub_model, os.path.join(output_dir, filename))
    dump_subgraph(sub_model, output_dir, level + 1)
Example #16
def export_and_recurse(node, attribute, output_dir, level):
    name = node.name
    name = name.replace("/", "_")
    sub_model = onnx.ModelProto()
    sub_model.graph.MergeFrom(attribute.g)
    filename = "L" + str(
        level
    ) + "_" + node.op_type + "_" + attribute.name + "_" + name + ".onnx"
    onnx.save_model(sub_model, os.path.join(output_dir, filename))
    dump_subgraph(sub_model, output_dir, level + 1)
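
Both variants delegate to a dump_subgraph that is not shown here; a minimal sketch of what such a driver presumably does (visit every graph-typed node attribute, e.g. If/Loop/Scan bodies, and recurse) could be:

import onnx

def dump_subgraph(model, output_dir, level=0):
    # Hypothetical counterpart to export_and_recurse above: export the body
    # graph of every attribute whose type is GRAPH.
    for node in model.graph.node:
        for attribute in node.attribute:
            if attribute.type == onnx.AttributeProto.GRAPH:
                export_and_recurse(node, attribute, output_dir, level)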
Example #17
    def run_onnx_runtime(self,
                         case_name,
                         onnx_model,
                         data,
                         expected,
                         rtol=1.e-3,
                         atol=1.e-6):
        temp_model_file = TestKerasApplications.get_temp_file('temp_' +
                                                              case_name +
                                                              '.onnx')
        onnx.save_model(onnx_model, temp_model_file)
        try:
            import onnxruntime
            sess = onnxruntime.InferenceSession(temp_model_file)
        except ImportError:
            return True

        if not isinstance(expected, list):
            expected = [expected]

        data = data if isinstance(data, list) else [data]
        input_names = sess.get_inputs()
        # To keep the test code simple, we require the input names in Keras test
        # cases to be in alphabetical order; this always holds unless something
        # actively prevents it.
        feed = zip(sorted(i_.name for i_ in input_names), data)
        actual = sess.run(None, dict(feed))
        res = all(
            np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol)
            for n_ in range(len(expected)))
        if res and temp_model_file not in self.model_files:  # register for cleanup only on success; failed-case files are kept for diagnosis.
            self.model_files.append(temp_model_file)

        if not res:
            for n_ in range(len(expected)):
                expected_list = expected[n_].flatten()
                actual_list = actual[n_].flatten()
                diff_list = abs(expected_list - actual_list)
                count_total = len(expected_list)
                count_error = 0

                for e_, a_, d_ in zip(expected_list, actual_list, diff_list):
                    if d_ > atol + rtol * abs(a_):
                        if count_error < 10:  # print the first 10 mismatches
                            print("case = " + case_name +
                                  ", result mismatch for expected = " +
                                  str(e_) + ", actual = " + str(a_),
                                  file=sys.stderr)
                        count_error = count_error + 1

                print("case = " + case_name + ", " + str(count_error) +
                      " mismatches out of " + str(count_total) + " for list " +
                      str(n_),
                      file=sys.stderr)

        return res
Example #18
def SaveOnnxModel(onnx_model, onnx_save_path, need_polish=True):
    try:

        if need_polish:
            polished_model = onnx.utils.polish_model(onnx_model)
            onnx.save_model(polished_model, onnx_save_path)
        else:
            onnx.save_model(onnx_model, onnx_save_path)
        print("模型保存成功,已保存至:" + onnx_save_path)
    except Exception as e:
        print("模型存在问题,未保存成功:", e)
Example #19
    def test_mask_rcnn(self):
        onnx_model = keras2onnx.convert_keras(
            model.keras_model,
            target_opset=10,
            custom_op_conversions=tf2onnx_contrib_op_conversion)
        import skimage
        img_path = os.path.join(os.path.dirname(__file__), '../data',
                                'street.jpg')
        image = skimage.io.imread(img_path)
        images = [image]
        case_name = 'mask_rcnn'

        if not os.path.exists(tmp_path):
            os.mkdir(tmp_path)
        temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
        onnx.save_model(onnx_model, temp_model_file)
        try:
            import onnxruntime
            sess = onnxruntime.InferenceSession(temp_model_file)
        except ImportError:
            return True

        # preprocessing
        molded_images, image_metas, windows = model.mold_inputs(images)
        anchors = model.get_anchors(molded_images[0].shape)
        anchors = np.broadcast_to(anchors,
                                  (model.config.BATCH_SIZE, ) + anchors.shape)

        expected = model.keras_model.predict([
            molded_images.astype(np.float32),
            image_metas.astype(np.float32), anchors
        ])

        actual = \
            sess.run(None, {"input_image": molded_images.astype(np.float32),
                            "input_anchors": anchors,
                            "input_image_meta": image_metas.astype(np.float32)})

        rtol = 1.e-3
        atol = 1.e-6
        compare_idx = [0, 3]
        res = all(
            np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol)
            for n_ in compare_idx)
        if res and temp_model_file not in self.model_files:  # register for cleanup only on success; failed-case files are kept for diagnosis.
            self.model_files.append(temp_model_file)
        if not res:
            for n_ in compare_idx:
                expected_list = expected[n_].flatten()
                actual_list = actual[n_].flatten()
                print_mismatches(case_name, n_, expected_list, actual_list,
                                 atol, rtol)

        self.assertTrue(res)
Example #20
 def test_to_array_with_external_data(self):  # type: () -> None
     onnx.save_model(self.model,
                     self.model_file_path,
                     save_as_external_data=True,
                     all_tensors_to_one_file=False,
                     size_threshold=0)
     # raw_data of external tensor is not loaded
     model = onnx.load(self.model_file_path, load_external_data=False)
     # Specify self.temp_dir to load external tensor
     loaded_large_data = to_array(model.graph.initializer[0], self.temp_dir)
     self.assertTrue(np.allclose(loaded_large_data, self.large_data))
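
The reverse direction, pulling external tensors back into the proto after loading with load_external_data=False, goes through onnx.external_data_helper; a sketch with placeholder paths:

import onnx
from onnx.external_data_helper import load_external_data_for_model

model = onnx.load("model.onnx", load_external_data=False)
load_external_data_for_model(model, base_dir="dir_with_tensor_files")
# the external initializers now carry raw_data again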
Example #21
  def test_node_name_type_custom_functions(self):  # type: () -> None
    def convert_acos(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    def convert_topk_generic(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )
      params.parameters["axis"].intValue = node.attrs.get('axis', -1)
      params.parameters["k"].intValue = node.attrs['k']

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    def convert_topk_node_specific(builder, node, graph, err):
      params = NeuralNetwork_pb2.CustomLayerParams()
      params.className = node.op_type
      params.description = "Custom layer that corresponds to the ONNX op {}".format(node.op_type, )
      params.parameters["axis"].intValue = node.attrs.get('axis', -1)

      builder.add_custom(
        name=node.name,
        input_names=node.inputs,
        output_names=node.outputs,
        custom_proto_spec=params
      )

    onnx_model = _make_model_acos_exp_topk()
    onnx.save_model(onnx_model, 'acos.onnx')
    coreml_model = convert(model=onnx_model,
                           add_custom_layers=True,
                           custom_conversion_functions={'Acos':convert_acos, 'TopK':convert_topk_generic,
                                                        'output_values_output_indices':convert_topk_node_specific})

    spec = coreml_model.get_spec()
    layers = spec.neuralNetwork.layers
    self.assertIsNotNone(layers[0].custom)
    self.assertIsNotNone(layers[2].custom)
    self.assertEqual('Acos', layers[0].custom.className)
    self.assertEqual('TopK', layers[2].custom.className)
    self.assertEqual(0, layers[2].custom.parameters['axis'].intValue)
Example #22
    def test_save_model_does_not_convert_to_external_data_and_saves_the_model(self):  # type: () -> None
        model_file_path = self.get_temp_model_filename()
        onnx.save_model(self.model, model_file_path, save_as_external_data=False)
        self.assertTrue(os.path.isfile(model_file_path))

        model = onnx.load_model(model_file_path)
        initializer_tensor = model.graph.initializer[0]
        self.assertFalse(initializer_tensor.HasField("data_location"))

        attribute_tensor = model.graph.node[0].attribute[0].t
        self.assertFalse(attribute_tensor.HasField("data_location"))
Example #23
def main():
    if len(sys.argv) < 4:
        print('usage: decast.py model_in model_out <op1> [op2 ...]')
        return

    input = sys.argv[1]
    output = sys.argv[2]
    op_list = sys.argv[3:]

    oxml = onnx.load_model(input)
    oxml = decast(oxml, op_list)
    onnx.save_model(oxml, output)
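
Run it from a shell as "python decast.py model_in.onnx model_out.onnx Cast" (file names and the op list here are placeholders). The equivalent in-process call is a small sketch:

import sys

# Placeholder argv mirroring the command line: input model, output model,
# then the op types to remove.
sys.argv = ["decast.py", "model.onnx", "model_decast.onnx", "Cast"]
main()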
Example #24
    def test_convert_model_to_external_data_from_one_file_without_location_uses_model_name(
            self):  # type: () -> None
        model_file_path = self.get_temp_model_filename()

        convert_model_to_external_data(self.model,
                                       size_threshold=0,
                                       all_tensors_to_one_file=True)
        onnx.save_model(self.model, model_file_path)

        self.assertTrue(os.path.isfile(model_file_path))
        self.assertTrue(
            os.path.isfile(os.path.join(self.temp_dir, model_file_path)))
Example #25
 def save_model_weights(self, model, epoch):
     save_path = self.get_save_path(epoch)
     model.save_weights(save_path, overwrite=True)
     # try:
     if _has_onnx:
         save_path = self.get_save_path(epoch, ext='onnx')
         onnx_model = keras2onnx.convert_keras(model, model.name,
                                               target_opset=10)
         onnx.save_model(onnx_model, save_path)
     # except Exception as e:
     #     print(e)
     return
Example #26
    def test_yolov3(self):
        img_path = os.path.join(os.path.dirname(__file__), '../data', 'street.jpg')
        yolo3_yolo3_dir = os.path.join(os.path.dirname(__file__), '../../../keras-yolo3/yolo3')
        try:
            import onnxruntime
        except ImportError:
            return True

        from PIL import Image

        for is_tiny_yolo in [True, False]:
            if is_tiny_yolo:
                if not os.path.exists(tiny_model_file_name):
                    urllib.request.urlretrieve(YOLOV3_TINY_WEIGHTS_PATH, tiny_model_file_name)
                yolo_weights = load_model(tiny_model_file_name)
                model_path = tiny_model_file_name  # model path or trained weights path
                anchors_path = 'model_data/tiny_yolo_anchors.txt'
                case_name = 'yolov3-tiny'
            else:
                if not os.path.exists(model_file_name):
                    urllib.request.urlretrieve(YOLOV3_WEIGHTS_PATH, model_file_name)
                yolo_weights = load_model(model_file_name)
                model_path = model_file_name  # model path or trained weights path
                anchors_path = 'model_data/yolo_anchors.txt'
                case_name = 'yolov3'

            my_yolo = YOLO(model_path, anchors_path, yolo3_yolo3_dir)
            my_yolo.load_model(yolo_weights)
            onnx_model = convert_model(my_yolo, is_tiny_yolo)

            if not os.path.exists(tmp_path):
                os.mkdir(tmp_path)
            temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
            onnx.save_model(onnx_model, temp_model_file)

            sess = onnxruntime.InferenceSession(temp_model_file)

            image = Image.open(img_path)
            image_data = my_yolo.prepare_keras_data(image)

            all_boxes_k, all_scores_k, indices_k = my_yolo.final_model.predict([image_data, np.array([image.size[1], image.size[0]], dtype='float32').reshape(1, 2)])

            image_data_onnx = np.transpose(image_data, [0, 3, 1, 2])

            feed_f = dict(zip(['input_1', 'image_shape'],
                              (image_data_onnx, np.array([image.size[1], image.size[0]], dtype='float32').reshape(1, 2))))
            all_boxes, all_scores, indices = sess.run(None, input_feed=feed_f)

            expected = self.post_compute(all_boxes_k, all_scores_k, indices_k)
            actual = self.post_compute(all_boxes, all_scores, indices)

            res = all(np.allclose(expected[n_], actual[n_]) for n_ in range(3))
            self.assertTrue(res)
Example #27
    def test_yolov3(self):
        img_path = os.path.join(os.path.dirname(__file__), '../data',
                                'street.jpg')
        yolo3_yolo3_dir = os.path.join(os.path.dirname(__file__),
                                       '../../keras-yolo3/yolo3')

        if not os.path.exists(model_file_name):
            urllib.request.urlretrieve(YOLOV3_WEIGHTS_PATH, model_file_name)

        yolo_weights = load_model(model_file_name)
        my_yolo = YOLO(yolo3_yolo3_dir)
        my_yolo.load_model(yolo_weights)
        case_name = 'yolov3'
        target_opset = 10
        onnx_model = keras2onnx.convert_keras(my_yolo.final_model,
                                              target_opset=target_opset,
                                              channel_first_inputs=['input_1'])

        if not os.path.exists(tmp_path):
            os.mkdir(tmp_path)
        temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
        onnx.save_model(onnx_model, temp_model_file)

        try:
            import onnxruntime
            sess = onnxruntime.InferenceSession(temp_model_file)
        except ImportError:
            return True

        from PIL import Image
        image = Image.open(img_path)
        image_data = my_yolo.prepare_keras_data(image)

        all_boxes_k, all_scores_k, indices_k = my_yolo.final_model.predict([
            image_data,
            np.array([image.size[1], image.size[0]],
                     dtype='float32').reshape(1, 2)
        ])

        image_data_onnx = np.transpose(image_data, [0, 3, 1, 2])

        feed_f = dict(
            zip(['input_1', 'image_shape'],
                (image_data_onnx,
                 np.array([image.size[1], image.size[0]],
                          dtype='float32').reshape(1, 2))))
        all_boxes, all_scores, indices = sess.run(None, input_feed=feed_f)

        expected = self.post_compute(all_boxes_k, all_scores_k, indices_k)
        actual = self.post_compute(all_boxes, all_scores, indices)

        res = all(np.allclose(expected[n_], actual[n_]) for n_ in range(3))
        self.assertTrue(res)
Example #28
def main(args):
    if args.weight_h5 == '':
        print("Not found --weight_path!")
        exit()

    onnx_model_name = args.weight_onnx

    model = get_model(width=224, height=224, depth=32)
    model.load_weights(args.weight_h5)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    onnx.save_model(onnx_model, onnx_model_name)
    print("Saved file:", args.weight_onnx)
Example #29
    def test_save_model_with_external_data_multiple_times(
            self):  # type: () -> None
        # Test that onnx.save handles both regular tensors and external tensors properly
        # 1st save: save two tensors which have raw_data
        # Only w_large will be stored as external tensors since it's larger than 1024
        onnx.save_model(self.model,
                        self.model_file_path,
                        save_as_external_data=True,
                        all_tensors_to_one_file=False,
                        location=None,
                        size_threshold=1024,
                        convert_attribute=True)
        model_without_loading_external = onnx.load(self.model_file_path,
                                                   load_external_data=False)
        large_input_tensor = model_without_loading_external.graph.initializer[
            0]
        self.assertTrue(large_input_tensor.HasField("data_location"))
        self.assertTrue(
            np.allclose(to_array(large_input_tensor, self.temp_dir),
                        self.large_data))

        small_shape_tensor = model_without_loading_external.graph.initializer[
            1]
        self.assertTrue(not small_shape_tensor.HasField("data_location"))
        self.assertTrue(
            np.allclose(to_array(small_shape_tensor), self.small_data))

        # 2nd save: one tensor has raw_data (small); one external tensor (large)
        # Save them both as external tensors this time
        onnx.save_model(model_without_loading_external,
                        self.model_file_path,
                        save_as_external_data=True,
                        all_tensors_to_one_file=False,
                        location=None,
                        size_threshold=0,
                        convert_attribute=True)

        model_without_loading_external = onnx.load(self.model_file_path,
                                                   load_external_data=False)
        large_input_tensor = model_without_loading_external.graph.initializer[
            0]
        self.assertTrue(large_input_tensor.HasField("data_location"))
        self.assertTrue(
            np.allclose(to_array(large_input_tensor, self.temp_dir),
                        self.large_data))

        small_shape_tensor = model_without_loading_external.graph.initializer[
            1]
        self.assertTrue(small_shape_tensor.HasField("data_location"))
        self.assertTrue(
            np.allclose(to_array(small_shape_tensor, self.temp_dir),
                        self.small_data))
Example #30
def quantize_qat(model_input: Path,
                 model_output: Path,
                 op_types_to_quantize=[],
                 per_channel=False,
                 activation_type=QuantType.QUInt8,
                 weight_type=QuantType.QUInt8,
                 nodes_to_quantize=[],
                 nodes_to_exclude=[]):
    '''
        Given a quantization-aware trained onnx model, create a quantized onnx model and save it into a file
    :param model_input: file path of model to quantize
    :param model_output: file path of quantized model
    :param op_types_to_quantize: specify the types of operators to quantize, like ['Conv'] to quantize Conv only. It quantizes all supported operators by default.
    :param per_channel: quantize weights per channel
    :param activation_type: quantization data type of activation
    :param weight_type: quantization data type of weight
    :param nodes_to_quantize:
        List of nodes names to quantize. When this list is not None only the nodes in this list
        are quantized.
        example:
        [
            'Conv__224',
            'Conv__252'
        ]
    :param nodes_to_exclude:
        List of nodes names to exclude. The nodes in this list will be excluded from quantization
        when it is not None.
    '''

    input_qType = onnx_proto.TensorProto.INT8 if activation_type == QuantType.QInt8 else onnx_proto.TensorProto.UINT8
    weight_qType = onnx_proto.TensorProto.INT8 if weight_type == QuantType.QInt8 else onnx_proto.TensorProto.UINT8
    mode = QuantizationMode.IntegerOps

    #optimize the original model
    optimized_model = optimize_model(Path(model_input))

    if not op_types_to_quantize or len(op_types_to_quantize) == 0:
        op_types_to_quantize = list(IntegerOpsRegistry.keys())

    quantizer = ONNXQuantizer(
        optimized_model,
        per_channel,
        mode,
        False,  #static
        weight_qType,
        input_qType,
        None,
        nodes_to_quantize,
        nodes_to_exclude,
        op_types_to_quantize)

    quantizer.quantize_model()
    onnx.save_model(quantizer.model.model, model_output)
Example #31
def run_onnx_runtime(case_name,
                     onnx_model,
                     data,
                     expected,
                     model_files,
                     rtol=1.e-3,
                     atol=1.e-6):
    if not os.path.exists(tmp_path):
        os.mkdir(tmp_path)
    temp_model_file = os.path.join(tmp_path, 'temp_' + case_name + '.onnx')
    onnx.save_model(onnx_model, temp_model_file)
    try:
        import onnxruntime
        sess = onnxruntime.InferenceSession(temp_model_file)
    except ImportError:
        keras2onnx.common.k2o_logger().warning("Cannot import ONNXRuntime!")
        return True

    if isinstance(data, dict):
        feed_input = data
    else:
        data = data if isinstance(data, list) else [data]
        input_names = sess.get_inputs()
        # To keep the test code simple, we require the input names in Keras test
        # cases to be in alphabetical order; this always holds unless something
        # actively prevents it.
        feed = zip(sorted(i_.name for i_ in input_names), data)
        feed_input = dict(feed)
    actual = sess.run(None, feed_input)

    if expected is None:
        return

    if isinstance(expected, tuple):
        expected = list(expected)
    elif not isinstance(expected, list):
        expected = [expected]

    res = all(
        np.allclose(expected[n_], actual[n_], rtol=rtol, atol=atol)
        for n_ in range(len(expected)))

    if res and temp_model_file not in model_files:  # register for cleanup only on success; failed-case files are kept for diagnosis.
        model_files.append(temp_model_file)

    if not res:
        for n_ in range(len(expected)):
            expected_list = expected[n_].flatten()
            actual_list = actual[n_].flatten()
            print_mismatches(case_name, n_, expected_list, actual_list, rtol,
                             atol)

    return res
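
print_mismatches is called here and in Example #19 but defined elsewhere (note the two call sites pass rtol and atol in opposite orders). Based on the inline comparison loop in Example #17, a reconstruction consistent with this caller might be:

import sys
import numpy as np

def print_mismatches(case_name, n_, expected_list, actual_list, rtol=1.e-3, atol=1.e-6):
    # Hypothetical reconstruction following Example #17's inline loop:
    # report elements failing the np.allclose-style tolerance test.
    diff_list = np.abs(expected_list - actual_list)
    count_error = 0
    for e_, a_, d_ in zip(expected_list, actual_list, diff_list):
        if d_ > atol + rtol * abs(a_):
            if count_error < 10:  # print the first 10 mismatches
                print("case = " + case_name + ", result mismatch for expected = " +
                      str(e_) + ", actual = " + str(a_), file=sys.stderr)
            count_error += 1
    print("case = " + case_name + ", " + str(count_error) + " mismatches out of " +
          str(len(expected_list)) + " for list " + str(n_), file=sys.stderr)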