Example #1
def load_then_export_model(opt, dataloader):
    """ Load the model named in the options, then export its generator and
    discriminator networks to ONNX files.

    Arguments:
        opt {argparse.Namespace} -- options
        dataloader -- dataloader passed to the model constructor
    """
    model_name = opt.model
    model_path = f"lib.models.{model_name}"
    model_lib = importlib.import_module(model_path)
    model = getattr(model_lib, model_name.title())
    model_inst = model(opt, dataloader)
    model_inst.load_weights(opt.epoch)

    input_data = torch.empty((1, opt.nc, opt.isize, opt.isize),
                             dtype=next(model_inst.netg.parameters()).dtype,
                             device=next(model_inst.netg.parameters()).device)

    # Export both networks to ONNX, using a fresh buffer for each export so
    # the two serialized models are not concatenated into one stream.
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model_inst.netd,
                (input_data, ),
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                verbose=True,  # NOTE: set to False to silence the export log
                # export_params=True,
            )
            onnx_model_d = onnx.load_from_string(f.getvalue())

        with io.BytesIO() as f:
            torch.onnx.export(
                model_inst.netg,
                (input_data, ),
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                verbose=True,  # NOTE: set to False to silence the export log
                # export_params=True,
            )
            onnx_model_g = onnx.load_from_string(f.getvalue())

    model_name = model_name.replace('-', '')

    path_g = f"./output/{model_name}/{opt.dataset}/train/weights/exportG.onnx"
    print(f'saving model in {path_g}')
    onnx.save(onnx_model_g, path_g)

    path_d = f"./output/{model_name}/{opt.dataset}/train/weights/exportD.onnx"
    print(f'saving model in {path_d}')
    onnx.save(onnx_model_d, path_d)

    return
Example #2
    def test_initializer_sequence(self):
        class MyModule(torch.nn.Module):
            def __init__(self, input_size, hidden_size, num_classes):
                super().__init__()
                self.fc1 = torch.nn.Linear(input_size, hidden_size)
                self.relu = torch.nn.ReLU()
                self.fc2 = torch.nn.Linear(hidden_size, num_classes)

            def forward(self, x):
                out = self.fc1(x)
                out = self.relu(out)
                out = self.fc2(out)
                return out

        test_model = MyModule(3, 4, 10)
        state_dict_list = [k for (k, v) in test_model.state_dict().items()]
        named_params_list = [k for (k, v) in test_model.named_parameters()]

        x = torch.randn(32, 3)
        f = io.BytesIO()
        torch.onnx._export(test_model, (x, ), f, do_constant_folding=False)
        loaded_model = onnx.load_from_string(f.getvalue())

        actual_list = [p.name for p in loaded_model.graph.initializer]
        assert actual_list == state_dict_list, (
            "Initializer sequence does not match state_dict(). Expected: ("
            + ", ".join(state_dict_list) + "). Actual: (" +
            ", ".join(actual_list) + ").")
        assert actual_list == named_params_list, (
            "Initializer sequence does not match named_parameters(). Expected: ("
            + ", ".join(named_params_list) + "). Actual: (" +
            ", ".join(actual_list) + ").")
Example #3
    def test_onnx_checker_invalid_graph(self):
        class CustomAddModule(torch.nn.Module):
            def forward(self, x, y):
                return torch.add(x, y)

        def symbolic_custom_invalid_add(g, input, other, alpha=None):
            return g.op("Add", input, other, invalid_attr_i=1)

        torch.onnx.register_custom_op_symbolic("::add",
                                               symbolic_custom_invalid_add, 1)

        x = torch.randn(2, 3, 4)
        y = torch.randn(2, 3, 4)

        test_model = CustomAddModule()
        f = io.BytesIO()

        try:
            with self.assertRaises(torch.onnx.errors.CheckerError):
                torch.onnx.export(test_model, (x, y), f)
        finally:
            torch.onnx.unregister_custom_op_symbolic("::add", 1)

        self.assertTrue(f.getvalue(), "ONNX graph was not exported.")
        loaded_model = onnx.load_from_string(f.getvalue())
Example #4
def _run_comparison_test(data, result, proto, expected_activations,
                         lstm_op_pattern):
    model = onnx.load_from_string(proto)

    if expected_activations:
        lstms = [i for i in model.graph.node if i.op_type == 'LSTM']
        assert len(lstms) == 1
        activations = [
            i for i in lstms[0].attribute if i.name == 'activations'
        ]
        assert len(activations) == 1
        activations = activations[0].strings
        assert len(activations) == len(expected_activations)
        for expected, actual in zip(expected_activations, activations):
            assert expected == actual.decode('utf-8').lower()

    outId = model.graph.output[0].name
    inId = model.graph.input[0].name

    dataFlow = popart.DataFlow(1, {outId: popart.AnchorReturnType("All")})
    patterns = popart.Patterns(popart.PatternsLevel.Default)
    patterns.enablePattern('LSTMOp', lstm_op_pattern)
    session = popart.InferenceSession(fnModel=proto,
                                      dataFlow=dataFlow,
                                      deviceInfo=tu.create_test_device(),
                                      patterns=patterns)

    session.prepareDevice()

    anchors = session.initAnchorArrays()
    stepio = popart.PyStepIO({inId: data}, anchors)
    session.run(stepio)

    assert np.allclose(anchors[outId], result)
Example #5
    def test_shape_value_map(self):
        class RSoftMax(torch.nn.Module):
            def __init__(self, radix, cardinality):
                super().__init__()
                self.radix = radix
                self.cardinality = cardinality

            def forward(self, x):
                batch = x.size(0)
                x = x.view(batch, self.cardinality, self.radix,
                           -1).transpose(1, 2)
                x = F.softmax(x, dim=1)
                x = x.reshape(batch, -1)
                return x

        radix = 2
        cardinality = 1
        x = torch.randn(10, 1, 128, 1)
        f = io.BytesIO()
        torch.onnx.export(
            RSoftMax(radix, cardinality),
            (x, ),
            f,
            input_names=["x"],
            dynamic_axes={"x": [0]},
        )
        loaded_model = onnx.load_from_string(f.getvalue())
        self.assertEqual(
            loaded_model.graph.output[0].type.tensor_type.shape.dim[1].dim_value,
            128)
Example #6
def infer_shapes(model, check_type=False):  # type: (ModelProto, bool) -> ModelProto
    if not isinstance(model, ModelProto):
        raise ValueError('Shape inference only accepts ModelProto, '
                         'incorrect type: {}'.format(type(model)))
    model_str = model.SerializeToString()
    inferred_model_str = C.infer_shapes(model_str, check_type)
    return onnx.load_from_string(inferred_model_str)
Example #7
def infer_shapes(model: Union[ModelProto, bytes],
                 check_type: bool = False,
                 strict_mode: bool = False,
                 data_prop: bool = False) -> ModelProto:
    """Apply shape inference to the provided ModelProto.

    Inferred shapes are added to the value_info field of the graph.

    If the inferred values conflict with values already provided in the
    graph, that means that the provided values are invalid (or there is a
    bug in shape inference), and the result is unspecified.

    Arguments:
        model (Union[ModelProto, bytes]): ModelProto or serialized bytes of the model
        check_type (bool): Checks the type-equality for input and output
        strict_mode (bool): Stricter shape inference; if True, any inference
            error is raised, otherwise inference simply stops on error
        data_prop (bool): Enables data propagation for limited operators to perform shape computation

    Returns:
        (ModelProto) model with inferred shape information
    """
    if isinstance(model, (ModelProto, bytes)):
        model_str = model if isinstance(model,
                                        bytes) else model.SerializeToString()
        inferred_model_str = C.infer_shapes(model_str, check_type, strict_mode,
                                            data_prop)
        return onnx.load_from_string(inferred_model_str)
    elif isinstance(model, str):
        raise TypeError(
            'infer_shapes only accepts ModelProto or bytes, '
            'you can use infer_shapes_path for the model path (String).')
    else:
        raise TypeError('infer_shapes only accepts ModelProto or bytes, '
                        'incorrect type: {}'.format(type(model)))
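A minimal usage sketch for the wrapper above, called through onnx.shape_inference; the tiny Relu graph and its tensor names are purely illustrative:

from onnx import TensorProto, helper, shape_inference

# Build a small graph whose output shape is left unspecified.
node = helper.make_node("Relu", ["X"], ["Y"])
graph = helper.make_graph(
    [node], "tiny_graph",
    inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 4])],
    outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, None)],
)
model = helper.make_model(graph)

# infer_shapes returns a copy of the model with inferred shapes filled in.
inferred = shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
print(inferred.graph.output[0].type.tensor_type.shape)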
Example #8
    def test_clip_aten_fallback_explicit_request(self):
        class MyClip(torch.nn.Module):
            def forward(self, x):
                return torch.clamp(x, min=-0.5, max=0.5)

        def break_is_registered_op_api(opname, domain, version):
            fake_missing_symbolics = ("clamp", )
            if opname in fake_missing_symbolics:
                return False
            return ((domain, version) in symbolic_registry._registry and opname
                    in symbolic_registry._registry[(domain, version)])

        f = io.BytesIO()
        with patch(
                "torch.onnx.symbolic_registry.is_registered_op",
                side_effect=break_is_registered_op_api,
        ):
            # Force missing symbolic for well-known op
            x = torch.randn(3, 4, requires_grad=True)
            torch.onnx.export(
                MyClip(),
                x,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
            )
        onnx_model = onnx.load_from_string(f.getvalue())
        self.assertAtenOp(onnx_model, "clamp", "Tensor")
Example #9
def optimize(
    model,
    passes=None,
    fixed_point=False
):  # type: (ModelProto, Optional[Sequence[Text]], bool) -> ModelProto
    """Apply the optimization on the serialized ModelProto.

    Arguments:
        input (ModelProto): model
        names (list of string): list of optimization names

    Return:
        return (ModelProto) optimized model
    """

    if passes is None:
        print(
            'WARNING: defualt optimization passes will be enlarged to all fuse and elimination passes in the next version'
        )
        passes = [
            'eliminate_nop_transpose', 'eliminate_nop_pad',
            'fuse_consecutive_transposes', 'fuse_transpose_into_gemm'
        ]
    if not isinstance(model, ModelProto):
        raise ValueError(
            'Optimizer only accepts ModelProto, incorrect type: {}'.format(
                type(model)))

    model_str = model.SerializeToString()
    if fixed_point:
        optimized_model_str = C.optimize_fixedpoint(model_str, passes)
    else:
        optimized_model_str = C.optimize(model_str, passes)

    return onnx.load_from_string(optimized_model_str)
Example #10
def _export_via_onnx(model, inputs):
    # make sure all modules are in eval mode; onnx may change the training state
    # of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                # verbose=True,  # NOTE: uncomment this for debugging
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Apply ONNX's Optimization
    all_passes = onnx.optimizer.get_available_passes()
    passes = ["fuse_bn_into_conv"]
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)

    # Convert ONNX model to Caffe2 protobuf
    init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)

    return predict_net, init_net
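A hedged usage sketch for the helper above; SmallNet and the input shape are made up for illustration, and the call assumes the same imports used by _export_via_onnx (torch, onnx, Caffe2Backend) plus an onnx version that still ships onnx.optimizer:

import torch

class SmallNet(torch.nn.Module):
    """Toy conv+bn network used only to exercise _export_via_onnx."""
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
        self.bn = torch.nn.BatchNorm2d(8)

    def forward(self, x):
        return torch.relu(self.bn(self.conv(x)))

model = SmallNet().eval()          # _check_eval asserts every module is in eval mode
dummy = torch.randn(1, 3, 32, 32)  # example input used to trace the graph
predict_net, init_net = _export_via_onnx(model, (dummy,))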
Example #11
def _export_via_onnx(model, inputs):
    # make sure all modules are in eval mode before exporting
    def _check_val(module):
        assert not module.training

    model.apply(_check_val)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                # verbose=True,  # NOTE: uncomment this for debugging
                export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())
    # torch.onnx.export(model,  # model being run
    #                   inputs,  # model input (or a tuple for multiple inputs)
    #                   "reid_test.onnx",  # where to save the model (can be a file or file-like object)
    # export_params=True,  # store the trained parameter weights inside the model file
    # opset_version=10,  # the ONNX version to export the model to
    # do_constant_folding=True,  # whether to execute constant folding for optimization
    # input_names=['input'],  # the model's input names
    # output_names=['output'],  # the model's output names
    # dynamic_axes={'input': {0: 'batch_size'},  # variable length axes
    #               'output': {0: 'batch_size'}})
    # )

    # Apply ONNX's Optimization
    # all_passes = optimizer.get_available_passes()
    # passes = ["fuse_bn_into_conv"]
    # assert all(p in all_passes for p in passes)
    # onnx_model = optimizer.optimize(onnx_model, passes)

    # Convert ONNX Model to Tensorflow Model
    tf_rep = prepare(onnx_model,
                     strict=False)  # Import the ONNX model to Tensorflow
    print(tf_rep.inputs)  # Input nodes to the model
    print('-----')
    print(tf_rep.outputs)  # Output nodes from the model
    print('-----')
    # print(tf_rep.tensor_dict)  # All nodes in the model
    # """

    # install onnx-tensorflow from github,and tf_rep = prepare(onnx_model, strict=False)
    # Reference https://github.com/onnx/onnx-tensorflow/issues/167
    # tf_rep = prepare(onnx_model) # without strict=False leads to KeyError: 'pyfunc_0'

    # debug, here using the same input to check onnx and tf.
    # output_onnx_tf = tf_rep.run(to_numpy(img))
    # print('output_onnx_tf = {}'.format(output_onnx_tf))
    # onnx --> tf.graph.pb
    # tf_pb_path = 'reid_tf_graph.pb'
    # tf_rep.export_graph(tf_pb_path)

    return tf_rep
Example #12
    def test_optional_output(self, module_class: Type[torch.nn.Module],
                             x_size: int):
        # Need scripting to preserve control flow for this test to be
        # meaningful.
        model = torch.jit.script(module_class())
        f = io.BytesIO()
        x = torch.ones(x_size)
        dynamic_axis_name = "condition"
        torch.onnx.export(
            model,
            (x, ),
            f,
            opset_version=15,
            # Ensure condition is not constant
            dynamic_axes={"x": {
                0: dynamic_axis_name
            }},
            input_names=["x"],
        )
        exported = onnx.load_from_string(f.getvalue())
        expected_elem_type = symbolic_helper.scalar_type_to_onnx[
            symbolic_helper.scalar_type_to_pytorch_type.index(x.dtype)].value
        expected_output_type = onnx.helper.make_optional_type_proto(
            onnx.helper.make_tensor_type_proto(expected_elem_type,
                                               (dynamic_axis_name, )))
        self.assertEqual(expected_output_type, exported.graph.output[0].type)
        for node in exported.graph.node:
            # Both branches' output types should match.
            if node.op_type == "If":
                for attr in node.attribute:
                    if attr.name in ("then_branch", "else_branch"):
                        self.assertEqual(expected_output_type,
                                         attr.g.output[0].type)
Example #13
def convert_version(model, target_version):  # type: (ModelProto, int) -> ModelProto
    if not isinstance(model, ModelProto):
        raise ValueError('VersionConverter only accepts ModelProto as model, incorrect type: {}'.format(type(model)))
    if not isinstance(target_version, int):
        raise ValueError('VersionConverter only accepts int as target_version, incorrect type: {}'.format(type(target_version)))
    model_str = model.SerializeToString()
    converted_model_str = C.convert_version(model_str, target_version)
    return onnx.load_from_string(converted_model_str)
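A brief sketch of calling the wrapper above through onnx.version_converter; the file names and the target opset are placeholders:

import onnx
from onnx import checker, version_converter

model = onnx.load("model.onnx")                           # placeholder path
converted = version_converter.convert_version(model, 13)  # convert to opset 13
checker.check_model(converted)                            # sanity-check the result
onnx.save(converted, "model_opset13.onnx")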
Example #14
def infer_shapes(model):  # type: (ModelProto) -> ModelProto
    if not isinstance(model, ModelProto):
        raise ValueError('Shape inference only accepts ModelProto, '
                         'incorrect type: {}'.format(type(model)))

    model_str = model.SerializeToString()
    inferred_model_str = C.infer_shapes(model_str)
    return onnx.load_from_string(inferred_model_str)
Example #15
def convert_tests(testcases, sets=1):
    print("Collect {} test cases from PyTorch.".format(len(testcases)))
    failed = 0
    FunctionalModule_nums = 0
    nn_module = {}
    for t in testcases:
        test_name = get_test_name(t)
        module = gen_module(t)
        module_name = str(module).split("(")[0]
        if (module_name != "LogSoftmax"):
            continue
        if (module_name == "FunctionalModule"):
            FunctionalModule_nums += 1
        else:
            if (module_name not in nn_module):
                nn_module[module_name] = 0
        try:
            input = gen_input(t)
            f = io.BytesIO()
            torch.onnx._export(module, input, f,
                               operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)
            onnx_model = onnx.load_from_string(f.getvalue())
            onnx.checker.check_model(onnx_model)
            onnx.helper.strip_doc_string(onnx_model)
            output_dir = os.path.join(test_onnx_common.pytorch_converted_dir, test_name)

            if os.path.exists(output_dir):
                shutil.rmtree(output_dir)
            os.makedirs(output_dir)
            with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
                file.write(onnx_model.SerializeToString())

            for i in range(sets):
                output = module(input)
                data_dir = os.path.join(output_dir, "test_data_set_{}".format(i))
                os.makedirs(data_dir)

                for index, var in enumerate([input]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(os.path.join(data_dir, "input_{}.pb".format(index)), "wb") as file:
                        file.write(tensor.SerializeToString())
                for index, var in enumerate([output]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(os.path.join(data_dir, "output_{}.pb".format(index)), "wb") as file:
                        file.write(tensor.SerializeToString())
                input = gen_input(t)
                if (module_name != "FunctionalModule"):
                    nn_module[module_name] |= 1
        except:  # noqa: E722
            traceback.print_exc()
            if (module_name != "FunctionalModule"):
                nn_module[module_name] |= 2
            failed += 1

    print("Collect {} test cases from PyTorch repo, failed to export {} cases.".format(
        len(testcases), failed))
    print("PyTorch converted cases are stored in {}.".format(test_onnx_common.pytorch_converted_dir))
    print_stats(FunctionalModule_nums, nn_module)
Example #16
def load_model_only(path: str) -> onnx.ModelProto:
    """
     @param path path to file
     @return deserialized onnx model
     """
    with open(path, 'rb') as model_file:
        binary = model_file.read()
    model = onnx.load_from_string(binary)
    return model
Example #17
    def test_initializer_sequence_script_model(self):
        def list_is_expected(short_list, long_list) -> bool:
            if len(short_list) > len(long_list):
                return False

            for i in range(len(short_list)):
                if short_list[i] not in long_list[i]:
                    return False

            return True

        def loop(x, y):
            for i in range(int(y)):
                x = x + i
            return x

        class MyModule(torch.nn.Module):
            def __init__(self, input_size, hidden_size, num_classes):
                super().__init__()
                self.fc1 = torch.nn.Linear(input_size, hidden_size)
                self.relu = torch.nn.ReLU()
                self.fc2 = torch.nn.Linear(hidden_size, num_classes)

            def forward(self, x, y):
                x = loop(x, y)
                out = self.fc1(x)
                out = self.relu(out)
                out = self.fc2(out)
                return out

        test_model = torch.jit.script(MyModule(3, 4, 10))
        state_dict_list = [k for (k, v) in test_model.state_dict().items()]
        named_params_list = [k for (k, v) in test_model.named_parameters()]

        x = torch.ones(2, 3, dtype=torch.float)
        y = torch.tensor(5, dtype=torch.long)
        f = io.BytesIO()

        torch.onnx.export(test_model, (x, y), f, do_constant_folding=False)
        loaded_model = onnx.load_from_string(f.getvalue())

        actual_list = [p.name for p in loaded_model.graph.initializer]
        assert list_is_expected(state_dict_list, actual_list), (
            "ScriptModel - initializer sequence does not match state_dict(). Expected: ("
            + ", ".join(state_dict_list)
            + "). Actual: ("
            + ", ".join(actual_list)
            + ")."
        )
        assert list_is_expected(named_params_list, actual_list), (
            "ScriptModel - initializer sequence does not match named_parameters(). Expected: ("
            + ", ".join(named_params_list)
            + "). Actual: ("
            + ", ".join(actual_list)
            + ")."
        )
Example #18
def optimize(model, passes=[]):  # type: (ModelProto, Sequence[Text]) -> ModelProto
    if len(passes) == 0:
        passes = ['eliminate_nop_transpose',
                  'fuse_consecutive_transposes',
                  'fuse_transpose_into_gemm']
    if not isinstance(model, ModelProto):
        raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))

    model_str = model.SerializeToString()
    optimized_model_str = C.optimize(model_str, passes)
    return onnx.load_from_string(optimized_model_str)
Example #19
        def getModelProto(self):
            proto = self._builder.getModelProto()

            # TODO (T33079): decide if we can apply this check unconditionally
            # in the underlying builder implementation
            if self._check_model:
                print("Checking model proto with onnx")
                model = onnx.load_from_string(proto)
                onnx.checker.check_model(model)

            return proto
Example #20
def infer_shapes(model, check_type=False):  # type: (ModelProto, bool) -> ModelProto
    if isinstance(model, ModelProto):
        model_str = model.SerializeToString()
        inferred_model_str = C.infer_shapes(model_str, check_type)
        return onnx.load_from_string(inferred_model_str)
    elif isinstance(model, string_types):
        raise TypeError('infer_shapes only accepts ModelProto, '
                        'you can use infer_shapes_path for the model path (String).')
    else:
        raise TypeError('infer_shapes only accepts ModelProto, '
                        'incorrect type: {}'.format(type(model)))
Example #21
def optimize(model, passes=[]):  # type: (ModelProto, Sequence[Text]) -> ModelProto
    if len(passes) == 0:
        passes = ['eliminate_nop_transpose',
                  'eliminate_nop_pad',
                  'fuse_consecutive_transposes',
                  'fuse_transpose_into_gemm']
    if not isinstance(model, ModelProto):
        raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))

    model_str = model.SerializeToString()
    optimized_model_str = C.optimize(model_str, passes)
    return onnx.load_from_string(optimized_model_str)
Example #22
def convert_tests(testcases, sets=1):
    print("Collect {} test cases from PyTorch.".format(len(testcases)))
    failed = 0
    ops = set()
    for t in testcases:
        test_name = get_test_name(t)
        module = gen_module(t)
        try:
            input = gen_input(t)
            f = io.BytesIO()
            torch.onnx._export(module, input, f)
            onnx_model = onnx.load_from_string(f.getvalue())
            onnx.checker.check_model(onnx_model)
            onnx.helper.strip_doc_string(onnx_model)
            output_dir = os.path.join(test_onnx_common.pytorch_converted_dir,
                                      test_name)

            if os.path.exists(output_dir):
                shutil.rmtree(output_dir)
            os.makedirs(output_dir)
            with open(os.path.join(output_dir, "model.onnx"), "wb") as file:
                file.write(onnx_model.SerializeToString())

            for i in range(sets):
                output = module(input)
                data_dir = os.path.join(output_dir,
                                        "test_data_set_{}".format(i))
                os.makedirs(data_dir)

                for index, var in enumerate([input]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(
                            os.path.join(data_dir,
                                         "input_{}.pb".format(index)),
                            "wb") as file:
                        file.write(tensor.SerializeToString())
                for index, var in enumerate([output]):
                    tensor = numpy_helper.from_array(var.data.numpy())
                    with open(
                            os.path.join(data_dir,
                                         "output_{}.pb".format(index)),
                            "wb") as file:
                        file.write(tensor.SerializeToString())
                input = gen_input(t)
        except:  # noqa: E722
            traceback.print_exc()
            failed += 1

    print(
        "Collect {} test cases from PyTorch repo, failed to export {} cases.".
        format(len(testcases), failed))
    print("PyTorch converted cases are stored in {}.".format(
        test_onnx_common.pytorch_converted_dir))
Example #23
def parse_model(model_text: str) -> onnx.ModelProto:
    """Parse a string to build a ModelProto.

    Arguments:
        model_text (string): model description in the ONNX textual syntax
    Returns:
        ModelProto
    """
    (success, msg, model_proto_str) = C.parse_model(model_text)
    if success:
        return onnx.load_from_string(model_proto_str)
    else:
        raise ParseError(msg)
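A short sketch of the textual syntax the parser accepts, via onnx.parser.parse_model in recent onnx releases; the graph below is illustrative only:

from onnx import checker, parser

# Illustrative model in the ONNX textual syntax understood by parse_model.
model_text = """
<ir_version: 8, opset_import: ["" : 17]>
agraph (float[N, 3] X) => (float[N, 3] Y)
{
    Y = Relu(X)
}
"""
model = parser.parse_model(model_text)
checker.check_model(model)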
Example #24
def run_calibration():
    onnx_model = onnx.load(ONNX_PATH)
    if not onnx_model:
        print("Failed to load the ONNX file. path:", ONNX_PATH)
        return
    print("Finished loading the ONNX model")

    lmdb_env = lmdb.open(LMDB_PATH)
    if not lmdb_env:
        print("lmdb from %s is not valid" % (LMDB_PATH))
        return

    lmdb_txn = lmdb_env.begin()
    lmdb_cursor = lmdb_txn.cursor()
    datum = caffe2_legacy_pb2.CaffeDatum()
    lmdb_cursor.first()
    cal = py_onnx_cal.Calibration()
    for loop_count in range(0, ITERATION):
        for batch in range(0, BATCH):
            value = lmdb_cursor.value()
            datum.ParseFromString(value)
            label = datum.label
            float_image = np.frombuffer(datum.data, dtype=np.uint8).reshape(
                1, datum.channels, datum.height,
                datum.width).astype(np.float32)
            # Image pre-process
            print(float_image)
            crop = CropCenterOP(224, 224)
            float_image = crop(float_image)
            mean = MeanOP([104, 117, 123])
            float_data = mean(float_image)
            print("shape = ", float_data.shape)
            if (batch == 0):
                input_data = float_data
            else:
                input_data = np.append(input_data, float_data, axis=0)
            lmdb_cursor.next()
        # Feed inputs for calibration
        cal.feed('data_0', input_data)

    onnx_cal_str = cal.profile(onnx_model.SerializeToString(), ITERATION)
    onnx.save(onnx.load_from_string(onnx_cal_str), './resnet50.cal.onnx')

    # Extract outputs
    for iter in range(0, ITERATION):
        out = cal.fetch('prob_1', iter)
        if (iter == 0):
            total_outs = out
        else:
            total_outs = np.append(total_outs, out, axis=0)
    print("prob_1 = ", total_outs)
Example #25
def export_onnx_model(model, inputs, input_names=None, output_names=None):
    """
    Trace and export a model to onnx format.

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`
        input_names (list[str]): names assigned to the graph inputs
        output_names (list[str]): names assigned to the graph outputs

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    # make sure all modules are in eval mode, onnx may change the training state
    # of the module if the states are not consistent
    def _check_eval(module):
        assert not module.training

    model.apply(_check_eval)

    # Export the model to ONNX
    with torch.no_grad():
        with io.BytesIO() as f:
            torch.onnx.export(
                model,
                inputs,
                f,
                operator_export_type=OperatorExportTypes.ONNX,
                verbose=True,  # NOTE: set to False to silence the export log
                # opset_version=11,
                keep_initializers_as_inputs=True,
                input_names=input_names,
                output_names=output_names,
                # export_params=True,
            )
            onnx_model = onnx.load_from_string(f.getvalue())

    # Skip ONNX's optimization and return the exported model as-is.
    return onnx_model

    # NOTE: the optimization below is unreachable; remove the early return
    # above to enable it.
    # Apply ONNX's Optimization
    all_passes = onnx.optimizer.get_available_passes()
    passes = ["fuse_bn_into_conv"]
    assert all(p in all_passes for p in passes)
    onnx_model = onnx.optimizer.optimize(onnx_model, passes)
    return onnx_model
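A hedged usage sketch for export_onnx_model above; TinyHead and the input shape are invented for illustration:

import torch

class TinyHead(torch.nn.Module):
    """Toy module used only to demonstrate the export helper."""
    def forward(self, x):
        return torch.sigmoid(x.mean(dim=(2, 3)))

model = TinyHead().eval()             # _check_eval requires every module to be in eval mode
dummy = (torch.randn(1, 3, 64, 64),)  # inputs tuple: the model is called as model(*inputs)
onnx_model = export_onnx_model(model, dummy,
                               input_names=["images"],
                               output_names=["scores"])
print([n.op_type for n in onnx_model.graph.node])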
Example #26
    def _roundtrip(self, model_name):
        model_dir = Runner(c2)._prepare_model_data(
            namedtuple('dummy', ['model_name'])(model_name))

        pb_path = os.path.join(model_dir, 'model.pb')

        before_roundtrip = onnx.load(pb_path)

        with open(pb_path, 'rb') as pb:
            after_roundtrip = onnx.load_from_string(pb.read())

        assert onnx.helper.printable_graph(before_roundtrip.graph) \
            == onnx.helper.printable_graph(after_roundtrip.graph)

        with open(pb_path, 'rb') as pb:
            assert after_roundtrip.SerializeToString() == pb.read()
Example #27
    def __init__(self, onnx_model_proto, make_deepcopy=False):
        """Creates a ModelWrapper instance.

        onnx_model_proto can be either a ModelProto instance, a string
        with the path to a stored .onnx file on disk, or serialized bytes.
        The make_deepcopy option controls whether a deep copy of the
        ModelProto is made internally.
        """
        if isinstance(onnx_model_proto, str):
            self._model_proto = onnx.load(onnx_model_proto)
        elif isinstance(onnx_model_proto, bytes):
            self._model_proto = onnx.load_from_string(onnx_model_proto)
        else:
            if make_deepcopy:
                self._model_proto = copy.deepcopy(onnx_model_proto)
            else:
                self._model_proto = onnx_model_proto
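A small usage sketch for the constructor above, assuming ModelWrapper is importable from the surrounding package; the file name is a placeholder and each call exercises one of the branches shown:

import onnx

wrapper_from_path = ModelWrapper("model.onnx")             # path on disk

proto_bytes = onnx.load("model.onnx").SerializeToString()
wrapper_from_bytes = ModelWrapper(proto_bytes)             # serialized bytes

proto = onnx.load("model.onnx")
wrapper_copy = ModelWrapper(proto, make_deepcopy=True)     # private deep copy of a ModelProto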
Example #29
    def test_clip_aten_fallback_due_exception(self):
        x = torch.randn(3, 4, requires_grad=True)

        def bad_clamp(g, self, min, max):
            return _onnx_unsupported("Bad boy!")

        class MyClip(torch.nn.Module):
            def forward(self, x):
                return torch.clamp(x, min=-0.5, max=0.5)

        f = io.BytesIO()
        with custom_op("aten::clamp", bad_clamp, 9):
            torch.onnx.export(MyClip(), x, f,
                              operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK)
        onnx_model = onnx.load_from_string(f.getvalue())
        self.assertAtenOp(onnx_model, "clamp", "Tensor")
Example #30
def check_models(model_init, modelA_fn, modelB_fn):
    """
    for each weight tensor, check the relative error. That is, 
    | model_accl - model_no_accl |_1 / | model_accl - model_initial|_1
    """
    modelA = onnx.load(modelA_fn)
    modelB = onnx.load(modelB_fn)

    #the initial model
    modelC = onnx.load_from_string(model_init)

    for w_i, weightA in enumerate(modelA.graph.initializer):
        # We need to avoid the gradient accl initializers as these won't be present
        # in the non grad accl models.
        if (popart.reservedAcclPrefix() not in weightA.name
                and popart.reservedAccl1Prefix() not in weightA.name
                and popart.reservedAccl2Prefix() not in weightA.name
                and popart.reservedStepPrefix() not in weightA.name
                and popart.reservedAccumPrefix() not in weightA.name):
            # where A, B, C are weight tensors,
            # |A - B|_1
            l1AB = 0
            # |B - C|_1
            l1BC = 0
            # |A - C|_1
            l1AC = 0
            for d_i, dataA in enumerate(weightA.float_data):
                dataB = modelB.graph.initializer[w_i].float_data[d_i]
                dataC = modelC.graph.initializer[w_i].float_data[d_i]

                # abs diff of 2 floats
                l1AB += np.abs(dataA - dataB)
                l1BC += np.abs(dataB - dataC)
                l1AC += np.abs(dataA - dataC)

            relative_error = l1AB / (l1AC)
            print(
                f"{weightA.name}: l1AB = %.2e,  l1AC = %.2e, l1BC = %.2e, relative error = %.2e"
                % (l1AB, l1AC, l1BC, relative_error))

            # check that the weights have moved enough for this to be a valid comparison
            assert l1AC > 1e-3, "change since start of A = %.5f" % (l1AC, )
            assert l1BC > 1e-3, "change since start of B = %.5f" % (l1BC, )

            #relative error assertion
            assert 1e-5 > relative_error, "Relative error {}".format(
                relative_error)
Example #31
def optimize(model, passes=None, fixed_point=False):  # type: (ModelProto, Optional[Sequence[Text]], bool) -> ModelProto
    if passes is None:
        passes = ['eliminate_nop_transpose',
                  'eliminate_nop_pad',
                  'fuse_consecutive_transposes',
                  'fuse_transpose_into_gemm']
    if not isinstance(model, ModelProto):
        raise ValueError(
            'Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))

    model_str = model.SerializeToString()
    if fixed_point:
        optimized_model_str = C.optimize_fixedpoint(model_str, passes)
    else:
        optimized_model_str = C.optimize(model_str, passes)

    return onnx.load_from_string(optimized_model_str)
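Note that onnx.optimizer, which backs the C.optimize calls above, was later removed from the main onnx package and lives on as the separate onnxoptimizer project; a hedged sketch of the roughly equivalent call with that package, assuming it is installed:

import onnx
import onnxoptimizer  # pip install onnxoptimizer

model = onnx.load("model.onnx")  # placeholder path
passes = ['eliminate_nop_transpose', 'eliminate_nop_pad',
          'fuse_consecutive_transposes', 'fuse_transpose_into_gemm']
optimized = onnxoptimizer.optimize(model, passes)
onnx.save(optimized, "model_optimized.onnx")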
Example #32
def infer_shapes(model: Union[ModelProto, bytes],
                 check_type: bool = False,
                 strict_mode: bool = False,
                 data_prop: bool = False) -> ModelProto:
    if isinstance(model, (ModelProto, bytes)):
        model_str = model if isinstance(model,
                                        bytes) else model.SerializeToString()
        inferred_model_str = C.infer_shapes(model_str, check_type, strict_mode,
                                            data_prop)
        return onnx.load_from_string(inferred_model_str)
    elif isinstance(model, str):
        raise TypeError(
            'infer_shapes only accepts ModelProto or bytes, '
            'you can use infer_shapes_path for the model path (String).')
    else:
        raise TypeError('infer_shapes only accepts ModelProto or bytes, '
                        'incorrect type: {}'.format(type(model)))
Example #33
    def _helper_test_to_(self, cast_fn: Callable[[torch.Tensor], torch.Tensor]):
        """Helper to test aten::to(device) variants

        `cast_fn` is converted into a `torch.jit.script`. It wraps `aten::to`
        during export to prevent the devices from being hard-coded.

        Needed by detectron2 after https://github.com/facebookresearch/detectron2/pull/4132/
        """
        cast_fn = torch.jit.script(cast_fn)

        f = io.BytesIO()
        x = torch.zeros([1, 3, 32, 32])
        torch.onnx.export(cast_fn, (x,), f)
        onnx_model = onnx.load_from_string(f.getvalue())
        for n in onnx_model.graph.node:
            self.assertNotEqual(n.op_type, "To")
            self.assertNotEqual(n.op_type, "Cast")