Example 1
def estimate_grads(specLB, specUB, dim_samples=3):
    specLB = np.array(specLB, dtype=np.float32)
    specUB = np.array(specUB, dtype=np.float32)
    inputs = [((dim_samples - i) * specLB + i * specUB) / dim_samples for i in range(dim_samples + 1)]
    diffs = np.zeros(len(specLB))

    # refactor this out of this method
    if is_onnx:
        runnable = rt.prepare(model, 'CPU')
    elif sess is None:
        runnable = tf.Session()
    else:
        runnable = sess

    for sample in range(dim_samples + 1):
        pred = model_predict(runnable, inputs[sample])

        for index in range(len(specLB)):
            if sample < dim_samples:
                l_input = [m if i != index else u for i, m, u in zip(range(len(specLB)), inputs[sample], inputs[sample+1])]
                l_input = np.array(l_input, dtype=np.float32)
                l_i_pred = model_predict(runnable, l_input)
            else:
                l_i_pred = pred
            if sample > 0:
                u_input = [m if i != index else l for i, m, l in zip(range(len(specLB)), inputs[sample], inputs[sample-1])]
                u_input = np.array(u_input, dtype=np.float32)
                u_i_pred = model_predict(runnable, u_input)
            else:
                u_i_pred = pred
            diff = np.sum([abs(li - m) + abs(ui - m) for li, m, ui in zip(l_i_pred, pred, u_i_pred)])
            diffs[index] += diff
    return diffs / dim_samples
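The helper model_predict used above is not shown in this snippet; a minimal sketch of what it might look like for the ONNX branch, assuming the prepared backend exposes a run() method that returns a list of output arrays (the TensorFlow branch would need session.run with the graph's tensors instead):

import numpy as np

def model_predict(runnable, x):
    # Hypothetical helper: run the prepared backend on a single input
    # and flatten the last output so it can be iterated element-wise.
    outputs = runnable.run(np.asarray(x, dtype=np.float32))
    return np.asarray(outputs[-1]).reshape(-1)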
Example 2
def test_knn(iris_df):
    folder = os.environ.get("ONNXTESTDUMP", "tests_dump")
    basename = "SklearnKNeighborsClassifierMulti"
    os.makedirs(folder, exist_ok=True)

    x_columns = [
        'sepal length (cm)',
        'sepal width (cm)',
        'petal length (cm)'
    ]
    y_columns = ['plant_type']
    x_nda = iris_df[x_columns].values.astype(np.float32)
    y_1da = iris_df[y_columns].values.astype("<U10")
    model = KNeighborsClassifier()

    model.fit(x_nda, y_1da)
    prediction = [model.predict(x_nda), model.predict_proba(x_nda)]

    model_onnx = convert_sklearn(
        model=model,
        name="KNN classifier multi-class",
        initial_types=[("input", FloatTensorType([None, x_nda.shape[1]]))],
        # target_opset=TARGET_OPSET
    )

    dest_data = os.path.join(folder, f"{basename}.data.pkl")
    with open(dest_data, "wb") as data_file:
        pickle.dump(x_nda, data_file)

    dest_expected = os.path.join(folder, f"{basename}.expected.pkl")
    with open(dest_expected, "wb") as expected_file:
        pickle.dump(prediction, expected_file)

    dest_pkl = os.path.join(folder, f"{basename}.model.pkl")
    with open(dest_pkl, "wb") as pickle_file:
        pickle.dump(model, pickle_file)

    dest_onnx = os.path.join(folder, f"{basename}.model.onnx")
    with open(dest_onnx, "wb") as onnx_file:
        logging.info(f"created {onnx_file}")
        onnx_file.write(model_onnx.SerializeToString())

    onnx_graph = onnx.load(dest_onnx)

    print("doc_string={}".format(onnx_graph.doc_string))
    print("domain={}".format(onnx_graph.domain))
    print("ir_version={}".format(onnx_graph.ir_version))
    print("metadata_props={}".format(onnx_graph.metadata_props))
    print("model_version={}".format(onnx_graph.model_version))
    print("producer_name={}".format(onnx_graph.producer_name))
    print("producer_version={}".format(onnx_graph.producer_version))

    rep = backend.prepare(onnx_graph, 'CPU')
    prediction_from_saved = rep.run(x_nda)

    prediction_from_saved_df = pd.DataFrame(prediction_from_saved[1])
    prediction_from_saved_df.columns = prediction_from_saved_df.columns.map("plant_type_is_{}".format)

    prediction_from_saved_df['plant_type_pred'] = pd.Series(prediction_from_saved[0])
    assert prediction_from_saved_df.shape == (x_nda.shape[0], 4)
Example 3
 def testRunModel(self):
     name = get_name("mul_1.onnx")
     rep = backend.prepare(name)
     x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
     res = rep.run(x)
     output_expected = np.array([[1.0, 4.0], [9.0, 16.0], [25.0, 36.0]], dtype=np.float32)
     np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
Example 4
def test_abc():
    base_path = Path(__file__).parent

    filename = "examples/ONNX/abc_basic-mdf.json"
    file_path = (base_path / "../../.." / filename).resolve()

    # Load the MDF model
    mdf_model = load_mdf(str(file_path))

    # Test input
    test_input = np.array([[0, 0, 0], [1, 1, 1]], dtype=np.float32)

    # Get the result of MDF execution
    mdf_executable = EvaluableGraph(mdf_model.graphs[0], verbose=False)
    mdf_executable.evaluate(initializer={"input": test_input})
    mdf_output = mdf_executable.enodes["Cos_2"].evaluable_outputs[
        "_3"].curr_value

    # Get the translated ONNX model
    onnx_models = mdf_to_onnx(mdf_model)

    # Tell ONNX the opset is 13 even though the model was built with 15; older
    # onnxruntime installations need this to run the model. See https://github.com/onnx/onnx/issues/3205
    onnx_models[0].opset_import[0].version = 13

    # Get the result of running the ONNX model
    session = backend.prepare(onnx_models[0])
    onnx_output = session.run(
        test_input)  # run returns a list with the actual result and type
    onnx_res_output = np.array(onnx_output[0])

    assert np.array_equal(onnx_res_output, mdf_output)
Example 5
def test_ab():
    base_path = Path(__file__).parent

    filename = "examples/ONNX/ab.json"
    file_path = (base_path / "../../.." / filename).resolve()

    # Load the MDF model
    mdf_model = load_mdf(str(file_path))

    # Test input
    test_input = np.array([[0, 0, 0], [1, 1, 1]], dtype=np.float32)

    # Get the result of MDF execution
    mdf_executable = EvaluableGraph(mdf_model.graphs[0], verbose=False)
    # TODO: the int type cast is necessary for now because the nodes' parameters are constants
    #  and inputs must have the same type
    mdf_executable.evaluate(initializer={"input": test_input.astype(int)})
    mdf_output = mdf_executable.enodes["Mul_3"].evaluable_outputs[
        "_4"].curr_value

    # Get the translated ONNX model
    onnx_models = mdf_to_onnx(mdf_model)

    # Tell ONNX the opset is 13 even though the model was built with 15; older
    # onnxruntime installations need this to run the model. See https://github.com/onnx/onnx/issues/3205
    onnx_models[0].opset_import[0].version = 13

    # Get the result of running the ONNX model
    session = backend.prepare(onnx_models[0])
    onnx_output = session.run(
        test_input)  # run returns a list with the actual result and type
    onnx_res_output = np.array(onnx_output[0])
    # print(f"Output calculated by onnxruntime: {onnx_res_output} and MDF: {mdf_output.astype(float)}")

    assert np.array_equal(onnx_res_output, mdf_output)
Example 6
 def testRunModelNonTensor(self):
     name = self.get_name("pipeline_vectorize.onnx")
     rep = backend.prepare(name)
     x = {0: 25.0, 1: 5.13, 2: 0.0, 3: 0.453, 4: 5.966}
     res = rep.run(x)
     output_expected = np.array([[49.752754]], dtype=np.float32)
     np.testing.assert_allclose(output_expected, res[0], rtol=1e-05, atol=1e-08)
Example 7
    def testRunModelProto(self):
        name = datasets.get_example("logreg_iris.onnx")
        model = load(name)

        rep = backend.prepare(model)
        x = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=np.float32)
        res = rep.run(x)
        output_expected = np.array([0, 0, 0], dtype=np.float32)
        np.testing.assert_allclose(output_expected,
                                   res[0],
                                   rtol=1e-05,
                                   atol=1e-08)
        output_expected = [{
            0: 0.950599730014801,
            1: 0.027834169566631317,
            2: 0.02156602405011654
        }, {
            0: 0.9974970817565918,
            1: 5.6299926654901356e-05,
            2: 0.0024466661270707846
        }, {
            0: 0.9997311234474182,
            1: 1.1918064757310276e-07,
            2: 0.00026869276189245284
        }]

        check_list_of_map_to_float(self, output_expected, res[1])
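check_list_of_map_to_float is a test helper that is not shown here; a hedged sketch of its assumed behavior, comparing a list of {class index: probability} dicts entry by entry within a float tolerance:

def check_list_of_map_to_float(testcase, expected, got):
    # Assumed behavior: both arguments are lists of dicts mapping a
    # class index to a probability; compare them pairwise.
    testcase.assertEqual(len(expected), len(got))
    for exp_row, got_row in zip(expected, got):
        testcase.assertEqual(set(exp_row), set(got_row))
        for key, value in exp_row.items():
            testcase.assertAlmostEqual(value, got_row[key], places=5)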
Example 8
 def _create_session_via_backend_api(self, model):
     self.device = re.match(
         DEVICE_REGEX,
         self.get_value_from_config('device').lower()).group('device')
     backend_rep = backend.prepare(model=str(model),
                                   device=self.device.upper())
     return backend_rep._session  # pylint: disable=W0212
Example 9
 def _create_session_via_backend_api(self, model):
     device = self.get_value_from_config('device') or 'cpu'
     device_match = re.match(DEVICE_REGEX, device.lower())
     if not device_match:
         raise ConfigError('unknown device: {}'.format(device))
     self.device = device_match.group('device')
     backend_rep = backend.prepare(model=str(model), device=self.device.upper())
     return backend_rep._session  # pylint: disable=W0212
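DEVICE_REGEX is defined elsewhere in this launcher; a plausible definition (an assumption, not the verbatim source) that satisfies the .group('device') lookup used above:

import re

# Assumed pattern: accept 'cpu' or 'gpu' device strings and expose the
# matched name through the named group that the code reads.
DEVICE_REGEX = re.compile(r'(?P<device>cpu|gpu)')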
Example 10
def run_convolution_op(x, w, conv_attribute, output_shape):

    print(conv_attribute)

    # Create inputs (ValueInfoProto)
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, list(x.shape))
    input_w = helper.make_tensor_value_info('conv_w', TensorProto.FLOAT,
                                            list(w.shape))

    # Create output (ValueInfoProto)
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, output_shape)

    # Create initializer
    conv_weight = w.flatten()
    conv_weight = list(conv_weight)
    conv_w = helper.make_tensor(name="conv_w",
                                data_type=TensorProto.FLOAT,
                                dims=list(w.shape),
                                vals=conv_weight,
                                raw=False)

    # create node
    node = onnx.helper.make_node(op_type='Conv',
                                 inputs=['X', 'conv_w'],
                                 outputs=['Y'],
                                 kernel_shape=conv_attribute["kernel_shape"],
                                 pads=conv_attribute["pads"],
                                 strides=conv_attribute["strides"],
                                 group=conv_attribute['group'],
                                 dilations=conv_attribute['dilations'])

    # create graph
    graph_def = helper.make_graph(nodes=[node],
                                  name='test-model',
                                  inputs=[X, input_w],
                                  outputs=[Y],
                                  initializer=[conv_w])

    # create model
    model_def = helper.make_model(graph_def, producer_name='sun')
    onnx.checker.check_model(model_def)

    # model = onnx.load(onnx_file)
    session = backend.prepare(model_def)
    output = session.run(x)
    output = np.array(output[0])
    print(output)
    print(output.shape)

    # save ONNX model
    onnx.save(model_def, 'convolution.onnx')
    print("save file convolution.onnx")
    output = np.array(output)
    np.save('convolution', output)
    print("save output convolution.npy")

    return output
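A quick way to exercise this helper (shapes and attributes here are illustrative, not from the original): a 5x5 single-channel input convolved with a 3x3 kernel, one pixel of padding on every side, and unit strides keeps the spatial size at 5x5:

import numpy as np

x = np.random.randn(1, 1, 5, 5).astype(np.float32)
w = np.random.randn(1, 1, 3, 3).astype(np.float32)
attrs = {
    "kernel_shape": [3, 3],
    "pads": [1, 1, 1, 1],   # begin/end padding for each spatial axis
    "strides": [1, 1],
    "group": 1,
    "dilations": [1, 1],
}
out = run_convolution_op(x, w, attrs, output_shape=[1, 1, 5, 5])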
Example 11
def get_feature(onnx_file, aligned, batching=False):
    if not batching:
        # ONE INPUT
        input_blob = np.expand_dims(aligned, axis=0).astype(np.float32)  #NCHW
        # ONE INPUT
    else:
        # BATCHING
        input_blob = np.expand_dims(aligned, axis=0).astype(np.float32)  #NCHW
        input_blob = np.squeeze(input_blob)
        # BATCHING
    onnx_model = onnx.load(onnx_file)
    ort_session = backend.prepare(onnx_model, 'GPU')
    outputs = ort_session.run(input_blob)

    return outputs
Example 12
def run_model(data: bytearray, model: ModelProto):
    """Run model and check result"""

    x = np.array(data, dtype=np.int64)
    i = np.array([0], dtype=np.int64)
    keep_going = np.array([True], dtype=np.bool_)
    max_index = np.array([32], dtype=np.int64)
    model_rep = prepare(model)
    out = model_rep.run([x, i, keep_going, max_index])[1].reshape((1, 30))[0]
    assert np.array_equal(
        out,
        np.array(
            [16780, 9831, 2538, 14031, 8110, 19136, 17547, 13929, 16911, 20265,
             15014, 24203, 9238, 12759, 17883, 5871, 15265, 13222, 11014, 8290,
             7843, 16989, 7222, 20835, 15431, 4554, 8498, 11543, 6214, 7169],
            dtype=np.int64,
        ),
    )
    print(f"Flag: ctfzone\x7b{data.decode()}\x7d")
Example 13
def run_relu_op(x):

    # Create inputs (ValueInfoProto)
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, list(x.shape))

    # Create output (ValueInfoProto)
    # The output shape should be the same as the input shape
    output_shape = list(x.shape)
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, output_shape)

    # create node
    node = onnx.helper.make_node(
        op_type='Relu',
        inputs=['X'],
        outputs=['Y'],
    )

    # create graph
    graph_def = helper.make_graph(
        nodes=[node],
        name='test-model',
        inputs=[X],
        outputs=[Y],
    )

    # create model
    model_def = helper.make_model(graph_def, producer_name='sun')
    onnx.checker.check_model(model_def)

    # model = onnx.load(onnx_file)
    session = backend.prepare(model_def)
    output = session.run(x)
    output = np.array(output[0])
    print(output)
    print(output.shape)

    # save ONNX model
    onnx.save(model_def, 'relu.onnx')
    print("save file relu.onnx")
    output = np.array(output)
    np.save('relu', output)
    print("save output relu.npy")

    return output
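Since Relu preserves the input shape, the helper needs no output shape argument; a minimal, illustrative call:

import numpy as np

out = run_relu_op(np.random.randn(2, 3).astype(np.float32))
assert out.shape == (2, 3) and (out >= 0).all()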
Example 14
def get_feature(aligned, batching=False):
    if not batching:
        # ONE INPUT
        input_blob = np.expand_dims(aligned, axis=0).astype(np.float32)  #NCHW
        # ONE INPUT
    else:
        # BATCHING
        input_blob = np.expand_dims(aligned, axis=0).astype(np.float32)  #NCHW
        input_blob = np.squeeze(input_blob)
        # BATCHING
    onnx_model = onnx.load(onnx_file)
    ''' You can delete this block if it does not work '''
    # iterate through inputs of the graph
    # for input in onnx_model.graph.input:
    #     print (input.name, end=": ")
    #     # get type of input tensor
    #     tensor_type = input.type.tensor_type
    #     # check if it has a shape:
    #     if (tensor_type.HasField("shape")):
    #         # iterate through dimensions of the shape:
    #         for d in tensor_type.shape.dim:
    #             # the dimension may have a definite (integer) value or a symbolic identifier or neither:
    #             if (d.HasField("dim_value")):
    #                 print (d.dim_value, end=", ")  # known dimension
    #             elif (d.HasField("dim_param")):
    #                 print (d.dim_param, end=", ")  # unknown dimension with symbolic name
    #             else:
    #                 print ("?", end=", ")  # unknown dimension with no name
    #     else:
    #         print ("unknown rank", end="")
    #     print()
    ''' You can delete this block if it does not work '''

    ort_session = backend.prepare(onnx_model, 'GPU')
    #ort_session.get_providers()
    #ort.session.set_providers(['CUDAExecutionProvider'])
    outputs = ort_session.run(input_blob)
    # cnt = 0
    # print(len(outputs))
    #for output in outputs:
    #    print('Output '+str(cnt), output.shape)
    #    cnt+=1
    # print('im_scale:',scales)
    return outputs
Example 15
def _estimate_grads(specLB, specUB, model, dim_samples=3, input_shape=(1, )):
    # Estimate gradients using central difference quotient and average over dim_samples+1 in the range of the input bounds
    # Very computationally costly
    specLB = np.array(specLB, dtype=np.float32)
    specUB = np.array(specUB, dtype=np.float32)
    inputs = [(((dim_samples - i) * specLB + i * specUB) /
               dim_samples).reshape(*input_shape)
              for i in range(dim_samples + 1)]
    diffs = np.zeros(len(specLB))

    # ONNX assumed
    runnable = rt.prepare(model, 'CPU')

    for sample in range(dim_samples + 1):
        pred = _onnx_predict(runnable, inputs[sample])

        for index in range(len(specLB)):
            if sample < dim_samples:
                l_input = [
                    m if i != index else u for i, m, u in zip(
                        range(len(specLB)), inputs[sample], inputs[sample + 1])
                ]
                l_input = np.array(l_input, dtype=np.float32)
                l_i_pred = _onnx_predict(runnable, l_input)
            else:
                l_i_pred = pred
            if sample > 0:
                u_input = [
                    m if i != index else l for i, m, l in zip(
                        range(len(specLB)), inputs[sample], inputs[sample - 1])
                ]
                u_input = np.array(u_input, dtype=np.float32)
                u_i_pred = _onnx_predict(runnable, u_input)
            else:
                u_i_pred = pred
            diff = np.sum([
                abs(li - m) + abs(ui - m)
                for li, m, ui in zip(l_i_pred, pred, u_i_pred)
            ])
            diffs[index] += diff
    return diffs / dim_samples
Example 16
def onnx_run(OnnxName, x):
    model_name = OnnxName
    model = onnx.load(model_name)
    print("load onnx model : ", model_name)
    print("input data shepe: ", x.dtype, x.shape)

    onnx.checker.check_model(model)  # raises if the model is invalid

    # # model shape_inference
    # shape_model = shape_inference.infer_shapes(model)
    # onnx.checker.check_model(shape_model)
    # onnx.save(shape_model,  test_dir + "simple_net_shaped.onnx")

    # # model optimizer
    # passes = None
    # opti_mode = onnx.optimizer.optimize(shape_model, passes)
    # onnx.save(shape_model,  test_dir + "simple_net_opt.onnx")

    session = backend.prepare(model)

    output = session.run(x)
    output = np.array(output[0])
    return output
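A typical call might look like this (the model path and input shape are placeholders and must match the actual model):

import numpy as np

y = onnx_run("model.onnx", np.random.randn(1, 3, 224, 224).astype(np.float32))
print(y.shape)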
Example 17
    def test(self):
        _model = onnx.load(self.model_path)
        print("Total node count in model: ", len(_model.graph.node))

        # The input tensors could be provided as constants.
        # The example below illustrates how such a dictionary could be
        # provided for models with unknown input shapes. Since
        # mnist has a known input shape, we don't provide input tensors.
        # input_tensors = {'Input3': tf.constant(0, dtype = tf.float32,
        #                    name='Input3',
        #                    shape=[1, 1, 28, 28])}
        input_tensors = {}
        tensor_dict = otf.prepare(_model,
                                  gen_tensor_dict=True,
                                  input_tensor_dict=input_tensors).tensor_dict
        more_outputs = []
        output_to_check = []
        for node in _model.graph.node:
            # add the first output of each node to the model output
            output_tensor = None
            for i in range(len(_model.graph.value_info)):
                if _model.graph.value_info[i].name == node.output[0]:
                    output_tensor = _model.graph.value_info[i]

            for i in range(len(_model.graph.initializer)):
                if _model.graph.initializer[i].name == node.output[0]:
                    output_tensor = _model.graph.initializer[i]

            # assume the first output is a tensor
            tensor = tensor_dict[node.output[0]]
            output_tensor = helper.make_tensor_value_info(
                node.output[0], data_type.tf2onnx(tensor.dtype),
                tensor.shape) if output_tensor is None else output_tensor
            more_outputs.append(output_tensor)
            output_to_check.append(node.output[0])
        _model.graph.output.extend(more_outputs)

        tf_rep = otf.prepare(_model)
        rt_rep = ort.prepare(_model)

        # prepare input data
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        sample = x_test[:1].reshape(1, 1, 28, 28).astype(np.float32)

        inputs = [sample]
        my_out = tf_rep.run(inputs)
        rt_out = rt_rep.run(inputs)

        for op in output_to_check:
            for i in range(len(my_out)):
                # find the index of output in the list
                if my_out[op] is my_out[i]:

                    try:
                        np.savetxt(op.replace("/", "__") + ".rt",
                                   rt_out[i].flatten(),
                                   delimiter='\t')
                        np.savetxt(op.replace("/", "__") + ".tf",
                                   my_out[i].flatten(),
                                   delimiter='\t')
                        np.testing.assert_allclose(my_out[i],
                                                   rt_out[i],
                                                   rtol=1e-2)
                        print(
                            op,
                            "results of this layer are correct within tolerence."
                        )
                    except Exception as e:
                        np.set_printoptions(threshold=np.inf)
                        mismatch_percent = (find_between(
                            str(e), "(mismatch", "%)"))
                        print(
                            op, "mismatch with percentage {} %".format(
                                mismatch_percent))
Example 18
import numpy as np
from onnxruntime import datasets
from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument
import onnxruntime.backend as backend
from onnx import load

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
from onnxruntime import get_device
device = get_device()

name = datasets.get_example("logreg_iris.onnx")
model = load(name)

rep = backend.prepare(model, device)
x = np.array([[-1.0, -2.0]], dtype=np.float32)
try:
    label, proba = rep.run(x)
    print("label={}".format(label))
    print("probabilities={}".format(proba))
except (RuntimeError, InvalidArgument) as e:
    print(e)

########################################
# The backend can also directly load the model
# without using *onnx*.

rep = backend.prepare(name, device)
x = np.array([[-1.0, -2.0]], dtype=np.float32)
try:
Example 19
def main():
    global args, best_prec1
    args = parser.parse_args()

    # step 1.1, create pretrained model to transform to onnx model
    print("=> creating model '{}'".format(args.arch))
    if args.model == '':
        if args.pretrained.lower() not in ['false', 'none', 'not', 'no', '0']:
            print("=> using pre-trained parameters '{}'".format(
                args.pretrained))
            Model = pretrainedmodels.__dict__[args.arch](
                num_classes=1000, pretrained=args.pretrained)
        else:
            Model = pretrainedmodels.__dict__[args.arch](num_classes=1000, pretrained=None)

    if args.model == 'mobilenet_v2' or args.model == 'mobilenetv2':
        Model = mobilenet_v2(pretrained=True)
    if args.model == 'googlenet':
        Model = googlenet(pretrained=True)
    if args.model == 'shufflenet_v2' or args.model == 'shufflenetv2':
        Model = shufflenet_v2_x0_5(pretrained=True)

    # Use this an input trace to serialize the model
    input_shape = (3, int(args.h), int(args.w))
    if args.model == '':
        modelname = args.arch
    else:
        modelname = args.model
    model_onnx_path = "models_converted/%s-pytorch.onnx" % modelname
    model = Model
    if args.model == '':
        model.train(False)

    # step 1.2 Export the model to an ONNX file
    dummy_input = Variable(torch.randn(1, *input_shape))

    torch.save(model, 'models_converted/%s.path' % modelname)

    torch_onnx.export(model, dummy_input, model_onnx_path, verbose=False)
    print('++++++++++++++++++++++++++++++++++++++++++++++++++++++')
    print("=> Export of %s-pytorch.onnx complete!" % modelname)
    print('++++++++++++++++++++++++++++++++++++++++++++++++++++++')

    if args.model != '':
        print('The checking process only supports the <arch> mode.')
    else:
        # step 2, create onnx_model using onnxruntime as backend. check if right and export graph.
        image_path = 'data/2127.jpg'

        # output_pytorch, img_np = modelhandle.process(image)

        load_img = pretrainedmodels.utils.LoadImage()

        # transformations depending on the model
        # rescale, center crop, normalize, and others (ex: ToBGR, ToRange255)
        tf_img = pretrainedmodels.utils.TransformImage(model)

        # path_img = 'data/cat.jpg'

        input_img = load_img(image_path)
        input_tensor = tf_img(
            input_img)  # 3x400x225 -> 3x299x299 size may differ
        input_tensor = input_tensor.unsqueeze(0)  # 3x299x299 -> 1x3x299x299
        input = torch.autograd.Variable(input_tensor, requires_grad=False)

        # step 2.1 output the result of the test with pytorch
        output_pytorch = model(input)  # 1x1000
        # print('output_pytorch = {}'.format(output_pytorch))

        # step 2.2 output the result of test with onnx
        img_np = input.cpu().detach().numpy()
        print('=> The size of input is ', img_np.shape)
        onnx_model = onnx.load(model_onnx_path)
        rep = prepare(onnx_model, strict=False)
        output_onnx = rep.run(img_np)
        output_onnx = np.mean(output_onnx, axis=1)
        # print(output_onnx_tf)
        # print('output_onnx_tf = {}'.format(output_onnx))

        #step 2.3 output the different of result between two models
        diff = output_onnx - output_pytorch.detach().numpy()
        diff = np.mean(diff, axis=1)
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print("=> the precentage of different is %f " % diff[0])
        if diff <= 0.01:
            print("=> Model Exporting is Successful")
            print(
                "=> Please check the model in direction 'models_converted/%s-pytorch.onnx'"
                % modelname)
        else:
            print('=> Model exporting may have a problem, please check again')
        print('++++++++++++++++++++++++++++++++++++++++++++++++++++++')

        n = args.result
        if n == 'Y' or n == 'y':
            print('output_pytorch = {}'.format(output_pytorch))
            print('output_onnx_tf = {}'.format(output_onnx))
Example 20
import skl2onnx
import onnxruntime
import onnx
import sklearn
import numpy
from onnxruntime import get_device
import numpy as np
from onnxruntime import datasets
import onnxruntime.backend as backend
from onnx import load

name = datasets.get_example("logreg_iris.onnx")
model = load(name)

rep = backend.prepare(model, 'CPU')
x = np.array(
    [[-1.0, -2.0, 5.0, 6.0], [-1.0, -2.0, -3.0, -4.0], [-1.0, -2.0, 7.0, 8.0]],
    dtype=np.float32)
label, proba = rep.run(x)
print("label={}".format(label))
print("probabilities={}".format(proba))

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
print(get_device())

########################################
# The backend can also directly load the model
# without using *onnx*.
Example 21
import numpy as np
import onnx
import onnxruntime.backend as backend

# import caffe2.python.onnx.backend as backend
import os, sys

if len(sys.argv) != 2:
    print("Usage:", sys.argv[0], " OnnxModel")
    sys.exit(-1)

model = onnx.load(sys.argv[1])
session = backend.prepare(model, strict=False)

# get input data
# x = np.random.randn(1, 3, 224, 224).astype(np.float32)
x = np.load("test_1_3_224_224.npy").astype(np.float32)
print(x.shape)
print(x)

# Run the model on the backend

output = session.run(x)
# output = np.array(output)
# print(output.shape)
print(output)
Example 22
 def __init__(self, modelfile=onnx, device='cuda'):
     model = load(modelfile)
     self.rep = backend.prepare(model)
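The wrapper above only stores the prepared backend; a hedged sketch of how the full class might look with a forward method added (the predict method is an assumption about the intended use, not part of the original):

import numpy as np
import onnxruntime.backend as backend
from onnx import load

class OnnxPredictor:
    def __init__(self, modelfile, device='cuda'):
        self.rep = backend.prepare(load(modelfile))

    def predict(self, x):
        # Backend representations return a list of output arrays;
        # assume the first entry is the prediction of interest.
        return self.rep.run(np.asarray(x, dtype=np.float32))[0]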
Example 23
def run_model_test(model: ModelProto):
    data = bytearray(("1" * 32).encode())
    x = np.array(data, dtype=np.int64)
    i = np.array([0], dtype=np.int64)
    keep_going = np.array([True], dtype=np.bool_)
    max_index = np.array([32], dtype=np.int64)
    model_rep = prepare(model)
    standard = model_rep.run([x, i, keep_going, max_index])[1].reshape(
        (1, 30))[0]
    res = []
    for c in range(0, 32):
        inp = "1" * c + "2" + "1" * (31 - c)
        data = bytearray((inp).encode())
        x = np.array(data, dtype=np.int64)
        i = np.array([0], dtype=np.int64)
        keep_going = np.array([True], dtype=np.bool_)
        max_index = np.array([32], dtype=np.int64)
        model_rep = prepare(model)
        out = model_rep.run([x, i, keep_going, max_index])[1].reshape(
            (1, 30))[0]
        buf = (out - standard)
        res.append(buf)
        print(str(c) + ": " + str(buf))
    fin = [16780, 9831, 2538, 14031, 8110, 19136, 17547, 13929, 16911, 20265,
           15014, 24203, 9238, 12759, 17883, 5871, 15265, 13222, 11014, 8290,
           7843, 16989, 7222, 20835, 15431, 4554, 8498, 11543, 6214, 7169]
    X = [z3.Int('x%s' % i) for i in range(32)]
    s = z3.Solver()
    for i in range(len(res[0])):
        string = ""
        for b in range(len(res)):
            if (res[b][i] != 0):
                string += f"{res[b][i]}*X[{b}]+"
        string = string[:-1] + f"=={fin[i]}"
        s.add(eval(string))
    for i in range(32):
        s.add(X[i] >= 32, X[i] < 126)
    print(s.check())
    for i in range(32):
        print(chr(int(s.model()[X[i]].as_string())), end="")
    print()
Example 24
                                 dtype=np.float32).reshape([1, 28, 28, 1])

            if is_onnx:
                input = input.transpose(0, 3, 1, 2)
                for name, shape in output_info:
                    out_node = helper.ValueInfoProto(type=helper.TypeProto())
                    out_node.name = name
                    out_node.type.tensor_type.elem_type = model.graph.output[
                        0].type.tensor_type.elem_type
                    if len(shape) == 4:
                        shape = [shape[0], shape[3], shape[1], shape[2]]
                    for dim_value in shape:
                        dim = out_node.type.tensor_type.shape.dim.add()
                        dim.dim_value = dim_value
                    model.graph.output.append(out_node)
                runnable = rt.prepare(model, 'CPU')
                pred = runnable.run(input)
                #print(pred)
            else:
                if not (is_saved_tf_model or is_pb_file):
                    input = np.array(test_input, dtype=np.float32)
                output_names = [e[0] for e in output_info]
                pred = sess.run(
                    get_out_tensors(output_names),
                    {sess.graph.get_operations()[0].name + ':0': input})
                #print(pred)
            pred_eran = np.asarray([(i + j) / 2
                                    for i, j in zip(nlb[-1], nub[-1])])
            pred_model = np.asarray(pred[-1]).reshape(-1)
            if len(pred_eran) != len(pred_model):
                tested_file.write(', '.join([
Example 25
        results[file] = node

        print("total: ", total_num)
        print("top1_accuracy_rate: ", top1_num / total_num)
        print("top5_accuracy_rate: ", top5_num / total_num)

    sorted_results = sorted(results.keys())
    for i in sorted_results:
        temp = "img[%03s]  lable[%-3s]  result[%-19s]   top1[%s]  top5[%s]"%\
            (i.split('.')[0],  results[i]["label"], results[i]["result"], results[i]["top1"], results[i]["top5"])
        print(temp)

    print("test dir: ", test_dir)
    print("total: ", total_num)
    print("top1 hit: ", top1_num)
    print("top5 hit: ", top5_num)
    print("top1_accuracy_rate: ", top1_num / total_num)
    print("top5_accuracy_rate: ", top5_num / total_num)


if __name__ == "__main__":

    if len(sys.argv) != 3:
        print("Usage:", sys.argv[0], " OnnxModel  testFileDir")
        sys.exit(-1)

    file_path = sys.argv[2]
    model = onnx.load(sys.argv[1])
    session = backend.prepare(model)
    onnx_ference(sys.argv[2])
Example 26
def run_bn_op(x, scale, b, mean, var, attribute):
    print(attribute)
    # Create inputs (ValueInfoProto)
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, list(x.shape))
    input_scale = helper.make_tensor_value_info('scale_scale', TensorProto.FLOAT, list(scale.shape))
    input_b = helper.make_tensor_value_info('scale_b', TensorProto.FLOAT, list(b.shape))
    input_mean = helper.make_tensor_value_info('bn_mean', TensorProto.FLOAT, list(mean.shape))
    input_var = helper.make_tensor_value_info('bn_var', TensorProto.FLOAT, list(var.shape))

    # Create output (ValueInfoProto)
    # The output shape should be the same as the input shape
    output_shape = list(x.shape)
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, output_shape)

    # Create initializer
    scale_data = list(scale.flatten())
    init_scale = helper.make_tensor(
        name="scale_scale",
        data_type=TensorProto.FLOAT,
        dims=list(scale.shape),
        vals=scale_data,
        raw=False
    )

    b_data = list(b.flatten())
    init_b = helper.make_tensor(
        name="scale_b",
        data_type=TensorProto.FLOAT,
        dims=list(b.shape),
        vals=b_data,
        raw=False
    )

    mean_data = list(mean.flatten())
    init_mean = helper.make_tensor(
        name="bn_mean",
        data_type=TensorProto.FLOAT,
        dims=list(mean.shape),
        vals=mean_data,
        raw=False
    )

    var_data = list(var.flatten())
    init_var = helper.make_tensor(
        name="bn_var",
        data_type=TensorProto.FLOAT,
        dims=list(var.shape),
        vals=var_data,
        raw=False
    )

    # create node
    node = onnx.helper.make_node(
        op_type='BatchNormalization',
        inputs=['X', 'scale_scale', 'scale_b', 'bn_mean', 'bn_var'],
        outputs=['Y'],
        epsilon=attribute["epsilon"],
        momentum=attribute["momentum"],
    )

    # create graph
    graph_def = helper.make_graph(
        nodes=[node],
        name='test-model',
        inputs=[X, input_scale, input_b, input_mean, input_var],
        outputs=[Y],
        initializer=[init_scale, init_b, init_mean, init_var]
    )

    # create model
    model_def = helper.make_model(graph_def, producer_name='sun')
    onnx.checker.check_model(model_def)

    # model = onnx.load(onnx_file)
    session = backend.prepare(model_def)
    output = session.run(x)
    output = np.array(output[0])
    print(output)
    print(output.shape)

    # save ONNX model
    onnx.save(model_def, 'batchNormalization.onnx')
    print("save file batchNormalization.onnx")
    output = np.array(output)
    np.save('batchNormalization', output)
    print("save output batchNormalization.npy")

    return output
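An illustrative call (not from the original): BatchNormalization expects per-channel parameters, so for an input of shape (1, C, H, W) the scale, bias, mean, and variance all have shape (C,):

import numpy as np

C = 2
x = np.random.randn(1, C, 3, 3).astype(np.float32)
scale = np.ones(C, dtype=np.float32)
bias = np.zeros(C, dtype=np.float32)
mean = np.zeros(C, dtype=np.float32)
var = np.ones(C, dtype=np.float32)
out = run_bn_op(x, scale, bias, mean, var,
                {"epsilon": 1e-5, "momentum": 0.9})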