Example #1
    def eval_onnx_node(self, no_check_set):
        """Run a Caffe2 program using their ONNX backend.

        Prior to running the backend, use the Paddle scope to construct
        ONNX ops and prepare the inputs and output values based on ONNX
        compatibility.
        """
        # Convert inputs and outputs to ONNX tensors.
        # Use the Paddle fetch_list to prepare the outputs.
        inputs = [
            paddle_variable_to_onnx_tensor(v, self.block)
            for v in self.feed_map
        ]

        fetch_target_names = [
            fetch_target.name for fetch_target in self.fetch_list
            if fetch_target.name not in no_check_set
        ]
        outputs = [
            paddle_variable_to_onnx_tensor(v, self.block)
            for v in fetch_target_names
        ]
        # Construct the ONNX model using paddle-onnx.
        onnx_node = ops.node_maker[self.op_type](operator=self.op,
                                                 block=self.block)

        node_list = list(onnx_node) if isinstance(onnx_node,
                                                  tuple) else [onnx_node]

        for node in node_list:
            check_node(node)
        onnx_graph = make_graph(node_list, self.op_type, inputs, outputs)
        onnx_model = make_model(onnx_graph, producer_name='unittest')

        # Expand input dictionary if there are tensor arrays
        input_map = {}
        for v in self.inputs:
            if isinstance(self.inputs[v], list):
                input_map.update(self.inputs[v])
            else:
                input_map[v] = self.inputs[v]

        # Run the Caffe2Backend with the ONNX model.
        rep = Caffe2Backend.prepare(onnx_model, device='CPU')
        in_vals = [input_map[input.name] for input in inputs]
        outs = rep.run(in_vals)
        return outs
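
The make_graph / make_model / Caffe2Backend.prepare flow above can also be exercised without any Paddle operator. Below is a minimal, self-contained sketch using a single Relu node; the tensor names and shapes are illustrative rather than taken from the test above.

# Minimal sketch of the ONNX -> Caffe2 round trip: build a one-node graph,
# wrap it in a model, and run it through the Caffe2 backend.
import numpy as np
from onnx import helper, TensorProto
from caffe2.python.onnx.backend import Caffe2Backend

x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
relu = helper.make_node('Relu', inputs=['x'], outputs=['y'])

graph = helper.make_graph([relu], 'relu_test', [x], [y])
model = helper.make_model(graph, producer_name='unittest')

rep = Caffe2Backend.prepare(model, device='CPU')
outs = rep.run([np.random.randn(1, 4).astype('float32')])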
Example #2
def validate(args):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    [fluid_infer_program, feed_target_names,
     fetch_targets] = fluid.io.load_inference_model(args.fluid_model, exe)

    input_shapes = [
        fluid_infer_program.global_block().var(var_name).shape
        for var_name in feed_target_names
    ]
    input_shapes = [
        shape if shape[0] > 0 else (args.batch_size, ) + shape[1:]
        for shape in input_shapes
    ]

    # Generate dummy data as inputs
    inputs = [
        (args.b - args.a) * np.random.random(shape).astype("float32") + args.a
        for shape in input_shapes
    ]

    # Fluid inference 
    fluid_results = exe.run(fluid_infer_program,
                            feed=dict(zip(feed_target_names, inputs)),
                            fetch_list=fetch_targets)

    # Remove these prints some day
    print("Inference results for fluid model:")
    print(fluid_results)
    print('\n')

    # ONNX inference, using caffe2 as the backend
    onnx_model = load(args.onnx_model)
    rep = Caffe2Backend.prepare(onnx_model, device='CPU')
    onnx_results = rep.run(inputs)
    print("Inference results for ONNX model:")
    print(onnx_results)
    print('\n')

    for ref, hyp in zip(fluid_results, onnx_results):
        np.testing.assert_almost_equal(ref, hyp, decimal=args.expected_decimal)
    print("The exported model achieves {}-decimal precision.".format(
        args.expected_decimal))
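
The args namespace passed to validate() only needs the attributes the function reads. Below is a hypothetical argparse wiring; the flag spellings and defaults are assumptions, while the attribute names (fluid_model, onnx_model, a, b, batch_size, expected_decimal) come from the function above.

# Hypothetical CLI wiring for validate(); only the attribute names are taken
# from the function above, the defaults and flag spellings are assumptions.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fluid_model", required=True, help="Fluid inference model directory.")
    parser.add_argument("--onnx_model", required=True, help="Exported ONNX model file.")
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--a", type=float, default=0.0, help="Lower bound of the random inputs.")
    parser.add_argument("--b", type=float, default=1.0, help="Upper bound of the random inputs.")
    parser.add_argument("--expected_decimal", type=int, default=5)
    validate(parser.parse_args())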
Example #3
def debug_model(op_list, op_trackers, nms_outputs, args):
    return_numpy = not args.return_variable
    reader = None
    runner_type = "caffe2"
    runner = None
    if args.check_task == "image_classification":
        reader = image_classification_random_reader
    elif args.check_task == "image_detection_yolo":
        detection = YoloReader(args.image_path)
        reader = detection.reader
        runner_type = "onnxruntime"
    elif args.check_task == "image_detection_ssd":
        detection = SSDReader(args.image_path)
        reader = detection.reader
        runner_type = "onnxruntime"
    else:
        raise Exception(
            "Only the image_classification, image_detection_yolo and image_detection_ssd tasks are supported"
        )

    feed_var_name = args.name_prefix + "feed"
    fetch_var_name = args.name_prefix + "fetch"
    # Start checking the op tests.
    print("--------------------START CHECK TEST OPS!---------------------")
    for op_name in op_list:
        print("start check the op: %s" % (op_name))
        op_test_name = TEST + op_name + PY
        run_script = "python " + op_test_name
        return_code = os.system(run_script)
        if return_code != 0:
            raise Exception("The op %s test check failed!" % (op_name))
    print("----------------------CHECK TEST OPS OK!-----------------------")

    # Some tools (e.g. Tf2Onnx, Caffe2Onnx) only check the output of the last
    # layer; here we check the output of every layer to ensure the robustness
    # of Paddle2Onnx.
    # Start checking the op outputs.
    print("--------------------START CHECK OPS OUTPUT!--------------------")
    # get the intermediate result of fluid_model & onnx model
    fluid_intermedidate_target_names = []
    op2out = dict()
    out2op = dict()
    for tracker in op_trackers:
        last_node = tracker.op_node[0]
        outputs = last_node.output
        op2out[tracker] = outputs
        for output in outputs:
            out2op[output] = tracker
        fluid_intermedidate_target_names.extend(outputs)

    # Load the Paddle model and initialize the fluid executor.

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    feed_target_names = None
    if len(args.fluid_model_name) != 0 and len(args.fluid_params_name) != 0:
        [fluid_infer_program, feed_target_names, fetch_targets
         ] = fluid.io.load_inference_model(args.fluid_model, exe,
                                           args.fluid_model_name,
                                           args.fluid_params_name)
    else:
        [fluid_infer_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(args.fluid_model, exe)
    fetch_target_names = [target.name for target in fetch_targets]
    fluid_intermedidate_target_names = []
    fluid_intermedidate_target_names.extend(fetch_target_names)
    # In this section, we collect the variables we want to fetch.
    global_block = fluid_infer_program.global_block()

    fetch_list = [global_block.var(name) for name in fluid_intermedidate_target_names\
                 if global_block.has_var(name)]
    fluid_intermedidate_target_names = [var.name for var in fetch_list]

    # load the onnx model and init the onnx executor
    onnx_model = load(args.onnx_model)
    # Apply the user-defined fetch list.
    onnx_model = onnx_user_define_fetch_list(onnx_model, global_block,
                                             fluid_intermedidate_target_names)
    user_define_fetch_list(fluid_infer_program,
                           fluid_intermedidate_target_names, fetch_var_name)
    if runner_type == "caffe2":
        from caffe2.python.onnx.backend import Caffe2Backend
        runner = Caffe2Backend.prepare(onnx_model, device='CPU')
    for inputs in reader(fluid_infer_program, feed_target_names):
        fluid_results = exe.run(fluid_infer_program,
                                feed=dict(zip(feed_target_names, inputs)),
                                fetch_list=fetch_list,
                                feed_var_name=feed_var_name,
                                fetch_var_name=fetch_var_name,
                                return_numpy=return_numpy)
        print("the fluid results len:%d" % (len(fluid_results)))
        if runner is None:
            # Save the model and inputs under tests/ and run the onnxruntime
            # script out of process.
            with open("tests/nms_test.onnx", 'wb') as f:
                f.write(onnx_model.SerializeToString())
            with open("tests/inputs_test.pkl", 'wb') as f:
                pickle.dump(dict(zip(feed_target_names, inputs)), f)
            ret = os.system("python tests/onnx_runtime.py %s %s" %
                            (False, False))
            with open("tests/outputs_test.pkl", "rb") as f:
                onnx_results = pickle.load(f)
        else:
            onnx_results = runner.run(inputs)
        print("the onnx_results len:%d" % (len(onnx_results)))
        err_ratio = compare_fluid_onnx_results(
            fluid_results, onnx_results, fluid_intermedidate_target_names,
            nms_outputs, return_numpy, args)
    if err_ratio > 0.01:
        raise Exception("The ONNX and Paddle results differ by more than the allowed error ratio")
Example #4
ori_scores, ori_boxes = net(dummy_input)
scores, boxes = torch.onnx._export(net,
                                   dummy_input,
                                   model_path,
                                   verbose=True,
                                   output_names=['scores', 'boxes'])
np.testing.assert_almost_equal(ori_scores.data.cpu().numpy(),
                               scores.data.cpu().numpy(),
                               decimal=3)
np.testing.assert_almost_equal(ori_boxes.data.cpu().numpy(),
                               boxes.data.cpu().numpy(),
                               decimal=3)

model = onnx.load(model_path)
# init_net, predict_net = c2.onnx_graph_to_caffe2_net(model)
prepared_backend = c2.prepare(model, device="CUDA")
W = {model.graph.input[0].name: dummy_input.data.cpu().numpy()}
c2_out = prepared_backend.run(W)[0]

c2_workspace = prepared_backend.workspace
c2_model = prepared_backend.predict_net

# Now import the caffe2 mobile exporter
from caffe2.python.predictor import mobile_exporter

# Call Export to get the predict_net and init_net; these nets are needed to run the model on mobile.
init_net, predict_net = mobile_exporter.Export(c2_workspace, c2_model,
                                               c2_model.external_input)

print(
    f"Save the model in binary format to the files {init_net_path} and {predict_net_path}."
)
# Serialize the exported nets to the files named above.
with open(init_net_path, "wb") as f:
    f.write(init_net.SerializeToString())
with open(predict_net_path, "wb") as f:
    f.write(predict_net.SerializeToString())