Example #1
def showInNetron(model_filename: str,
                 localhost_url: str = None,
                 port: int = None):
    """Shows a ONNX model file in the Jupyter Notebook using Netron.

    :param model_filename: The path to the ONNX model file.
    :type model_filename: str

    :param localhost_url: The IP address used by the Jupyter IFrame to show the model.
     Defaults to localhost.
    :type localhost_url: str, optional

    :param port: The port number used by Netron and the Jupyter IFrame to show
     the ONNX model.  Defaults to 8081.
    :type port: int, optional

    :return: The IFrame displaying the ONNX model.
    :rtype: IPython.lib.display.IFrame
    """
    try:
        port = port or int(os.getenv("NETRON_PORT", default="8081"))
    except ValueError:
        port = 8081
    localhost_url = localhost_url or os.getenv("LOCALHOST_URL",
                                               default="localhost")
    netron.start(model_filename, address=("0.0.0.0", port), browse=False)
    return IFrame(src=f"http://{localhost_url}:{port}/",
                  width="100%",
                  height=400)
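A minimal usage sketch for the helper above (the model path is hypothetical; in a notebook, evaluating the returned IFrame renders the viewer inline):

# Hypothetical call in a Jupyter cell; "model.onnx" stands in for a real file.
viewer = showInNetron("model.onnx", port=8082)
viewer  # displaying the IFrame embeds the Netron viewer in the notebook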
Example #2
    def Visualize_With_Netron(self, data_shape=None, port=None):
        '''
        Visualize network with netron library 

        Args:
            data_shape (tuple): Input shape of data in format C, H, W
            port (int): Local host free port.

        Returns:
            None
        '''
        self.custom_print("Using Netron To Visualize")
        self.custom_print("Not compatible on kaggle")
        self.custom_print("Compatible only for Jupyter Notebooks")

        if not data_shape:
            self.custom_print("Provide data_shape argument")
            return  # without a shape, c/h/w below would be undefined
        else:
            c, h, w = data_shape

        batch_size = 1
        x = tf.placeholder(tf.float32, shape=(batch_size, h, w, c))
        y = self.system_dict["local"]["model"](x)

        self.system_dict["local"]["model"].save("final.h5")

        import netron
        if (not port):
            netron.start('final.h5')
        else:
            netron.start('final.h5', port=port)
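Note: tf.placeholder in the snippet above is TensorFlow 1.x API; under TensorFlow 2.x it would require the tf.compat.v1 shim with eager execution disabled.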
Example #3
    def Visualize_With_Netron(self, data_shape=None, port=None):
        self.custom_print("Using Netron To Visualize")
        self.custom_print("Not compatible on kaggle")
        self.custom_print("Compatible only for Jupyter Notebooks")

        if not data_shape:
            self.custom_print("Provide data_shape argument")
            return  # without a shape, c/h/w below would be undefined
        else:
            c, h, w = data_shape

        batch_size = 1
        x = tf.placeholder(tf.float32, shape=(batch_size, h, w, c))
        y = self.system_dict["local"]["model"](x)

        self.system_dict["local"]["model"].save("final.h5")

        import netron
        if not port:
            netron.start('final.h5')
        else:
            netron.start('final.h5', port=port)



Example #4
    ###############################################################################################################################################
    def Visualize_With_Netron(self, data_shape=None, port=None):
        self.custom_print("Using Netron To Visualize")
        self.custom_print("Not compatible on kaggle")
        self.custom_print("Compatible only for Jupyter Notebooks")

        if not data_shape:
            c, h, w = self.system_dict["dataset"]["params"]["data_shape"]
        else:
            c, h, w = data_shape

        data = mx.nd.random.randn(1, c, h, w)
        if self.system_dict["model"]["params"]["use_gpu"]:
            self.system_dict["local"]["ctx"] = [mx.gpu(0)]
        else:
            self.system_dict["local"]["ctx"] = [mx.cpu()]

        data = data.copyto(self.system_dict["local"]["ctx"][0])

        self.system_dict["local"]["model"].hybridize()
        out = self.system_dict["local"]["model"](data)

        self.system_dict["local"]["model"].export("model", epoch=0)

        import netron
        if not port:
            netron.start('model-symbol.json')
        else:
            netron.start('model-symbol.json', port=port)
    ###############################################################################################################################################
Example #5
def execute(args):
    project_path = os.getcwd()
    model = get_model(args.model, args.param, project_path)
    sess = tf.Session()
    tf.train.write_graph(sess.graph_def, project_path + "/debug",
                         'model.pbtxt')
    netron.start(project_path + "/debug/model.pbtxt")
Example #6
def network_visualizer(model):

    print(
        '---We are using Open Source Library Netron for Network Visualization---'
    )
    print('--- Support Netron at https://github.com/lutzroeder/netron ---')

    netron.start(model)
Example #7
def modelview():
    netron.start(
        file="C:\\Users\\ruthv\\PycharmProjects\\LOGytics\\model\\lstm_sentence.hdf5"
    )
    text = open('static/summary/' + 'lstm_sentence_summary' + '.txt', 'r+')
    summary = text.read()
    return render_template('index.html', t=title, h=heading, summary=summary)
Example #8
    def run(self):
        while True:
            if self.model_path != "":
                print("start")
                netron.start(self.model_path, browse=False, host="0.0.0.0")
                print("end")
                self.model_path = ""
            print("sleeping")
            time.sleep(1)
Example #9
def execute(args):
    project_path = os.getcwd()
    #model = get_model(args.model, args.param, project_path)
    #sess = tf.Session()
    #tf.train.write_graph(sess.graph_def, project_path + "/debug", 'model.pbtxt')
    #netron.start(project_path+"/debug/model.pbtxt")
    tflite_path = project_path + "/result/model_" + str(args.model) + \
                "_" + str(args.param) + "_" + args.dataset
    netron.start(tflite_path + "/model_tflite.tflite")
Example #10
def visualize():

    if request.method == 'POST':
        response_object = {'status': 'success'}
        temp_model = request.files['file']
        model_name = temp_model.filename

        request.files['file'].save(model_name)

        netron.start(model_name, browse=False, host='0.0.0.0')

    return jsonify(response_object)
Example #11
def load_img_and_run(model_path, data_path, model_name, show_netron=False):
    print('{}...'.format(model_name))
    onnx_model = onnx.load(model_path)
    opset = onnx_model.opset_import[0].version
    print('Opset from:', opset)
    if show_netron:
        netron.start(model_path, port=9930)
    before_opset = onnx_model.opset_import[0].version
    # Preprocess image
    params_name = [param.name for param in onnx_model.graph.initializer]
    for input_tensor in onnx_model.graph.input:
        if input_tensor.name not in params_name:
            input_name = input_tensor.name
            input_shape = tuple(
                dim.dim_value
                for dim in input_tensor.type.tensor_type.shape.dim)
            break
    if model_name == 'efficientnet-b1':
        tfms = transforms.Compose([
            transforms.Resize((input_shape[-2], input_shape[-1])),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        data = np.array(tfms(Image.open(data_path)), "float32")
    else:
        data = np.array(Image.open(data_path), "float32")
    new_shape = [1 for s in range(len(input_shape) - len(data.shape))]
    new_shape.extend(data.shape)
    data = data.reshape(new_shape)
    inputs = {input_name: data}
    shape_dict = {k: v.shape for k, v in inputs.items()}
    output_shape = [
        dim.dim_value
        for dim in onnx_model.graph.output[0].type.tensor_type.shape.dim
    ]
    mod, params = relay.frontend.from_onnx(onnx_model, shape=shape_dict)
    rt_mod = get_rt_mod(inputs, mod, params)
    before_output = rt_mod.get_output(0, tvm.nd.empty(output_shape,
                                                      'float32')).asnumpy()
    onnx_model = relay.frontend.to_onnx(mod, params, model_name, opset=opset)
    onnx.save(onnx_model, 'model.onnx')
    if show_netron:
        netron.start('model.onnx', port=3030)
    onnx_model = onnx.load('model.onnx')
    after_opset = onnx_model.opset_import[0].version
    mod, params = relay.frontend.from_onnx(onnx_model, shape=shape_dict)
    rt_mod = get_rt_mod(inputs, mod, params)
    after_output = rt_mod.get_output(0, tvm.nd.empty(output_shape,
                                                     'float32')).asnumpy()

    assert np.array_equal(before_output,
                          after_output), 'The outputs are different!'
Example #12
    def Visualize_With_Netron(self, data_shape=None, port=None):
        '''
        Visualize network with netron library 

        Args:
            data_shape (tuple): Input shape of data in format C, H, W
            port (int): Local host free port.

        Returns:
            None
        '''
        self.custom_print("Using Netron To Visualize")
        self.custom_print("Not compatible on kaggle")
        self.custom_print("Compatible only for Jupyter Notebooks")

        if not data_shape:
            self.custom_print("Provide data_shape argument")
            return  # without a shape, c/h/w below would be undefined
        else:
            c, h, w = data_shape

        # Input to the model
        x = torch.randn(1, c, h, w, requires_grad=True)
        x = x.to(self.system_dict["local"]["device"])
        torch_out = self.system_dict["local"]["model"](x)

        # Export the model
        torch.onnx.export(
            self.system_dict["local"]["model"],  # model being run
            x,  # model input (or a tuple for multiple inputs)
            "model.onnx",  # where to save the model (can be a file or file-like object)
            export_params=True,  # store the trained parameter weights inside the model file
            opset_version=10,  # the ONNX version to export the model to
            do_constant_folding=True,  # whether to execute constant folding for optimization
            input_names=['input'],  # the model's input names
            output_names=['output'],  # the model's output names
            dynamic_axes={
                'input': {0: 'batch_size'},  # variable length axes
                'output': {0: 'batch_size'}
            })

        import netron
        if (not port):
            netron.start('model.onnx')
        else:
            netron.start('model.onnx', port=port)
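A small optional sanity check before launching Netron (standard onnx API; a sketch, not part of the original snippet):

import onnx
exported = onnx.load("model.onnx")
onnx.checker.check_model(exported)  # raises if the exported graph is malformed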
Example #13
def visualize():
    """
    Start netron with model from request.
    Returns:
        JSON response indicating success.
    """
    if request.method == 'POST':
        response_object = {'status': 'success'}
        temp_model = request.files['file']
        model_name = temp_model.filename

        request.files['file'].save(model_name)

        netron.start(model_name, browse=False, host='0.0.0.0')

    return jsonify(response_object)
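If the server later needs to be shut down from the same process, the netron package also exposes a stop() helper; a minimal sketch, assuming a recent netron release:

import netron
netron.stop()  # stops the most recently started Netron server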
Example #14
def load_pb_and_run(model_path, data_path, model_name, show_netron=False):
    print('{}...'.format(model_name))
    onnx_model = onnx.load_model(model_path)
    opset = onnx_model.opset_import[0].version
    print('Opset from:', opset)
    if show_netron:
        netron.start(model_path)
    new_tensor = onnx.TensorProto()  # first create an empty TensorProto object
    with open(data_path, 'rb') as f:
        new_tensor.ParseFromString(f.read())
    before_opset = onnx_model.opset_import[0].version
    params_name = [param.name for param in onnx_model.graph.initializer]
    shape_dict = {}
    data = numpy_helper.to_array(new_tensor)
    for input_tensor in onnx_model.graph.input:
        if input_tensor.name not in params_name:
            input_name = input_tensor.name
            input_shape = []
            for i, dim in enumerate(input_tensor.type.tensor_type.shape.dim):
                input_shape.append(dim.dim_value if isinstance(
                    dim.dim_value, int) else data.shape[i])
            shape_dict[input_name] = input_shape
    data = data.reshape(input_shape)
    inputs = {input_name: data}
    output_shape = [
        dim.dim_value
        for dim in onnx_model.graph.output[0].type.tensor_type.shape.dim
    ]
    mod, params = relay.frontend.from_onnx(onnx_model, shape=shape_dict)
    #print(before)
    rt_mod = get_rt_mod(inputs, mod, params)
    before_output = rt_mod.get_output(0, tvm.nd.empty(output_shape,
                                                      'float32')).asnumpy()
    onnx_model = relay.frontend.to_onnx(mod, params, model_name, opset=opset)
    onnx.save(onnx_model, 'model.onnx')
    if show_netron:
        netron.start('model.onnx', port=3030)
    onnx_model = onnx.load('model.onnx')
    after_opset = onnx_model.opset_import[0].version
    print(after_opset)
    mod, params = relay.frontend.from_onnx(onnx_model, shape=shape_dict)
    rt_mod = get_rt_mod(inputs, mod, params)
    after_output = rt_mod.get_output(0, tvm.nd.empty(output_shape,
                                                     'float32')).asnumpy()
    assert np.array_equal(before_output,
                          after_output), 'The outputs are different!'
Example #15
def visual_mode(mode_type='netron',
                pattern_name=None,
                pattern_idx=0,
                graph_path=None):
    def get_graph_def(pattern_name=None, pattern_idx=0, graph_path=None):
        assert (pattern_name is not None) or (graph_path is not None), \
                "at least one of pattern_name or graph_path must be provided."
        if graph_path is not None:
            name = graph_path.split('/')[-1]
            with open(graph_path, "rb") as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
        else:
            name = pattern_name
            graph_def = RegistPattern.get_patterns(pattern_name)[pattern_idx]
        return graph_def, name

    graph_def, name = get_graph_def(pattern_name=pattern_name,
                                    pattern_idx=pattern_idx,
                                    graph_path=graph_path)
    if mode_type == 'netron':
        tmp_dir = tempfile.mkdtemp()
        model_path = tmp_dir + "/" + name
        with open(model_path, "wb") as f:
            f.write(graph_def.SerializeToString())
        netron.start(file=model_path,
                     host=get_ip_address(),
                     port=flags.FLAGS.pt)
        shutil.rmtree(tmp_dir)

    else:  # type == 'tf'
        with Session() as sess:
            tmp_dir = tempfile.mkdtemp()
            tf.import_graph_def(graph_def)
            train_writer = summary.FileWriter(tmp_dir)
            train_writer.add_graph(sess.graph)
            train_writer.flush()
            train_writer.close()
            tb = program.TensorBoard(default.get_plugins())
            tb.configure(argv=[
                None, '--logdir', tmp_dir, '--port', flags.FLAGS.pt, '--host',
                get_ip_address()
            ])
            tb.main()
            shutil.rmtree(tmp_dir)
Example #16
def visualization(filename):
    file_name = onnx_folder_path + filename
    print(file_name)
    model_name = str(os.path.basename(file_name))
    file_size = (str(os.path.getsize(file_name)) + ' B')
    last_times = str(time.ctime(os.path.getmtime(file_name)))
    input_type, output_type = onnxruntime_imformation(file_name)
    model_type = onnx_type(file_name)
    print('model_type info =', model_type)

    netron.start(file=file_name, browse=False, port=netron_port, host=server)
    return render_template('netron_wrapper.html',
                           netron_addr=f'http://{server}:{netron_port}',
                           models_name=model_name,
                           models_type=model_type,
                           files_size=file_size,
                           last_time=last_times,
                           input_type=input_type,
                           output_type=output_type)
Example #17
def showInNetron(model_filename: str, address: str = None, port: int = 8081):
    """Shows the ONNX model in Jupyter Notebook.

    Args:
        model_filename (str): the path to the model file to show.
        address (str, optional): The IP address used by Netron to show the model
        graph. Defaults to None.
        port (int, optional): The port number used by Netron to show the model
        graph. Defaults to 8081.

    Returns:
        IFrame: The IFrame where the model is shown.
    """
    if address is not None:
        address = netron.start(
            file=model_filename, address=(address, port), browse=False
        )
    else:
        address = netron.start(file=model_filename, address=port, browse=False)
    return IFrame(src=f"http://{address[0]}:{address[1]}/", width="100%", height=400)
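Note that netron.start here returns the (host, port) pair it actually bound, which is why the IFrame URL above is built from address[0] and address[1] rather than from the function's own arguments.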
Example #18
    def Visualize_With_Netron(self, data_shape=None, port=None):
        '''
        Visualize network with netron library 

        Args:
            data_shape (tuple): Input shape of data in format C, H, W
            port (int): Local host free port.

        Returns:
            None
        '''
        self.custom_print("Using Netron To Visualize")
        self.custom_print("Not compatible on kaggle")
        self.custom_print("Compatible only for Jupyter Notebooks")

        if not data_shape:
            c, h, w = self.system_dict["dataset"]["params"]["data_shape"]
        else:
            c, h, w = data_shape

        data = mx.nd.random.randn(1, c, h, w)
        if (self.system_dict["model"]["params"]["use_gpu"]):
            self.system_dict["local"]["ctx"] = [mx.gpu(0)]
        else:
            self.system_dict["local"]["ctx"] = [mx.cpu()]

        data = data.copyto(self.system_dict["local"]["ctx"][0])

        self.system_dict["local"]["model"].hybridize()
        out = self.system_dict["local"]["model"](data)

        self.system_dict["local"]["model"].export("model", epoch=0)

        import netron
        if (not port):
            netron.start('model-symbol.json')
        else:
            netron.start('model-symbol.json', port=port)
Example #19
    def get_topk(self, text):
        input_len = len(text)
        text2id, segments = self.get_id_from_text(text)
        self.model.eval()

        # Launch netron
        onnx_path = '../onnx_model_name.onnx'
        torch.onnx.export(self.model, (text2id, segments), onnx_path)
        netron.start(onnx_path)

        with torch.no_grad():
            result = []
            output_tensor = self.model(text2id, segments)[:, 1:input_len + 1, :]
            output_tensor = torch.nn.Softmax(dim=-1)(output_tensor)
            output_topk_prob = torch.topk(output_tensor, 5).values.squeeze(0).tolist()
            output_topk_indice = torch.topk(output_tensor, 5).indices.squeeze(0).tolist()
            for i, words in enumerate(output_topk_indice):
                tmp = []
                for j, candidate in enumerate(words):
                    word = self.roberta_data.tokenizer.id_to_token(candidate)
                    tmp.append(word)
                result.append(tmp)
        return result, output_topk_prob
Example #20
import netron
netron.start('./nanodet-simple.onnx', port=3344)

Example #21
import netron

netron.start("./model/wordavg-model.pth")
Example #22
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()



interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
print(output_data)
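The fragment above assumes an interpreter has already been constructed and fed; a minimal sketch of that missing setup, assuming a TFLite model file (the path is hypothetical):

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="model.tflite")  # hypothetical path
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
# Feed a dummy input matching the model's expected shape before invoke():
dummy = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy)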



## To visualize and download the weights from tflite model
import netron
netron.start(model_path_pruned_quantized, port=8000, host='localhost')



tflite_weights_path = 'Models/tflite'
weights = []

for file in os.listdir(tflite_weights_path):
    kernel = np.load(os.path.join(tflite_weights_path,file))
    print(kernel.shape)
    weights.extend(kernel.flatten())
print(len(weights))



plt.figure()
Example #23
class model(nn.Module):
    def __init__(self):
        super(model, self).__init__()
        self.block1 = nn.Sequential(
            nn.Conv2d(64, 64, 3, padding=1, bias=False), nn.BatchNorm2d(64),
            nn.ReLU(inplace=True), nn.Conv2d(64, 32, 1, bias=False),
            nn.BatchNorm2d(32), nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, padding=1, bias=False), nn.BatchNorm2d(64))

        self.conv1 = nn.Conv2d(3, 64, 3, padding=1, bias=False)
        self.output = nn.Sequential(nn.Conv2d(64, 1, 3, padding=1, bias=True),
                                    nn.Sigmoid())

    def forward(self, x):
        x = self.conv1(x)
        identity = x
        x = F.relu(self.block1(x) + identity)
        x = self.output(x)
        return x


d = torch.rand(1, 3, 416, 416)
m = model()
o = m(d)

onnx_path = "onnx_model_name.onnx"
torch.onnx.export(m, d, onnx_path)

netron.start(onnx_path)
Example #24
    def show_from_onnx(self, onnx_path):
        netron.start(onnx_path, port=9999, host='localhost')
Example #25
import netron

netron.start(
    '/home/sysman/zlf/yolov5-master/runs/original_v5s_weights/weights/best.onnx'
)
Example #26
import netron

# Change it to False to skip viewing the optimized model in browser.
enable_netron = True
if enable_netron:
    netron.start("onnx/optimized_model_cpu.onnx")
    # netron.start("onnx/bert-base-cased.onnx")
    # netron.start("onnx/vinai/phobert-base.onnx")
    # netron.start("onnx/phobert-base-formaskedlm.onnx")
Example #27
import netron
netron.start('./effnetv2.onnx', port=3344)

onnx.checker.check_model(test)

# Print the ONNX computation graph
# print(onnx.helper.printable_graph(test.graph))
print("\nonnx output ==> Passed!\n")

# Compute the output error introduced by the conversion
onnx_model = onnx.load_model(export_onnx_file)  # load the ONNX model and rebuild it
sess = ort.InferenceSession(onnx_model.SerializeToString())  # initialize the inference session
sess.set_providers(['CPUExecutionProvider'])  # run the model on the CPU
input_name = sess.get_inputs()[0].name  # read the network's input name
output_name = sess.get_outputs()[0].name  # read the network's output name
onnx_output = sess.run([output_name], {input_name: x.cpu().numpy()})  # run ONNX inference

# Compute the conversion error
evalue = np.absolute(np.mean(torch_output_value - onnx_output))
print("\ntorch to onnx error: ", evalue)

# Show the network output and the structure graph
session = ort.InferenceSession(export_onnx_file)  # create a runtime session, similar to TensorFlow
out_r = session.run(None, {"input": np.random.rand(1, 3, 112, 112).astype('float32')})  # the input must be a numpy array

# Show the network output shape
print('network output shape:')
print(out_r[0].shape)

# Show the structure graph
netron.start(export_onnx_file)
Example #29
import netron

netron.start('models/banknote_best.onnx', port=8081)
Example #30
import netron

netron.start(None)
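Passing None starts the Netron server without preloading a model; the browser UI then lets you open a model file manually (behavior as of recent netron releases).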