Example No. 1
0
 def prepare(cls, model, device='CPU', **kwargs):
     """Compile *model* with onnx-mlir and return a session for running it.

     The model is written to disk, handed to the onnx-mlir driver (which
     produces a shared library next to it), and the compiled entry point
     is wrapped in an ExecutionSession.
     """
     super(DummyBackend, cls).prepare(model, device, **kwargs)
     # Serialize the in-memory model so the compiler can read it from disk.
     model_path = "temp_model.onnx"
     onnx.save(model, model_path)
     # Run the onnx-mlir frontend on the saved model; this generates the
     # compiled artifact (temp_model.so) alongside it.
     execute_commands([ONNX_MLIR, model_path])
     # Hand back a session bound to the graph's main entry point.
     return ExecutionSession(
         "./temp_model.so", "_dyn_entry_point_main_graph")
Example No. 2
0
File: test.py Project: negiyas/ONNF
 def prepare(cls, model, device='CPU', **kwargs):
     """Compile *model* via the ONNF toolchain and return a run session.

     Pipeline: serialize the model, run the ONNF frontend to emit LLVM
     bitcode, lower the bitcode to an object file with llc, then link it
     against the C runtime into a shared library that an ExecutionSession
     can load.
     """
     super(DummyBackend, cls).prepare(model, device, **kwargs)
     # Persist the model so the ONNF frontend can consume it from disk.
     onnx.save(model, "temp_model.onnx")
     # The frontend processes temp_model.onnx and emits bitcode (model.bc).
     execute_commands([ONNF, "temp_model.onnx"])
     # Lower the bitcode to a position-independent object file.
     llc_cmd = [LLC, "-filetype=obj", "-relocation-model=pic", "model.bc"]
     execute_commands(llc_cmd)
     # Link the object file with the C runtime into a shared library.
     link_cmd = [CXX, "-shared", "-fPIC", "model.o", "-o", "model.so"]
     link_cmd.append("-L" + RUNTIME_DIR)
     link_cmd.append("-lcruntime")
     execute_commands(link_cmd)
     # Bind the compiled library's main graph entry point to a session.
     return ExecutionSession("./model.so", "_dyn_entry_point_main_graph")