def get_cat_image():
    """Fetch the shared cat test image and return it as a (1, 3, 224, 224) array."""
    cat_url = 'https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png'
    local_name = 'cat.png'
    cached_path = download_testdata(cat_url, local_name, module='data')
    resized = Image.open(cached_path).resize((224, 224))
    # HWC -> CHW, then prepend a batch axis.
    batched = np.transpose(resized, (2, 0, 1))[np.newaxis, :]
    return np.asarray(batched)
def get_workload_official(model_url, model_sub_path):
    """ Import workload from tensorflow official

    Parameters
    ----------
    model_url: str
        URL from where it will be downloaded.

    model_sub_path: str
        Sub path in the extracted tar for the frozen protobuf file.

    Returns
    -------
    model_path: str
        Full path to the extracted frozen protobuf file.
    """
    import tarfile

    model_tar_name = os.path.basename(model_url)
    model_path = download_testdata(model_url, model_tar_name,
                                   module=['tf', 'official'])
    dir_path = os.path.dirname(model_path)

    if model_path.endswith(("tgz", "gz")):
        # Context manager guarantees the archive handle is closed even if
        # extraction raises (the original leaked it on error).
        with tarfile.open(model_path) as tar:
            tar.extractall(path=dir_path)
    else:
        raise RuntimeError('Could not decompress the file: ' + model_path)
    return os.path.join(dir_path, model_sub_path)
def get_workload_ptb():
    """ Import ptb workload from frozen protobuf

    Returns
    -------
    word_to_id : dict
        English word to integer id mapping

    id_to_word : dict
        Integer id to English word mapping

    graph_def: graphdef
        graph_def is the tensorflow workload for ptb.
    """
    import tarfile

    sample_repo = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
    sample_data_file = 'simple-examples.tgz'
    sample_url = sample_repo + sample_data_file
    ptb_model_file = 'RNN/ptb/ptb_model_with_lstmblockcell.pb'

    file_path = download_testdata(sample_url, sample_data_file,
                                  module=['data', 'ptb_data'])
    dir_path = os.path.dirname(file_path)
    # 'with' closes the archive deterministically; the original opened it and
    # never closed it.
    with tarfile.open(file_path, 'r') as t:
        t.extractall(dir_path)

    word_to_id, id_to_word = _create_ptb_vocabulary(dir_path)
    return word_to_id, id_to_word, get_workload(ptb_model_file)
def download_image():
    """Download a cat image and resize it to 224x224 which fits resnet.

    Returns
    -------
    image : PIL.Image.Image
        The loaded and resized image.
    """
    from matplotlib import pyplot as plt
    from PIL import Image

    print("Downloading cat image...")
    cat_url = "https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true"
    local_name = "cat.png"
    cached = download_testdata(cat_url, local_name, module='data')
    resized = Image.open(cached).resize((224, 224))
    print("- Cat image downloaded!")
    # Display the image so the user can sanity-check the download.
    plt.imshow(resized)
    plt.show()
    return resized
def get_workload(model_path, model_sub_path=None):
    """ Import workload from frozen protobuf

    Parameters
    ----------
    model_path: str
        model_path on remote repository to download from.

    model_sub_path: str, optional
        Model path in the compressed archive.

    Returns
    -------
    graph_def: graphdef
        The parsed GraphDef; it is also imported into the default TF graph
        as a side effect.
    """
    if model_sub_path:
        path_model = get_workload_official(model_path, model_sub_path)
    else:
        repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/'
        # URLs always use '/', so join by concatenation; os.path.join is a
        # filesystem-path API and is not appropriate for URLs.
        model_url = repo_base + model_path
        path_model = download_testdata(model_url, model_path, module='tf')

    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(path_model, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # Called for its side effect of registering the ops in the default
        # graph; the returned handle was never used.
        tf.import_graph_def(graph_def, name='')
    return graph_def
def download_synset():
    """Download a dictionary from class index to name.

    This lets us know what our prediction actually is.

    Returns
    -------
    synset : dict[int -> str]
        The loaded synset.
    """
    import ast

    print("Downloading synset...")
    url = "https://gist.githubusercontent.com/zhreshold/" + \
          "4d0b62f3d01426887599d4f7ede23ee5/raw/" + \
          "596b27d23537e5a1b5751d2b0481ef172f58b539/" + \
          "imagenet1000_clsid_to_human.txt"
    file_name = "imagenet1000_clsid_to_human.txt"
    file_path = download_testdata(url, file_name, module='data')
    with open(file_path) as f:
        # The file is a Python dict literal. literal_eval parses it without
        # executing arbitrary code, unlike the eval() used previously, which
        # would run anything the (downloaded) file contains.
        synset = ast.literal_eval(f.read())
    print("- Synset downloaded!")
    return synset
def get_real_image(im_height, im_width):
    """Download the cat test image resized to (im_height, im_width)."""
    cat_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    cached = download_testdata(cat_url, "cat.png", module="data")
    return Image.open(cached).resize((im_height, im_width))
#target = 'cuda' #target_host = 'llvm' #layout = "NCHW" #ctx = tvm.gpu(0) target = 'llvm' target_host = 'llvm' layout = None ctx = tvm.cpu(0) ###################################################################### # Download required files # ----------------------- # Download files listed above. from tvm.contrib.download import download_testdata img_path = download_testdata(image_url, img_name, module='data') model_path = download_testdata(model_url, model_name, module=['tf', 'InceptionV1']) map_proto_path = download_testdata(map_proto_url, map_proto, module='data') label_path = download_testdata(label_map_url, label_map, module='data') ###################################################################### # Import model # ------------ # Creates tensorflow graph definition from protobuf file. with tf_compat_v1.gfile.GFile(model_path, 'rb') as f: graph_def = tf_compat_v1.GraphDef() graph_def.ParseFromString(f.read()) graph = tf.import_graph_def(graph_def, name='')
# .. code-block:: bash # # cd /workspace/apps/android_rpc # python tests/android_rpc_test.py # ###################################################################### # Load pretrained keras model # ---------------------------- # We load a pretrained MobileNetV2(alpha=0.5) classification model provided by keras. keras.backend.clear_session() # Destroys the current TF graph and creates a new one. weights_url = ''.join(['https://github.com/JonathanCMitchell/', 'mobilenet_v2_keras/releases/download/v1.1/', 'mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5']) weights_file = 'mobilenet_v2_weights.h5' weights_path = download_testdata(weights_url, weights_file, module='keras') keras_mobilenet_v2 = MobileNetV2(alpha=0.5, include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000) keras_mobilenet_v2.load_weights(weights_path) ###################################################################### # In order to test our model, here we download an image of cat and # transform its format. img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true' img_name = 'cat.png' img_path = download_testdata(img_url, img_name, module='data') image = Image.open(img_path).resize((224, 224)) dtype = 'float32' def transform_image(image): image = np.array(image) - np.array([123., 117., 104.])
from tvm.contrib.download import download_testdata from vta.testing import simulator from vta.top import graph_pack # Make sure that TVM was compiled with RPC=1 assert tvm.runtime.enabled("rpc") ############################################################################## # Download yolo net configure file, weight file, darknet library file based on # Model Name # ---------------------------------------------------------------------------- MODEL_NAME = 'yolov3-tiny' REPO_URL = 'https://github.com/dmlc/web-data/blob/master/darknet/' cfg_path = download_testdata( 'https://github.com/pjreddie/darknet/blob/master/cfg/' + MODEL_NAME + '.cfg' + '?raw=true', MODEL_NAME + '.cfg', module="darknet") weights_path = download_testdata('https://pjreddie.com/media/files/' + MODEL_NAME + '.weights' + '?raw=true', MODEL_NAME + '.weights', module="darknet") if sys.platform in ['linux', 'linux2']: darknet_lib_path = download_testdata(REPO_URL + 'lib/' + 'libdarknet2.0.so' + '?raw=true', 'libdarknet2.0.so', module="darknet") elif sys.platform == 'darwin': darknet_lib_path = download_testdata(REPO_URL + 'lib_osx/' + 'libdarknet_mac2.0.so' + '?raw=true',
import numpy as np
import tvm
import tvm.relay as relay
from tvm.contrib.download import download_testdata

######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model in onnx tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# we skip the pytorch model construction part, and download the saved onnx model
model_url = ''.join(['https://gist.github.com/zhreshold/',
                     'bcda4716699ac97ea44f791c24310193/raw/',
                     '93672b029103648953c4e5ad3ac3aadf346a4cdc/',
                     'super_resolution_0.2.onnx'])
model_path = download_testdata(model_url, 'super_resolution.onnx', module='onnx')
# now you have super_resolution.onnx on disk
onnx_model = onnx.load(model_path)

######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image

img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_path = download_testdata(img_url, 'cat.png', module='data')
img = Image.open(img_path).resize((224, 224))
# Super resolution operates on the luma (Y) channel only.
img_ycbcr = img.convert("YCbCr")  # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
# Shape (1, 1, H, W): batch and channel axes for the network input.
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
from tvm.contrib.download import download_testdata
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
from matplotlib import pyplot as plt

# Fetch a pretrained ResNet-18 from the Gluon model zoo.
block = get_model("resnet18_v1", pretrained=True)

img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_name = "cat.png"
synset_url = "".join(
    [
        "https://gist.githubusercontent.com/zhreshold/",
        "4d0b62f3d01426887599d4f7ede23ee5/raw/",
        "596b27d23537e5a1b5751d2b0481ef172f58b539/",
        "imagenet1000_clsid_to_human.txt",
    ]
)
synset_name = "imagenet1000_clsid_to_human.txt"
img_path = download_testdata(img_url, "cat.png", module="data")
synset_path = download_testdata(synset_url, synset_name, module="data")
# NOTE(review): eval() on a downloaded file executes arbitrary code; the file
# is expected to be a dict literal -- ast.literal_eval would be safer.
with open(synset_path) as f:
    synset = eval(f.read())
image = Image.open(img_path).resize((224, 224))
plt.imshow(image)
plt.show()


def transform_image(image):
    # ImageNet mean/std normalization, HWC -> CHW, then add a batch axis.
    image = np.array(image) - np.array([123.0, 117.0, 104.0])
    image /= np.array([58.395, 57.12, 57.375])
    image = image.transpose((2, 0, 1))
    image = image[np.newaxis, :]
    return image
"ssd_512_resnet50_v1_coco", "ssd_512_resnet101_v2_voc", "ssd_512_mobilenet1.0_voc", "ssd_512_mobilenet1.0_coco", "ssd_300_vgg16_atrous_voc" "ssd_512_vgg16_atrous_coco", ] model_name = supported_model[0] dshape = (1, 3, 512, 512) ###################################################################### # Download and pre-process demo image im_fname = download_testdata( "https://github.com/dmlc/web-data/blob/main/" + "gluoncv/detection/street_small.jpg?raw=true", "street_small.jpg", module="data", ) x, img = data.transforms.presets.ssd.load_test(im_fname, short=512) ###################################################################### # Convert and compile model for CPU. block = model_zoo.get_model(model_name, pretrained=True) def build(target): mod, params = relay.frontend.from_mxnet(block, {"data": dshape}) with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target, params=params) return lib
"""Store for onnx examples and common models."""
from __future__ import absolute_import as _abs
import os
import logging
from .super_resolution import get_super_resolution
from tvm.contrib.download import download_testdata

# Map of local file name -> remote URL for each cached ONNX model.
URLS = {
    'super_resolution.onnx': 'https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/super_resolution_0.2.onnx',
    'squeezenet1_1.onnx': 'https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/squeezenet1_1_0.2.onnx',
    'lenet.onnx': 'https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/lenet_0.2.onnx',
    'resnet18_1_0.onnx': 'https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/b385b1b242dc89a35dd808235b885ed8a19aedc1/resnet18_1.0.onnx'}

# download and add paths
for k, v in URLS.items():
    name = k.split('.')[0]
    relpath = os.path.join('onnx', k)
    abspath = download_testdata(v, relpath, module='onnx')
    # At module level, locals() is the module namespace, so this exposes the
    # downloaded path of each model as a module attribute (e.g. `lenet`).
    locals()[name] = abspath

# symbol for graph comparison
super_resolution_sym = get_super_resolution()
model = TraceWrapper(model_func(pretrained=True))
model.eval()
# Random NCHW tensor purely to drive the trace; values are irrelevant.
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))

with torch.no_grad():
    out = model(inp)
    script_module = do_trace(model, inp)

######################################################################
# Download a test image and pre-process
# -------------------------------------
img_url = ("https://raw.githubusercontent.com/dmlc/web-data/"
           "master/gluoncv/detection/street_small.jpg")
img_path = download_testdata(img_url, "test_street_small.jpg", module="data")

img = cv2.imread(img_path).astype("float32")
img = cv2.resize(img, (in_size, in_size))
# OpenCV loads BGR; convert to RGB, scale to [0, 1], and go HWC -> NCHW.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img / 255.0, [2, 0, 1])
img = np.expand_dims(img, axis=0)

######################################################################
# Import the graph to Relay
# -------------------------
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(script_module, shape_list)

######################################################################
#target_host = 'llvm' layout = "NCHW" #ctx = tvm.gpu(0) target = 'llvm' target_host = 'llvm' #layout = None ctx = tvm.cpu(0) ###################################################################### # Download required files # ----------------------- # Download files listed above. from tvm.contrib.download import download_testdata model_path = '/home/dolphin/Downloads/frozen_inference_graph_fuse_bn_640x360.pb' img_path = download_testdata(image_url, img_name, module='data') ###################################################################### # Import model # ------------ # Creates tensorflow graph definition from protobuf file. with tf.gfile.FastGFile(model_path, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) graph = tf.import_graph_def(graph_def, name='') # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def) # Add shapes to the graph. #with tf.Session() as sess: #graph_def = tf_testing.AddShapesToGraphDef(sess, 'softmax')
tar.close() else: raise RuntimeError("Could not decompress the file: " + path) ###################################################################### # Load pretrained TFLite model # ---------------------------- # Load mobilenet V1 TFLite model provided by Google from tvm.contrib.download import download_testdata model_url = "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz" # Download model tar file and extract it to get mobilenet_v1_1.0_224.tflite model_path = download_testdata(model_url, "mobilenet_v1_1.0_224.tgz", module=["tf", "official"]) model_dir = os.path.dirname(model_path) extract(model_path) # Now we can open mobilenet_v1_1.0_224.tflite tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224.tflite") tflite_model_buf = open(tflite_model_file, "rb").read() # Get TFLite model from buffer try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model
def _load_net(cfg_url, cfg_name, weights_url, weights_name):
    """Fetch a darknet cfg/weights pair and load the network through LIB."""
    cfg_file = download_testdata(cfg_url, cfg_name, module="darknet")
    weights_file = download_testdata(weights_url, weights_name, module="darknet")
    return LIB.load_network(cfg_file.encode("utf-8"),
                            weights_file.encode("utf-8"), 0)
import numpy as np
import tvm
from tvm import te
from tvm.contrib import graph_runtime
from tvm.contrib.download import download_testdata

# Prevent pytest from collecting the imported helper as a test.
download_testdata.__test__ = False

from tvm.relay.testing.darknet import LAYERTYPE
from tvm.relay.testing.darknet import __darknetffi__
from tvm.relay.frontend.darknet import ACTIVATION
from tvm import relay

REPO_URL = "https://github.com/dmlc/web-data/blob/main/darknet/"
DARKNET_LIB = "libdarknet2.0.so"
DARKNETLIB_URL = REPO_URL + "lib/" + DARKNET_LIB + "?raw=true"
# Open the darknet shared library through CFFI; the download is cached.
LIB = __darknetffi__.dlopen(download_testdata(DARKNETLIB_URL, DARKNET_LIB, module="darknet"))

DARKNET_TEST_IMAGE_NAME = "dog.jpg"
DARKNET_TEST_IMAGE_URL = REPO_URL + "data/" + DARKNET_TEST_IMAGE_NAME + "?raw=true"
DARKNET_TEST_IMAGE_PATH = download_testdata(
    DARKNET_TEST_IMAGE_URL, DARKNET_TEST_IMAGE_NAME, module="data"
)


def astext(program, unify_free_vars=False):
    """check that program is parsable in text format"""
    text = program.astext()
    if isinstance(program, relay.Expr):
        roundtrip_program = tvm.parser.parse_expr(text)
    else:
        roundtrip_program = tvm.parser.fromtext(text)
    # NOTE(review): the function body continues beyond this fragment.
def get_real_image(im_height, im_width):
    """Fetch the InceptionV1 elephant sample image at the requested size."""
    repo_base = 'https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/'
    img_name = 'elephant-299.jpg'
    cached = download_testdata(os.path.join(repo_base, img_name),
                               img_name, module='data')
    return Image.open(cached).resize((im_height, im_width))
import tvm
import tvm.relay as relay
from tvm.contrib.download import download_testdata
import keras
import numpy as np

######################################################################
# Load pretrained keras model
# ----------------------------
# We load a pretrained resnet-50 classification model provided by keras.
weights_url = ''.join([
    'https://github.com/fchollet/deep-learning-models/releases/',
    'download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
])
weights_file = 'resnet50_weights.h5'
weights_path = download_testdata(weights_url, weights_file, module='keras')
# Instantiate the architecture without weights, then load the cached file.
keras_resnet50 = keras.applications.resnet50.ResNet50(include_top=True, weights=None,
                                                      input_shape=(224, 224, 3), classes=1000)
keras_resnet50.load_weights(weights_path)

######################################################################
# Load a test image
# ------------------
# A single cat dominates the examples!
from PIL import Image
from matplotlib import pyplot as plt
from keras.applications.resnet50 import preprocess_input

img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
# `MXNet Gluon model zoo <https://mxnet.incubator.apache.org/api/python/gluon/model_zoo.html>`_. # You can found more details about this part at tutorial :ref:`tutorial-from-mxnet`. from mxnet.gluon.model_zoo.vision import get_model from PIL import Image import numpy as np # one line to get the model block = get_model('resnet18_v1', pretrained=True) ###################################################################### # In order to test our model, here we download an image of cat and # transform its format. img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true' img_name = 'cat.png' img_path = download_testdata(img_url, img_name, module='data') image = Image.open(img_path).resize((224, 224)) def transform_image(image): image = np.array(image) - np.array([123., 117., 104.]) image /= np.array([58.395, 57.12, 57.375]) image = image.transpose((2, 0, 1)) image = image[np.newaxis, :] return image x = transform_image(image) ###################################################################### # synset is used to transform the label from number of ImageNet class to # the word human can understand. synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',
# `MXNet Gluon model zoo <https://mxnet.incubator.apache.org/api/python/gluon/model_zoo.html>`_. # You can found more details about this part at tutorial :ref:`tutorial-from-mxnet`. from mxnet.gluon.model_zoo.vision import get_model from PIL import Image import numpy as np # only one line to get the model block = get_model('resnet18_v1', pretrained=True) ###################################################################### # In order to test our model, here we download an image of cat and # transform its format. img_name = 'cat.png' img_path = download_testdata( 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true', img_name, module='data') image = Image.open(img_path).resize((224, 224)) def transform_image(image): image = np.array(image) - np.array([123., 117., 104.]) image /= np.array([58.395, 57.12, 57.375]) image = image.transpose((2, 0, 1)) image = image[np.newaxis, :] return image x = transform_image(image) ######################################################################
by the script. """ import numpy as np import tvm from tvm.contrib import graph_runtime from tvm.contrib.download import download_testdata download_testdata.__test__ = False from nnvm import frontend from nnvm.testing.darknet import LAYERTYPE from nnvm.testing.darknet import __darknetffi__ import nnvm.compiler DARKNET_LIB = 'libdarknet2.0.so' DARKNETLIB_URL = 'https://github.com/siju-samuel/darknet/blob/master/lib/' \ + DARKNET_LIB + '?raw=true' LIB = __darknetffi__.dlopen(download_testdata(DARKNETLIB_URL, DARKNET_LIB, module='darknet')) DARKNET_TEST_IMAGE_NAME = 'dog.jpg' DARKNET_TEST_IMAGE_URL = 'https://github.com/siju-samuel/darknet/blob/master/data/' + DARKNET_TEST_IMAGE_NAME +'?raw=true' DARKNET_TEST_IMAGE_PATH = download_testdata(DARKNET_TEST_IMAGE_URL, DARKNET_TEST_IMAGE_NAME, module='data') def _read_memory_buffer(shape, data, dtype='float32'): length = 1 for x in shape: length *= x data_np = np.zeros(length, dtype=dtype) for i in range(length): data_np[i] = data[i] return data_np.reshape(shape) def _get_tvm_output(net, data, build_dtype='float32'):
"https://github.com/fchollet/deep-learning-models/releases/", "download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5", ] ) weights_file = "resnet50_keras_old.h5" else: weights_url = "".join( [ " https://storage.googleapis.com/tensorflow/keras-applications/", "resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5", ] ) weights_file = "resnet50_keras_new.h5" weights_path = download_testdata(weights_url, weights_file, module="keras") keras_resnet50 = keras.applications.resnet50.ResNet50( include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000 ) keras_resnet50.load_weights(weights_path) ###################################################################### # Load a test image # ------------------ # A single cat dominates the examples! from PIL import Image from matplotlib import pyplot as plt from keras.applications.resnet50 import preprocess_input img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data")
###################################################################### # Download MXNet SSD pre-trained model and demo image # --------------------------------------------------- # Pre-trained model available at # https://github.com/apache/incubator-\mxnet/tree/master/example/ssd model_url = "https://github.com/zhreshold/mxnet-ssd/releases/download/v0.6/" \ "resnet50_ssd_512_voc0712_trainval.zip" image_url = "https://cloud.githubusercontent.com/assets/3307514/20012567/" \ "cbb60336-a27d-11e6-93ff-cbc3f09f5c9e.jpg" inference_symbol_folder = \ "c1904e900848df4548ce5dfb18c719c7-a28c4856c827fe766aa3da0e35bad41d44f0fb26" inference_symbol_url = "https://gist.github.com/kevinthesun/c1904e900848df4548ce5dfb18c719c7/" \ "archive/a28c4856c827fe766aa3da0e35bad41d44f0fb26.zip" model_file_path = download_testdata(model_url, model_file, module=["mxnet", "ssd_model"]) inference_symbol_path = download_testdata(inference_symbol_url, "inference_model.zip", module=["mxnet", "ssd_model"]) test_image_path = download_testdata(image_url, test_image, module="data") model_dir = os.path.dirname(model_file_path) zip_ref = zipfile.ZipFile(model_file_path, 'r') zip_ref.extractall(model_dir) zip_ref.close() zip_ref = zipfile.ZipFile(inference_symbol_path) zip_ref.extractall(model_dir) zip_ref.close() ###################################################################### # Convert and compile model with NNVM or Relay for CPU.
def find_of_download(model_name):
    """Resolve a model name to a locally cached copy of its download."""
    url = get_model_url(model_name)
    file_name = get_name_from_url(url)
    return download_testdata(url, file_name, module="models")
def get_cat_image():
    """Download the cat test image, keep a local copy, and preprocess it."""
    url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    cached = download_testdata(url, "cat.png", module="data")
    # Also drop a copy named cat.png into the current working directory.
    shutil.copyfile(cached, "cat.png")
    resized = Image.open(cached).resize((224, 224))
    return transform_image(resized)
def main():
    """Compile ResNet-18 for a Raspberry Pi and run it over RPC on a cat image."""
    # one line to get the model
    block = get_model('resnet18_v1', pretrained=True)

    # test model
    img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
    img_name = 'cat.png'
    img_path = download_testdata(img_url, img_name, module='data')
    image = Image.open(img_path).resize((224, 224))
    # tvm specific data path
    # print(img_path)
    x = transform_image(image)

    # label number to word dict prepped with synset
    synset_url = ''.join([
        'https://gist.githubusercontent.com/zhreshold/',
        '4d0b62f3d01426887599d4f7ede23ee5/raw/',
        '596b27d23537e5a1b5751d2b0481ef172f58b539/',
        'imagenet1000_clsid_to_human.txt'
    ])
    synset_name = 'imagenet1000_clsid_to_human.txt'
    synset_path = download_testdata(synset_url, synset_name, module='data')
    # NOTE(review): eval() on downloaded text executes arbitrary code; the
    # file is a dict literal, so ast.literal_eval would be safer.
    with open(synset_path) as f:
        synset = eval(f.read())
    # print(synset)

    # Port Gluon model to portable computational graph
    batch_size = 1
    num_classes = 1000
    image_shape = (3, 224, 224)
    data_shape = (batch_size, ) + image_shape

    shape_dict = {'data': x.shape}
    mod, params = relay.frontend.from_mxnet(block, shape_dict)
    # we want a probability so add a softmax operator
    func = mod["main"]
    func = relay.Function(func.params, relay.nn.softmax(func.body), None,
                          func.type_params, func.attrs)

    # compile the graph to run on the Raspberry Pi 3 model B
    local_demo = False

    if local_demo:
        target = tvm.target.create('llvm')
    else:
        target = tvm.target.arm_cpu('rasp3b')

    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build(func, target, params=params)

    # Save the library at local temporary directory.
    tmp = util.tempdir()
    lib_fname = tmp.relpath('net.tar')
    lib.export_library(lib_fname)

    # RPC server is running on the Rasp Pi. Get the IP address of the Rasp Pi
    # and connect to the machine to run the net compiled here with Relay:
    # obtain an RPC session from the remote device.
    if local_demo:
        remote = rpc.LocalSession()
    else:
        # The following is my environment, change this to the IP address of
        # your target device
        host = '192.168.0.10'
        port = 9090
        remote = rpc.connect(host, port)

    # upload the library to remote device and load it
    remote.upload(lib_fname)
    rlib = remote.load_module('net.tar')

    # create the remote runtime module
    ctx = remote.cpu(0)
    module = runtime.create(graph, rlib, ctx)
    # set parameter (upload params to the remote device. This may take a while)
    module.set_input(**params)
    # set input data
    module.set_input('data', tvm.nd.array(x.astype('float32')))
    # run
    module.run()
    # get output
    out = module.get_output(0)
    # get top1 result
    top1 = np.argmax(out.asnumpy())
    print('TVM prediction top-1: {}'.format(synset[top1]))
def test_tflite(temp_dir, board, west_cmd, tvm_debug):
    """Testing a TFLite model.

    Downloads a small fp32 image-classification model, compiles it with the
    AOT executor for the target board, flashes the generated project, and
    checks that the device classifies the bundled sample as class 8.
    """
    if board not in [
        "qemu_x86",
        "mps2_an521",
        "nrf5340dk_nrf5340_cpuapp",
        "nucleo_l4r5zi",
        "qemu_cortex_r5",
    ]:
        pytest.skip(msg="Model does not fit.")

    model = conftest.ZEPHYR_BOARDS[board]
    input_shape = (1, 32, 32, 3)
    output_shape = (1, 10)
    build_config = {"debug": tvm_debug}

    model_url = "https://github.com/eembc/ulpmark-ml/raw/fc1499c7cc83681a02820d5ddf5d97fe75d4f663/base_models/ic01/ic01_fp32.tflite"
    model_path = download_testdata(model_url, "ic01_fp32.tflite", module="model")

    # Import TFLite model; 'with' closes the file handle promptly instead of
    # leaking it until garbage collection.
    with open(model_path, "rb") as model_fp:
        tflite_model_buf = model_fp.read()
    try:
        import tflite

        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite.Model

        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)

    # Load TFLite model and convert to Relay.
    # BUG FIX: the dtype_dict key used to be "input_1 " (trailing space),
    # which does not match the model's actual input tensor name "input_1".
    relay_mod, params = relay.frontend.from_tflite(
        tflite_model, shape_dict={"input_1": input_shape}, dtype_dict={"input_1": "float32"}
    )

    target = tvm.target.target.micro(
        model, options=["-link-params=1", "--executor=aot", "--unpacked-api=1", "--interface-api=c"]
    )
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lowered = relay.build(relay_mod, target, params=params)

    # Load sample and generate input/output header files
    sample_url = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/testdata_image_classification_fp32_8.npy"
    sample_path = download_testdata(
        sample_url, "testdata_image_classification_fp32_8.npy", module="data"
    )
    sample = np.load(sample_path)

    with tempfile.NamedTemporaryFile() as tar_temp_file:
        # Pack the generated C interface header plus input/output data into a
        # tar that the project build consumes as extra files.
        with tarfile.open(tar_temp_file.name, "w:gz") as tf:
            with tempfile.TemporaryDirectory() as tar_temp_dir:
                model_files_path = os.path.join(tar_temp_dir, "include")
                os.mkdir(model_files_path)
                header_path = generate_c_interface_header(
                    lowered.libmod_name, ["input_1"], ["output"], model_files_path
                )
                tf.add(header_path, arcname=os.path.relpath(header_path, tar_temp_dir))

            _create_header_file("input_data", sample, "include", tf)
            _create_header_file(
                "output_data", np.zeros(shape=output_shape, dtype="float32"), "include", tf
            )

        project, _ = _build_project(
            temp_dir,
            board,
            west_cmd,
            lowered,
            build_config,
            extra_files_tar=tar_temp_file.name,
        )

    project.flash()
    with project.transport() as transport:
        timeout_read = 60
        _get_message(transport, "#wakeup", timeout_sec=timeout_read)
        transport.write(b"start\n", timeout_sec=5)
        result_line = _get_message(transport, "#result", timeout_sec=timeout_read)

    # Device replies "#result:<class>:<millis>".
    result_line = result_line.strip("\n")
    result_line = result_line.split(":")
    result = int(result_line[1])
    time = int(result_line[2])
    logging.info(f"Result: {result}\ttime: {time} ms")

    assert result == 8
import tvm from tvm import relay ###################################################################### # Download pretrained Quantized TFLite model # ------------------------------------------ # Download mobilenet V2 TFLite model provided by Google from tvm.contrib.download import download_testdata model_url = "https://storage.googleapis.com/download.tensorflow.org/models/" \ "tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz" # Download model tar file and extract it to get mobilenet_v2_1.0_224.tflite model_path = download_testdata(model_url, "mobilenet_v2_1.0_224_quant.tgz", module=['tf', 'official']) model_dir = os.path.dirname(model_path) ###################################################################### # Utils for downloading and extracting zip files # ---------------------------------------------- def extract(path): import tarfile if path.endswith("tgz") or path.endswith("gz"): dir_path = os.path.dirname(path) tar = tarfile.open(path) tar.extractall(path=dir_path) tar.close() else:
ctx = mx.gpu(0)
net = get_model("resnet18_v1", pretrained=True)
net.collect_params().reset_ctx(ctx)

# NOTE(review): `block` loads the same model a second time and is not used
# anywhere in this fragment -- likely leftover code.
block = get_model('resnet18_v1', pretrained=True)
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_name = 'cat.png'
synset_url = ''.join([
    'https://gist.githubusercontent.com/zhreshold/',
    '4d0b62f3d01426887599d4f7ede23ee5/raw/',
    '596b27d23537e5a1b5751d2b0481ef172f58b539/',
    'imagenet1000_clsid_to_human.txt'
])
synset_name = 'imagenet1000_clsid_to_human.txt'
img_path = download_testdata(img_url, 'cat.png', module='data')
synset_path = download_testdata(synset_url, synset_name, module='data')
# NOTE(review): eval() on downloaded text executes arbitrary code; the file
# is a dict literal, so ast.literal_eval would be safer.
with open(synset_path) as f:
    synset = eval(f.read())

# Load Images
img = image.imread(img_path)

# Transform
img = transform_eval(img)
img = img.as_in_context(ctx)

run_cnt = 1000
start_time = time.time()
# NOTE(review): MXNet executes asynchronously; without a blocking call such
# as mx.nd.waitall() after the loop, this timing may not cover the actual
# compute -- confirm before trusting the numbers.
for i in range(run_cnt):
    pred = net(img)
MODEL_NAME = 'rnn' #Seed value seed = 'Thus' #Number of characters to predict num = 1000 # Download required files # ----------------------- # Download cfg and weights file if first time. CFG_NAME = MODEL_NAME + '.cfg' WEIGHTS_NAME = MODEL_NAME + '.weights' REPO_URL = 'https://github.com/dmlc/web-data/blob/master/darknet/' CFG_URL = REPO_URL + 'cfg/' + CFG_NAME + '?raw=true' WEIGHTS_URL = REPO_URL + 'weights/' + WEIGHTS_NAME + '?raw=true' cfg_path = download_testdata(CFG_URL, CFG_NAME, module='darknet') weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module='darknet') # Download and Load darknet library DARKNET_LIB = 'libdarknet.so' DARKNET_URL = REPO_URL + 'lib/' + DARKNET_LIB + '?raw=true' lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module='darknet') DARKNET_LIB = __darknetffi__.dlopen(lib_path) net = DARKNET_LIB.load_network(cfg_path.encode('utf-8'), weights_path.encode('utf-8'), 0) dtype = 'float32' batch_size = 1 # Import the graph to NNVM # ------------------------ # Import darknet graph definition to nnvm.
# Target settings # Use these commented settings to build for cuda. # target = tvm.target.Target("cuda", host="llvm") # layout = "NCHW" # dev = tvm.cuda(0) target = tvm.target.Target("llvm", host="llvm") layout = None dev = tvm.cpu(0) ###################################################################### # Download required files # ----------------------- # Download files listed above. from tvm.contrib.download import download_testdata img_path = download_testdata(image_url, img_name, module="data") model_path = download_testdata(model_url, model_name, module=["tf", "InceptionV1"]) map_proto_path = download_testdata(map_proto_url, map_proto, module="data") label_path = download_testdata(label_map_url, label_map, module="data") ###################################################################### # Import model # ------------ # Creates tensorflow graph definition from protobuf file. with tf_compat_v1.gfile.GFile(model_path, "rb") as f: graph_def = tf_compat_v1.GraphDef() graph_def.ParseFromString(f.read()) graph = tf.import_graph_def(graph_def, name="")
# --------------------------------------
#
# Load the pretrained TFLite model from a file in your current
# directory into a buffer
import os
import numpy as np
import tvm
import tvm.micro as micro
from tvm.contrib.download import download_testdata
from tvm.contrib import graph_runtime, utils
from tvm import relay

model_url = "https://people.linaro.org/~tom.gall/sine_model.tflite"
model_file = "sine_model.tflite"
model_path = download_testdata(model_url, model_file, module="data")

# Read the whole model into memory. 'with' closes the file handle promptly;
# the original open(...).read() leaked it until garbage collection.
with open(model_path, "rb") as model_fp:
    tflite_model_buf = model_fp.read()

######################################################################
# Using the buffer, transform into a tflite model python object
try:
    import tflite

    tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
    # Older tflite packages expose the API one level deeper.
    import tflite.Model

    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)

######################################################################
import tvm.relay as relay
from tvm.contrib.download import download_testdata

######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model in onnx tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# we skip the pytorch model construction part, and download the saved onnx model
model_url = ''.join(['https://gist.github.com/zhreshold/',
                     'bcda4716699ac97ea44f791c24310193/raw/',
                     '93672b029103648953c4e5ad3ac3aadf346a4cdc/',
                     'super_resolution_0.2.onnx'])
model_path = download_testdata(model_url, 'super_resolution.onnx', module='onnx')
# now you have super_resolution.onnx on disk
# NOTE(review): onnx and np are presumably imported earlier in this file — confirm.
onnx_model = onnx.load(model_path)

######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image

img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_path = download_testdata(img_url, 'cat.png', module='data')
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr")  # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
# Super-resolution runs on the luminance channel only; add batch and
# channel axes to get a (1, 1, 224, 224) input.
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
""" import tvm import tvm.relay as relay from tvm.contrib.download import download_testdata import coremltools as cm import numpy as np from PIL import Image ###################################################################### # Load pretrained CoreML model # ---------------------------- # We will download and load a pretrained mobilenet classification network # provided by apple in this example model_url = 'https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel' model_file = 'mobilenet.mlmodel' model_path = download_testdata(model_url, model_file, module='coreml') # Now you have mobilenet.mlmodel on disk mlmodel = cm.models.MLModel(model_path) ###################################################################### # Load a test image # ------------------ # A single cat dominates the examples! img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true' img_path = download_testdata(img_url, 'cat.png', module='data') img = Image.open(img_path).resize((224, 224)) x = np.transpose(img, (2, 0, 1))[np.newaxis, :] ###################################################################### # Compile the model on Relay # ---------------------------
from tvm.relay.testing.darknet import __darknetffi__ # Model name MODEL_NAME = 'yolov3' ###################################################################### # Download required files # ----------------------- # Download cfg and weights file if first time. CFG_NAME = MODEL_NAME + '.cfg' WEIGHTS_NAME = MODEL_NAME + '.weights' REPO_URL = 'https://github.com/siju-samuel/darknet/blob/master/' CFG_URL = REPO_URL + 'cfg/' + CFG_NAME + '?raw=true' WEIGHTS_URL = 'https://pjreddie.com/media/files/' + WEIGHTS_NAME cfg_path = download_testdata(CFG_URL, CFG_NAME, module="darknet") weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module="darknet") # Download and Load darknet library if sys.platform in ['linux', 'linux2']: DARKNET_LIB = 'libdarknet2.0.so' DARKNET_URL = REPO_URL + 'lib/' + DARKNET_LIB + '?raw=true' elif sys.platform == 'darwin': DARKNET_LIB = 'libdarknet_mac2.0.so' DARKNET_URL = REPO_URL + 'lib_osx/' + DARKNET_LIB + '?raw=true' else: err = "Darknet lib is not supported on {} platform".format(sys.platform) raise NotImplementedError(err) lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module="darknet")
MODEL_NAME = 'rnn' #Seed value seed = 'Thus' #Number of characters to predict num = 1000 # Download required files # ----------------------- # Download cfg and weights file if first time. CFG_NAME = MODEL_NAME + '.cfg' WEIGHTS_NAME = MODEL_NAME + '.weights' REPO_URL = 'https://github.com/dmlc/web-data/blob/master/darknet/' CFG_URL = REPO_URL + 'cfg/' + CFG_NAME + '?raw=true' WEIGHTS_URL = REPO_URL + 'weights/' + WEIGHTS_NAME + '?raw=true' cfg_path = download_testdata(CFG_URL, CFG_NAME, module='darknet') weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module='darknet') # Download and Load darknet library DARKNET_LIB = 'libdarknet.so' DARKNET_URL = REPO_URL + 'lib/' + DARKNET_LIB + '?raw=true' lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module='darknet') DARKNET_LIB = __darknetffi__.dlopen(lib_path) net = DARKNET_LIB.load_network(cfg_path.encode('utf-8'), weights_path.encode('utf-8'), 0) dtype = 'float32' batch_size = 1 # Import the graph to NNVM # ------------------------ # Import darknet graph definition to nnvm. #
def test_trt_int8():
    """Compile resnet34 through the TensorRT BYOC path in int8 mode and check
    the TVM output against the original PyTorch model.

    The comparison is the cosine distance between the two outputs, asserted
    to be <= 0.01. Returns early (skips) when the codegen/runtime
    prerequisites are missing or when the optional scipy/PIL/torch
    dependencies are not installed. Also prints a mean-inference-time report.
    """
    if skip_codegen_test() or skip_runtime_test():
        return
    # FIX: the original bare `except:` clauses also swallowed SystemExit and
    # KeyboardInterrupt and hid unrelated failures; only missing imports
    # should trigger the skip.
    try:
        from PIL import Image
        from scipy.spatial import distance
    except ImportError:
        print("please install scipy and Image python packages")
        return
    try:
        import torch
        import torchvision
        from torchvision import transforms
    except ImportError:
        print("please install pytorch python package")
        return

    # Enable int8 and tell the TRT runtime how many calibration batches to expect.
    os.environ["TVM_TENSORRT_USE_INT8"] = "1"
    os.environ["TENSORRT_NUM_CALI_INT8"] = "10"

    model_name = "resnet34"
    model = getattr(torchvision.models, model_name)(pretrained=True)
    model = model.eval()

    # We grab the TorchScripted model via tracing.
    input_shape = [1, 3, 224, 224]
    input_data = torch.randn(input_shape)
    scripted_model = torch.jit.trace(model, input_data).eval()

    img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    img_path = download_testdata(img_url, "cat.png", module="data")
    img = Image.open(img_path).resize((224, 224))
    my_preprocess = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    img = my_preprocess(img)
    img = np.expand_dims(img, 0)

    input_name = "input0"
    shape_list = [(input_name, img.shape)]
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)

    # Compile the model with the TensorRT-partitioned graph for CUDA.
    target = "cuda"
    dev = tvm.cuda()
    mod = partition_for_tensorrt(mod, params)
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params=params)
    gen_module = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))

    # Feed the calibration batches before the int8 engine is (re)built.
    num_cali_int8 = int(os.environ["TENSORRT_NUM_CALI_INT8"])
    if num_cali_int8 != 0:
        print("start calibrating data ... ")
        for i in range(num_cali_int8):
            tvm_data = tvm.nd.array(img)
            gen_module.set_input(input_name, tvm_data)
            gen_module.run(data=tvm_data)
        print("finished calibrating data ... ")

    # Get output of the TVM (int8) model.
    print("rebuild engine and test to run ... ")
    tvm_data = tvm.nd.array(img)
    gen_module.set_input(input_name, tvm_data)
    gen_module.run(data=tvm_data)
    out = gen_module.get_output(0)

    # Check that the TVM output and the PyTorch output agree (cosine distance).
    torch_data = torch.from_numpy(img)
    model = scripted_model.eval()
    torch_output = model(torch_data)
    cosine_distance_res = distance.cosine(out.numpy(), torch_output.detach().cpu().numpy())
    assert cosine_distance_res <= 0.01

    # Evaluate inference time.
    print("Evaluate inference time cost...")
    ftimer = gen_module.module.time_evaluator("run", dev, repeat=10, min_repeat_ms=500)
    prof_res = np.array(ftimer().results) * 1e3  # convert to millisecond
    message = "Mean inference time (std dev): %.2f ms (%.2f ms)" % (
        np.mean(prof_res),
        np.std(prof_res),
    )
    print(message)
#target = 'cuda' #target_host = 'llvm' #layout = "NCHW" #ctx = tvm.gpu(0) target = 'llvm' target_host = 'llvm' layout = None ctx = tvm.cpu(0) ###################################################################### # Download required files # ----------------------- # Download files listed above. from tvm.contrib.download import download_testdata img_path = download_testdata(image_url, img_name, module='data') model_path = download_testdata(model_url, model_name, module=['tf', 'InceptionV1']) map_proto_path = download_testdata(map_proto_url, map_proto, module='data') label_path = download_testdata(label_map_url, label_map, module='data') ###################################################################### # Import model # ------------ # Creates tensorflow graph definition from protobuf file. with tf.gfile.FastGFile(model_path, 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) graph = tf.import_graph_def(graph_def, name='') # Call the utility to import the graph definition into default graph. graph_def = tf_testing.ProcessGraphDefParam(graph_def)
from nnvm.testing.darknet import __darknetffi__ # Model name MODEL_NAME = 'yolov3' ###################################################################### # Download required files # ----------------------- # Download cfg and weights file if first time. CFG_NAME = MODEL_NAME + '.cfg' WEIGHTS_NAME = MODEL_NAME + '.weights' REPO_URL = 'https://github.com/siju-samuel/darknet/blob/master/' CFG_URL = REPO_URL + 'cfg/' + CFG_NAME + '?raw=true' WEIGHTS_URL = 'https://pjreddie.com/media/files/' + WEIGHTS_NAME cfg_path = download_testdata(CFG_URL, CFG_NAME, module="darknet") weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module="darknet") # Download and Load darknet library if sys.platform in ['linux', 'linux2']: DARKNET_LIB = 'libdarknet2.0.so' DARKNET_URL = REPO_URL + 'lib/' + DARKNET_LIB + '?raw=true' elif sys.platform == 'darwin': DARKNET_LIB = 'libdarknet_mac2.0.so' DARKNET_URL = REPO_URL + 'lib_osx/' + DARKNET_LIB + '?raw=true' else: err = "Darknet lib is not supported on {} platform".format(sys.platform) raise NotImplementedError(err) lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module="darknet")
'ssd_512_resnet101_v2_voc', 'ssd_512_mobilenet1.0_voc', 'ssd_512_mobilenet1.0_coco', 'ssd_300_vgg16_atrous_voc' 'ssd_512_vgg16_atrous_coco', ] model_name = supported_model[0] dshape = (1, 3, 512, 512) target_list = ctx_list() ###################################################################### # Download and pre-process demo image im_fname = download_testdata('https://github.com/dmlc/web-data/blob/master/' + 'gluoncv/detection/street_small.jpg?raw=true', 'street_small.jpg', module='data') x, img = data.transforms.presets.ssd.load_test(im_fname, short=512) ###################################################################### # Convert and compile model for CPU. block = model_zoo.get_model(model_name, pretrained=True) def build(target): mod, params = relay.frontend.from_mxnet(block, {"data": dshape}) with relay.build_config(opt_level=3): graph, lib, params = relay.build(mod[mod.entry_func], target, params=params) return graph, lib, params ######################################################################
# We will use pre-trained model from # `MXNet Gluon model zoo <https://mxnet.incubator.apache.org/api/python/gluon/model_zoo.html>`_. # You can found more details about this part at tutorial :ref:`tutorial-from-mxnet`. from mxnet.gluon.model_zoo.vision import get_model from PIL import Image import numpy as np # only one line to get the model block = get_model('resnet18_v1', pretrained=True) ###################################################################### # In order to test our model, here we download an image of cat and # transform its format. img_name = 'cat.png' img_path = download_testdata('https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true', img_name, module='data') image = Image.open(img_path).resize((224, 224)) def transform_image(image): image = np.array(image) - np.array([123., 117., 104.]) image /= np.array([58.395, 57.12, 57.375]) image = image.transpose((2, 0, 1)) image = image[np.newaxis, :] return image x = transform_image(image) ###################################################################### # synset is used to transform the label from number of ImageNet class to # the word human can understand. synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',
def get_resnet50():
    """Fetch Apple's pretrained Resnet50 CoreML model and return its local path."""
    model_url = ('https://docs-assets.developer.apple.com/'
                 'coreml/models/Resnet50.mlmodel')
    return download_testdata(model_url, 'resnet50.mlmodel', module='coreml')
def _load_net(cfg_url, cfg_name, weights_url, weights_name):
    """Download a darknet cfg/weights pair and load them into a network handle.

    Both files are cached under the 'darknet' test-data module; the paths are
    passed to the darknet C API as utf-8 byte strings (final 0 = don't clear
    the loaded weights).
    """
    local_cfg = download_testdata(cfg_url, cfg_name, module='darknet')
    local_weights = download_testdata(weights_url, weights_name, module='darknet')
    return LIB.load_network(local_cfg.encode('utf-8'),
                            local_weights.encode('utf-8'), 0)
tar.extractall(path=dir_path) tar.close() else: raise RuntimeError('Could not decompress the file: ' + path) ###################################################################### # Load pretrained TFLite model # --------------------------------------------- # we load mobilenet V1 TFLite model provided by Google from tvm.contrib.download import download_testdata model_url = "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz" # we download model tar file and extract, finally get mobilenet_v1_1.0_224.tflite model_path = download_testdata(model_url, "mobilenet_v1_1.0_224.tgz", module=['tf', 'official']) model_dir = os.path.dirname(model_path) extract(model_path) # now we have mobilenet_v1_1.0_224.tflite on disk and open it tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224.tflite") tflite_model_buf = open(tflite_model_file, "rb").read() # get TFLite model from buffer import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) ###################################################################### # Load a test image # --------------------------------------------- # A single cat dominates the examples!
def download_model_zoo(model_dir, model_name, url='http://people.linaro.org/~tom.gall/model_zoo/'):
    """Fetch *model_name* from the zoo at ``url + model_dir`` and return the
    local directory the cached copy lives in.

    The file is stored under the ['tf', 'official'] test-data module.
    """
    full_url = ''.join([url, model_dir, model_name])
    local_path = download_testdata(full_url, model_name, module=["tf", "official"])
    return os.path.dirname(local_path)
def get_mobilenet():
    """Fetch Apple's pretrained MobileNet CoreML model and return its local path."""
    model_url = ('https://docs-assets.developer.apple.com/'
                 'coreml/models/MobileNet.mlmodel')
    return download_testdata(model_url, 'mobilenet.mlmodel', module='coreml')
# # The weights are trained with https://github.com/dmlc/dgl/blob/master/examples/pytorch/gcn/train.py from tvm.contrib.download import download_testdata from dgl import DGLGraph features = torch.FloatTensor(data.features) dgl_g = DGLGraph(g) torch_model = GCN(dgl_g, infeat_dim, num_hidden, num_classes, num_layers, F.relu) # Download the pretrained weights model_url = "https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_%s.torch" % ( dataset) model_path = download_testdata(model_url, "gcn_%s.pickle" % (dataset), module="gcn_model") # Load the weights into the model torch_model.load_state_dict(torch.load(model_path)) ###################################################################### # Run the DGL model and test for accuracy # --------------------------------------- torch_model.eval() with torch.no_grad(): logits_torch = torch_model(features) print("Print the first five outputs from DGL-PyTorch execution\n", logits_torch[:5]) acc = evaluate(data, logits_torch.numpy())