def test_end2end_mobilenet_export():
    """Export the MobileNet-v1 preprocessing block and the 4-bit quantized
    MobileNet itself to FINN-ONNX, compute the golden top-5 prediction with
    PyTorch/Brevitas on a fixed test image, and save all artifacts under
    ``build_dir`` for the downstream end-to-end test stages.
    """
    # --- export the normalization preprocessing as its own ONNX model ---
    preproc_onnx = build_dir + "/end2end_mobilenet_preproc.onnx"
    channel_means = [0.485, 0.456, 0.406]
    channel_std = 0.226
    num_channels = 3
    preproc = NormalizePreProc(channel_means, channel_std, num_channels)
    bo.export_finn_onnx(preproc, (1, 3, 224, 224), preproc_onnx)
    preproc_model = ModelWrapper(preproc_onnx)
    # the preprocessing graph consumes raw 8-bit image data
    preproc_model.set_tensor_datatype(
        preproc_model.graph.input[0].name, DataType["UINT8"]
    )
    # tidy-up passes: shapes, constant folding, readable/unique names
    for tidy_pass in [
        InferShapes(),
        FoldConstants(),
        GiveUniqueNodeNames(),
        GiveUniqueParameterTensors(),
        GiveReadableTensorNames(),
    ]:
        preproc_model = preproc_model.transform(tidy_pass)
    preproc_model.save(build_dir + "/end2end_mobilenet_preproc.onnx")

    # --- export the quantized MobileNet network ---
    finn_onnx = build_dir + "/end2end_mobilenet_export.onnx"
    mobilenet = get_test_model_trained("mobilenet", 4, 4)
    bo.export_finn_onnx(mobilenet, (1, 3, 224, 224), finn_onnx)

    # --- build the golden reference output with PyTorch/Brevitas ---
    # load the test image, scale its short side to 256 keeping aspect ratio,
    # then take the central 224x224 crop
    image = Image.open("/workspace/finn/tests/brevitas/king_charles.jpg")
    image = resize_smaller_side(256, image)
    image = crop_center(224, image)
    # HWC -> CHW, add batch dim, keep a numpy copy for later FINN execution
    image_np = np.asarray(image).copy().astype(np.float32).transpose(2, 0, 1)
    image_np = image_np.reshape(1, 3, 224, 224)
    np.save(build_dir + "/end2end_mobilenet_input.npy", image_np)
    image_torch = torch.from_numpy(image_np).float()
    # run preprocessing + network in Brevitas to obtain the reference logits
    preprocessed = preproc.forward(image_torch)
    golden = mobilenet.forward(preprocessed).detach().numpy()
    golden_flat = golden.flatten()
    # top-5 class indices, highest probability first
    golden_top5 = np.flip(np.argsort(golden_flat)[-5:])
    golden_top5_prob = [golden_flat[idx] for idx in golden_top5]
    # persist golden values for later comparison stages
    np.save(build_dir + "/end2end_mobilenet_golden_top5.npy", golden_top5)
    np.save(build_dir + "/end2end_mobilenet_golden_top5_prob.npy", golden_top5_prob)
    assert os.path.isfile(finn_onnx)
    assert os.path.isfile(build_dir + "/end2end_mobilenet_preproc.onnx")
def test_brevitas_mobilenet():
    """Cross-check the FINN-ONNX export of 4-bit MobileNet-v1 against the
    PyTorch/Brevitas forward pass on a fixed test image.

    Exports the normalization preprocessing and the network, merges them into
    a single ONNX model with an appended TopK head, executes it with the FINN
    ONNX executor, and asserts that the produced top-5 indices match exactly
    and the top-5 probabilities match within float tolerance.
    """
    # get single image as input and prepare image
    img = Image.open("/workspace/finn/tests/brevitas/king_charles.jpg")
    # resize smallest side of the image to 256 pixels and resize larger side
    # with same ratio, then crop central 224*224 window
    img = resize_smaller_side(256, img)
    img = crop_center(224, img)
    # transpose from (H, W, C) to (C, H, W) and add batch dimension
    img_np = np.asarray(img).copy().astype(np.float32).transpose(2, 0, 1)
    img_np = img_np.reshape(1, 3, 224, 224)
    img_torch = torch.from_numpy(img_np).float()

    # export preprocessing (ImageNet mean/std normalization)
    export_onnx_path = make_build_dir("test_brevitas_mobilenet-v1_")
    preproc_onnx = export_onnx_path + "/quant_mobilenet_v1_4b_preproc.onnx"
    mean = [0.485, 0.456, 0.406]
    std = 0.226
    ch = 3
    preproc = NormalizePreProc(mean, std, ch)
    bo.export_finn_onnx(preproc, (1, 3, 224, 224), preproc_onnx)
    preproc_model = ModelWrapper(preproc_onnx)
    # set input finn datatype to UINT8; use subscript access for consistency
    # with the rest of this file (works for both Enum and dict-style DataType)
    preproc_model.set_tensor_datatype(
        preproc_model.graph.input[0].name, DataType["UINT8"]
    )
    preproc_model = preproc_model.transform(InferShapes())
    preproc_model = preproc_model.transform(GiveUniqueNodeNames())
    preproc_model = preproc_model.transform(GiveUniqueParameterTensors())
    preproc_model = preproc_model.transform(GiveReadableTensorNames())

    # export the quantized network itself
    finn_onnx = export_onnx_path + "/quant_mobilenet_v1_4b_exported.onnx"
    mobilenet = get_test_model_trained("mobilenet", 4, 4)
    bo.export_finn_onnx(mobilenet, (1, 3, 224, 224), finn_onnx)

    # golden reference: forward pass in PyTorch/Brevitas
    input_tensor = preproc.forward(img_torch)
    expected = mobilenet.forward(input_tensor).detach().numpy()
    expected_topk = expected.flatten()
    # top-5 class indices, highest probability first
    expected_top5 = np.flip(np.argsort(expected_topk)[-5:])
    expected_top5_prob = [expected_topk[index] for index in expected_top5]

    # prepare the FINN model: tidy up, append TopK, absorb scale into TopK
    model = ModelWrapper(finn_onnx)
    model = model.transform(InferShapes())
    model = model.transform(FoldConstants())
    model = model.transform(InsertTopK())
    # get initializer from Mul that will be absorbed into topk, so the
    # absolute probabilities can be reconstructed after execution
    a0 = model.get_initializer(model.graph.node[-2].input[1])
    model = model.transform(absorb.AbsorbScalarMulAddIntoTopK())
    model = model.transform(InferShapes())
    model = model.transform(InferDataTypes())
    model = model.transform(InferDataLayouts())
    model = model.transform(GiveUniqueNodeNames())
    model = model.transform(GiveUniqueParameterTensors())
    model = model.transform(GiveReadableTensorNames())
    model.save(export_onnx_path + "/quant_mobilenet_v1_4b_wo_preproc.onnx")
    # prepend the preprocessing graph so the merged model takes raw UINT8 input
    model = model.transform(MergeONNXModels(preproc_model))
    model.save(export_onnx_path + "/quant_mobilenet_v1_4b.onnx")

    # execute the merged model with the FINN ONNX executor
    idict = {model.graph.input[0].name: img_np}
    odict = oxe.execute_onnx(model, idict, True)
    produced = odict[model.graph.output[0].name]
    # rescale TopK values by the absorbed Mul factor to recover probabilities
    produced_prob = odict["TopK_0_out0"] * a0
    # indices must match exactly; probabilities within float tolerance
    assert (produced.flatten() == expected_top5).all()
    assert np.isclose(produced_prob.flatten(), expected_top5_prob).all()