def _load_model(model_path, prefix=None):
    """
    Load a frozen backbone either from tf2cv weights (when the path contains
    'tf2') or from a regular Keras model file. If `prefix` is given, it is
    prepended to each weight's handle name to avoid name collisions between
    several loaded models.
    """
    if 'tf2' in model_path:
        from tf2cv.model_provider import get_model as tf2cv_get_model  # type: ignore
        _model = tf2cv_get_model(model_path.split('/')[-1].split('-')[0],
                                 pretrained=False,
                                 data_format="channels_last")
        _model.build(input_shape=(1, 224, 224, 3))
        _model.load_weights(model_path)
        _model.trainable = False
    else:
        _model = tf.keras.models.load_model(model_path)
        _model.trainable = False
    if prefix is not None:
        for weight in _model.weights:
            weight._handle_name = prefix + '_' + weight.name  # pylint: disable=W0212
    return _model

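# A minimal usage sketch (an assumption, not part of the original code): load
# two backbones with distinct prefixes so that their weight names do not clash
# when both are combined in one model. The file paths and the
# "resnet18-tf2-..." naming convention below are hypothetical placeholders.
import tensorflow as tf  # required by `_load_model` for the plain-Keras branch

backbone_a = _load_model("weights/resnet18-tf2-0997.h5", prefix="a")
backbone_b = _load_model("weights/plain_keras_model.h5", prefix="b")
# Inspect the (now prefixed) weight names.
print(backbone_a.weights[0].name, backbone_b.weights[0].name)
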
def main():
    """
    Main body of script.
    """
    args = parse_args()

    model = tf2cv_get_model(args.model, pretrained=True)
    x = tf.zeros(shape=args.input_shape)
    _ = model.predict(x)

    # Convert the model.
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_model = converter.convert()

    # Load TFLite model and allocate tensors.
    interpreter = tf.lite.Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Test the TensorFlow Lite model on random input data.
    input_shape = input_details[0]["shape"]
    input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
    interpreter.set_tensor(input_details[0]["index"], input_data)
    interpreter.invoke()

    # The function `get_tensor()` returns a copy of the tensor data.
    # Use `tensor()` in order to get a pointer to the tensor.
    tflite_results = interpreter.get_tensor(output_details[0]["index"])

    # Test the TensorFlow model on the same input data.
    tf_results = model(tf.constant(input_data))

    # Compare the results.
    for tf_result, tflite_result in zip(tf_results, tflite_results):
        np.testing.assert_almost_equal(tf_result, tflite_result, decimal=5)

    if args.output_dir is not None:
        with open("{}/{}.tflite".format(args.output_dir, args.model), "wb") as f:
            f.write(tflite_model)

    print("All OK.")

from tf2cv.model_provider import get_model as tf2cv_get_model
import tensorflow as tf
from gluoncv.data import ImageNet
import numpy as np
from NN.img2tensor import convert_image

# Load the pretrained network.
net = tf2cv_get_model("AlexNet", pretrained=True, data_format="channels_last")

# Load the image, normalize it with the ImageNet mean/std, and reshape to NHWC.
x = convert_image("img_23.jpg")
x = ((x - np.array((0.485, 0.456, 0.406))) /
     np.array((0.229, 0.224, 0.225))).reshape(1, 224, 224, 3)

# Run inference and convert logits to probabilities.
y = net(x)
probs = tf.nn.softmax(y)

# Report the top-5 predictions.
top_k = 5
probs_np = probs.numpy().squeeze(axis=0)
top_k_inds = probs_np.argsort()[::-1][:top_k]
classes = ImageNet().classes
print("The input picture is classified to be:")
for k in range(top_k):
    print("{idx}: [{class_name}], with probability {prob:.3f}.".format(
        idx=(k + 1),
        class_name=classes[top_k_inds[k]],
        prob=probs_np[top_k_inds[k]]))

import os
import math
import random

import pandas as pd
import tensorflow as tf
from tensorflow.keras.mixed_precision import experimental as mixed_precision
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from pylab import rcParams
from tf2cv.model_provider import get_model as tf2cv_get_model

# Enable mixed-precision training.
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)

tf.keras.regularizers.l2(l2=0.01)

datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)

train_csv = pd.read_csv(r"/content/train.csv")
train_csv["label"] = train_csv["label"].astype(str)

base_model = tf2cv_get_model("seresnext101_32x4d", pretrained=False, data_format="channels_last")

# 80/20 train/validation split.
train = train_csv.iloc[:int(len(train_csv) * 0.8), :]
test = train_csv.iloc[int(len(train_csv) * 0.8):, :]
print((len(train), len(test)))

base_model.trainable = True

fold_number = 0
n_splits = 5
oof_accuracy = []

# Cosine decay with warm restarts as the learning-rate schedule.
first_decay_steps = 500
lr = tf.keras.experimental.CosineDecayRestarts(0.04, first_decay_steps)
opt = tf.keras.optimizers.SGD(lr)

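# A hedged sketch (an assumption, not from the original script): one way the
# pieces above could be wired together -- wrap the tf2cv backbone in a small
# Keras model with a task-specific head and train it from the CSV-described
# images. The column names "image_id"/"label", the image directory, and the
# batch size are hypothetical placeholders.
num_classes = train_csv["label"].nunique()
model = tf.keras.Sequential([
    base_model,
    # Keep the final layer in float32, as recommended with mixed precision.
    tf.keras.layers.Dense(num_classes, activation="softmax", dtype="float32"),
])
model.compile(optimizer=opt,
              loss="categorical_crossentropy",
              metrics=["accuracy"])

train_flow = datagen.flow_from_dataframe(
    train, directory="/content/train_images",
    x_col="image_id", y_col="label",
    target_size=(224, 224), class_mode="categorical", batch_size=32)
val_flow = datagen.flow_from_dataframe(
    test, directory="/content/train_images",
    x_col="image_id", y_col="label",
    target_size=(224, 224), class_mode="categorical", batch_size=32)

model.fit(train_flow, validation_data=val_flow, epochs=5)
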
def main():
    """
    Main body of script.
    """
    args = parse_args()

    # Load a testing image:
    image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR)
    # cv2.imshow("image", image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB)

    # Resize image while keeping the aspect ratio:
    resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor))
    h, w = image.shape[:2]
    if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)):
        resize_size = ((resize_value, int(resize_value * h / w)) if w < h
                       else (int(resize_value * w / h), resize_value))
        image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR)

    # Center crop of the image:
    h, w = image.shape[:2]
    th, tw = args.input_size, args.input_size
    ih = int(round(0.5 * (h - th)))
    jw = int(round(0.5 * (w - tw)))
    image = image[ih:(ih + th), jw:(jw + tw), :]
    # cv2.imshow("image2", image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # Convert image to a float tensor and normalize it:
    x = image.astype(np.float32)
    x = x / 255.0
    x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb)

    # Set No-GPU mode:
    if args.num_gpus == 0:
        tf.config.set_visible_devices([], "GPU")

    # Convert the array to a TensorFlow tensor with a batch dimension:
    x = np.expand_dims(x, axis=0)
    x = tf.convert_to_tensor(x, dtype=np.float32)

    # Create model with loading pretrained weights:
    net = tf2cv_get_model(args.model, pretrained=True)

    # Evaluate the network:
    y = net(x)
    probs = tf.nn.softmax(y)

    # Show results:
    top_k = 5
    probs_np = probs.numpy().squeeze(axis=0)
    top_k_inds = probs_np.argsort()[::-1][:top_k]
    classes = ImageNet1kAttr().classes
    print("The input picture is classified to be:")
    for k in range(top_k):
        print("{idx}: [{class_name}], with probability {prob:.3f}.".format(
            idx=(k + 1),
            class_name=classes[top_k_inds[k]],
            prob=probs_np[top_k_inds[k]]))

def main():
    """
    Main body of script.
    """
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    args = parse_args()

    if args.input:
        net_extra_kwargs = {"in_size": args.input_shape[1:3]}
        model = prepare_model(model_name=args.model,
                              use_pretrained=False,
                              pretrained_model_file_path=args.input,
                              net_extra_kwargs=net_extra_kwargs)
    else:
        model = tf2cv_get_model(args.model, pretrained=True)

    x = tf.zeros(shape=args.input_shape)
    _ = model.predict(x)

    # Convert the model.
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    # converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_LATENCY]
    # converter.optimizations = [tf.lite.Optimize.DEFAULT]
    # dataset = np.load(args.dataset)
    #
    # def representative_dataset_gen():
    #     for i in range(len(dataset)):
    #         yield [dataset[i:i + 1]]
    #
    # converter.representative_dataset = representative_dataset_gen
    # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    # converter.inference_input_type = tf.int8
    # converter.inference_output_type = tf.int8
    # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]
    tflite_model = converter.convert()

    if args.output_dir is not None:
        with open("{}/{}.tflite".format(args.output_dir, args.model), "wb") as f:
            f.write(tflite_model)

    # Load TFLite model and allocate tensors.
    interpreter = tf.lite.Interpreter(model_content=tflite_model)
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Test the TensorFlow Lite model on random input data.
    input_shape = input_details[0]["shape"]
    input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
    interpreter.set_tensor(input_details[0]["index"], input_data)
    interpreter.invoke()

    # The function `get_tensor()` returns a copy of the tensor data.
    # Use `tensor()` in order to get a pointer to the tensor.
    tflite_results = interpreter.get_tensor(output_details[0]["index"])

    # Test the TensorFlow model on the same input data.
    tf_results = model(tf.constant(input_data))

    # Compare the results.
    for tf_result, tflite_result in zip(tf_results, tflite_results):
        np.testing.assert_almost_equal(tf_result[0], tflite_result, decimal=5)

    print("All OK.")
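
# A hedged sketch (an assumption): the conversion scripts above call
# `parse_args()` without showing it. Based on the attributes they read
# (`args.model`, `args.input`, `args.input_shape`, `args.output_dir`),
# a minimal argument parser could look like this; the defaults and help
# strings are illustrative placeholders.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description="Convert a tf2cv model to TFLite")
    parser.add_argument("--model", type=str, default="resnet18",
                        help="tf2cv model name")
    parser.add_argument("--input", type=str, default=None,
                        help="path to a pretrained model file (optional)")
    parser.add_argument("--input-shape", type=int, nargs=4,
                        default=[1, 224, 224, 3],
                        help="input tensor shape as N H W C")
    parser.add_argument("--output-dir", type=str, default=None,
                        help="directory for the generated .tflite file")
    return parser.parse_args()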