def loader(image_path, convert_color=None, transpose=(0, 1, 2), dtype=np.float32):
    try:
        # Read the image (BGR, HWC), optionally convert its color space,
        # reorder the axes (e.g. (2, 0, 1) for CHW), and add a batch dimension.
        image = cv2.imread(image_path)
        if convert_color is not None:
            image = cv2.cvtColor(image, convert_color)
        image = np.transpose(image, transpose)
        image = np.array(image, dtype=dtype)
        image = np.array([image])
        # Note: pixel values are returned unnormalized; scaling such as /255 or
        # (x / 255 - 0.5) / 0.5 is left to the caller.
        return image
    except Exception as ex:
        exception_printer('Load image \'' + str(image_path) + '\' failed.')
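# Hedged usage sketch (not part of the original file): how loader() is typically
# called by the test methods below, which also reach it as ImageLoader.loader.
# The file name 'face_112x112.jpg' is a hypothetical placeholder.
def example_load_image():
    image = loader(image_path='face_112x112.jpg',
                   convert_color=cv2.COLOR_BGR2RGB,  # BGR -> RGB
                   transpose=(2, 0, 1),              # HWC -> CHW
                   dtype=np.float32)
    print(image.shape)  # expected: (1, 3, 112, 112) for an aligned face crop
    return image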
def load_mtcnn_model(self, model_path):
    print('\nStarting load MTCNN tensorflow model \'' + str(model_path) + '\'...')
    start_time = time.time()
    try:
        # Build a dedicated graph and import the frozen MTCNN GraphDef into it.
        graph = tf.Graph()
        with graph.as_default():
            with open(model_path, 'rb') as f:
                graph_def = tf.compat.v1.GraphDef.FromString(f.read())
                tf.compat.v1.import_graph_def(graph_def=graph_def, name='')
        self.graph = graph
        config = tf.compat.v1.ConfigProto(allow_soft_placement=True,
                                          intra_op_parallelism_threads=4,
                                          inter_op_parallelism_threads=4)
        config.gpu_options.allow_growth = True
        self.sess = tf.compat.v1.Session(graph=graph, config=config)
        # Initialize the MTCNN model
        self.init_mtcnn_model()
    except Exception as ex:
        exception_printer('Load MTCNN model failed.')
    print('Load MTCNN tensorflow model success. Cost time: ' + str(time.time() - start_time) + 's.')
def load_tensorflow_pb_model(self, tensorflow_pb_model_path, input_name, output_name):
    print('\nStarting load tensorflow pb model \'' + str(tensorflow_pb_model_path) + '\'...')
    start_time = time.time()
    try:
        with open(tensorflow_pb_model_path, 'rb') as f:
            output_graph_def = tf.compat.v1.GraphDef()
            output_graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(output_graph_def, name="")
        self.tensorflow_pb_model_sess = tf.compat.v1.Session()
        self.tensorflow_pb_model_sess.as_default()
        self.input = self.tensorflow_pb_model_sess.graph.get_tensor_by_name(input_name)
        self.output = self.tensorflow_pb_model_sess.graph.get_tensor_by_name(output_name)
        print('Load tensorflow pb model success. Cost time: ', time.time() - start_time)
        return self.tensorflow_pb_model_sess, self.input, self.output
    except Exception as ex:
        exception_printer('Load tensorflow pb model failed.')
        return None
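# Hedged usage sketch (assumption, not from the original file): running a forward
# pass with the session and tensors returned above, the same way the test methods
# below call sess.run(). 'converter', the pb path, and the tensor names
# ('input.1:0', 'output:0') are hypothetical placeholders; the real names depend
# on the exported graph.
def example_run_pb_model(converter, image):
    sess, input_tensor, output_tensor = converter.load_tensorflow_pb_model(
        tensorflow_pb_model_path='backbone.pb',
        input_name='input.1:0',
        output_name='output:0')
    feature = sess.run(output_tensor, feed_dict={input_tensor: image})
    return feature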
def convert(self, pytorch_model_path, pytorch_weight_path, input_shape,
            onnx_model_output_path, input_names, output_names):
    # Load the model in eval mode before exporting (load_pytorch_model expects a
    # train flag; False matches export requirements).
    self.load_pytorch_model(pytorch_model_path, pytorch_weight_path, input_shape,
                            train=False)
    start_time = time.time()
    print('\nStarting convert to ONNX model ...')
    try:
        # Dummy input used only to trace the graph for export.
        image = torch.empty(size=(1, *input_shape), dtype=torch.float, device=self.device)
        torch.onnx.export(model=self.pytorch_model,
                          args=image,
                          f=onnx_model_output_path,
                          verbose=False,
                          input_names=input_names,
                          output_names=output_names)
        print('ONNX model export success, saved as ' + str(onnx_model_output_path) +
              '. Cost time: ' + str(time.time() - start_time) + 's.')
    except Exception as ex:
        exception_printer('Convert to ONNX model failed.')
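# Hedged usage sketch (assumption): driving the PyTorch -> ONNX export above.
# 'converter' stands for whatever class owns this method in the original project;
# the paths and tensor names are hypothetical placeholders. The (3, 112, 112)
# input shape matches the insightface default used further down.
def example_convert_pytorch_to_onnx(converter):
    converter.convert(pytorch_model_path='backbone.pth',
                      pytorch_weight_path=None,
                      input_shape=(3, 112, 112),
                      onnx_model_output_path='backbone.onnx',
                      input_names=['input'],
                      output_names=['output'])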
def convert(self, tensorflow_pb_model_path, tensorflow_lite_model_output_path,
            input_names, output_names, quant):
    print('\nStarting convert to tensorflow lite ...')
    start_time = time.time()
    try:
        converter = tf.compat.v1.lite.TFLiteConverter.from_frozen_graph(
            graph_def_file=tensorflow_pb_model_path,
            input_arrays=input_names,
            output_arrays=output_names)
        if quant:
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            # Alternative quantization settings:
            # converter.target_spec.supported_types = [tf.float16]
            # converter.inference_type = tf.uint8
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
            ]
        tflite_model = converter.convert()
        with open(tensorflow_lite_model_output_path, 'wb') as f:
            f.write(tflite_model)
        print('Tensorflow lite export success, saved as ' + str(tensorflow_lite_model_output_path) +
              '. Cost time: ' + str(time.time() - start_time) + 's.')
    except Exception as ex:
        exception_printer('Convert to tensorflow lite failed.')
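# Hedged usage sketch (assumption): converting a frozen pb to TFLite with the
# method above, then opening the result with tf.lite.Interpreter the way the
# TFLite test method below drives it (set_tensor / invoke / get_tensor).
# 'converter' and the file names are hypothetical placeholders.
def example_convert_and_open_tflite(converter):
    converter.convert(tensorflow_pb_model_path='backbone.pb',
                      tensorflow_lite_model_output_path='backbone.tflite',
                      input_names=['input'],
                      output_names=['output'],
                      quant=False)
    interpreter = tf.lite.Interpreter(model_path='backbone.tflite')
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    return interpreter, input_details, output_details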
def convert(self, onnx_model_path, tensorflow_pb_model_output_path):
    self.load_onnx_model(onnx_model_path)
    start_time = time.time()
    print('\nStarting convert to tensorflow pb model ...')
    try:
        onnx_tf_exporter = prepare(self.onnx_model)
        onnx_tf_exporter.export_graph(tensorflow_pb_model_output_path)
        print('Tensorflow export success, saved as ' + str(tensorflow_pb_model_output_path) +
              '. Cost time: ' + str(time.time() - start_time))
    except Exception as ex:
        exception_printer('Convert to tensorflow pb model failed.')
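# Hedged usage sketch (assumption): the ONNX -> pb step above relies on prepare()
# from onnx-tf (onnx_tf.backend.prepare). 'converter' and the file paths are
# hypothetical placeholders.
def example_convert_onnx_to_pb(converter):
    converter.convert(onnx_model_path='backbone.onnx',
                      tensorflow_pb_model_output_path='backbone.pb')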
def test_tensorflow_lite_model_by_tensorflow_pb_model(
        self, test_image_path, tensorflow_lite_model, tensorflow_lite_input,
        tensorflow_lite_output, tensorflow_pb_model, tensorflow_input,
        tensorflow_output):
    print('\nStarting test tensorflow lite model by tensorflow pb model ...')
    start_time = time.time()
    try:
        image = ImageLoader.loader(image_path=test_image_path,
                                   transpose=(2, 0, 1),
                                   dtype=np.float32)
        print('Test image \'' + str(test_image_path) + '\', shape: ' + str(image.shape))

        # Tensorflow lite
        tensorflow_lite_model.set_tensor(tensorflow_lite_input[0]['index'], image)
        tensorflow_lite_model.invoke()
        feature_by_tensorflow_lite = tensorflow_lite_model.get_tensor(
            tensorflow_lite_output[0]['index'])
        print('Feature by tensorflow lite ' + str(feature_by_tensorflow_lite[0].shape) + ': \n',
              feature_by_tensorflow_lite[0])

        # Tensorflow pb
        feature_by_tensorflow = tensorflow_pb_model.run(
            tensorflow_output, feed_dict={tensorflow_input: image})
        print('Feature by tensorflow ' + str(feature_by_tensorflow.shape) + '\n',
              feature_by_tensorflow)

        compare_utils = Compare_Util(False)
        _, _, cosine_distance, _, _ = compare_utils.compare_feature(
            CompareDistanceType.Cosine, feature_by_tensorflow_lite[0],
            feature_by_tensorflow[0], 0.6, 0.6, 0.6)
        print('Distance: ', cosine_distance)
        print('Test finish. Cost time: ' + str(time.time() - start_time) + 's.')
    except Exception as ex:
        exception_printer('Test tensorflow lite model by tensorflow pb model failed.')
def load_onnx_model(self, onnx_model_path):
    print('\nStarting load ONNX model \'' + str(onnx_model_path) + '\'...')
    start_time = time.time()
    try:
        self.onnx_model = onnx.load(onnx_model_path)
        self.onnx_sess = onnxrt.InferenceSession(onnx_model_path)
        self.input_name = self.onnx_sess.get_inputs()[0].name
        self.output_name = self.onnx_sess.get_outputs()[0].name
        print('Input name: ', self.input_name)
        print('Output name: ', self.output_name)
        print('Load ONNX model success. Cost time: ' + str(time.time() - start_time) + 's.')
        return self.onnx_model, self.onnx_sess
    except Exception as ex:
        exception_printer('Load ONNX model failed.')
        return None
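# Hedged usage sketch (assumption): running inference with the onnxruntime session
# returned above, matching how the test methods below call onnx_model_sess.run().
# 'converter', the model path, and the input array are hypothetical placeholders.
def example_run_onnx_model(converter, image):
    onnx_model, onnx_sess = converter.load_onnx_model('backbone.onnx')
    input_name = onnx_sess.get_inputs()[0].name
    feature = onnx_sess.run(None, {input_name: image})[0]
    return feature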
def load_insightface_pytorch_model(self,
                                   model_name=None,
                                   pytorch_model_path=None,
                                   pytorch_weight_path=None,
                                   input_shape=(3, 112, 112),
                                   train=False):
    start_time = time.time()
    if pytorch_model_path is not None:
        print('\nStarting load insightface pytorch model \'' + str(pytorch_model_path) + '\'...')
        try:
            self.pytorch_model = torch.load(pytorch_model_path)
        except Exception as ex:
            exception_printer('Load pytorch model failed.')
            return None
    elif model_name is not None and pytorch_weight_path is not None:
        print('\nStarting load insightface pytorch model name: ' + str(model_name) +
              ', weight: \'' + str(pytorch_weight_path) + '\'...')
        try:
            self.pytorch_model = get_model(name=model_name)
            self.pytorch_model.load_state_dict(torch.load(pytorch_weight_path))
        except Exception as ex:
            exception_printer('Load pytorch weight failed.')
            return None
    self.pytorch_model.to(device=self.device)
    self.pytorch_model.train(train)
    summary(self.pytorch_model, input_size=input_shape)
    print('Load pytorch model success. Cost time: ' + str(time.time() - start_time) + 's.')
    return self.pytorch_model
def test_tensorflow_pb_model_by_onnx_model(self, test_image_path, tensorflow_pb_model,
                                           tensorflow_input, tensorflow_output,
                                           onnx_model_sess):
    print('\nStarting test Tensorflow pb model by ONNX model ...')
    start_time = time.time()
    try:
        image = ImageLoader.loader(image_path=test_image_path,
                                   transpose=(2, 0, 1),
                                   dtype=np.float32)
        print('Test image \'' + str(test_image_path) + '\', shape: ' + str(image.shape))

        # Tensorflow pb
        feature_by_tensorflow = tensorflow_pb_model.run(
            tensorflow_output, feed_dict={tensorflow_input: image})
        print('Feature by tensorflow ' + str(feature_by_tensorflow.shape) + '\n',
              feature_by_tensorflow)

        # ONNX
        input_name = onnx_model_sess.get_inputs()[0].name
        feature_by_onnx = onnx_model_sess.run(None, {input_name: image})
        feature_by_onnx = np.array(feature_by_onnx[0])
        print('Feature by ONNX ' + str(feature_by_onnx[0].shape) + ': \n',
              feature_by_onnx[0])

        compare_utils = Compare_Util(False)
        _, _, cosine_distance, _, _ = compare_utils.compare_feature(
            CompareDistanceType.Cosine, feature_by_tensorflow[0],
            feature_by_onnx[0], 0.6, 0.6, 0.6)
        print('Distance: ', cosine_distance)
        print('Test finish. Cost time: ' + str(time.time() - start_time) + 's.')
    except Exception as ex:
        exception_printer('Test Tensorflow pb model by ONNX model failed.')
def test_onnx_model_by_pytorch_model(self, test_image_path, onnx_model_sess, pytorch_model):
    print('\nStarting test ONNX model by PyTorch model ...')
    start_time = time.time()
    try:
        image = ImageLoader.loader(image_path=test_image_path,
                                   transpose=(2, 0, 1),
                                   dtype=np.float32)
        print('Test image \'' + str(test_image_path) + '\', shape: ' + str(image.shape))

        # ONNX
        input_name = onnx_model_sess.get_inputs()[0].name
        feature_by_onnx = onnx_model_sess.run(None, {input_name: image})
        feature_by_onnx = np.array(feature_by_onnx[0])
        print('Feature by ONNX ' + str(feature_by_onnx[0].shape) + ': \n',
              feature_by_onnx[0])

        # PyTorch
        image_torch = torch.Tensor(image).cuda()
        feature_by_pytorch = pytorch_model(image_torch)
        feature_by_pytorch = feature_by_pytorch.cpu().detach().numpy()
        print('Feature by PyTorch ' + str(feature_by_pytorch[0].shape) + ': \n',
              feature_by_pytorch[0])

        compare_utils = Compare_Util(False)
        _, _, cosine_distance, _, _ = compare_utils.compare_feature(
            CompareDistanceType.Cosine, feature_by_onnx[0],
            feature_by_pytorch[0], 0.6, 0.6, 0.6)
        print('Distance: ', cosine_distance)
        print('Test finish. Cost time: ' + str(time.time() - start_time) + 's.')
    except Exception as ex:
        exception_printer('Test ONNX model by PyTorch model failed.')
def load_pytorch_model(self, pytorch_model_path, pytorch_weight_path, input_shape,
                       train=False):
    # train defaults to False so the ONNX conversion above can call this method
    # without passing it explicitly.
    print('\nStarting load pytorch model \'' + str(pytorch_model_path) + '\'...')
    start_time = time.time()
    self.input_shape = input_shape

    # Load pytorch model
    try:
        self.pytorch_model = torch.load(pytorch_model_path)
    except Exception as ex:
        exception_printer('Load pytorch model failed.')
        return None

    # Load pytorch weight
    if pytorch_weight_path is not None:
        print('\nStarting load pytorch weight ', pytorch_weight_path, '...')
        try:
            self.pytorch_weight = torch.load(pytorch_weight_path)
            self.pytorch_model.load_state_dict(self.pytorch_weight)
        except Exception as ex:
            exception_printer('Load pytorch weight failed.')
            return None

    self.pytorch_model.to(device=self.device)
    self.pytorch_model.train(train)
    summary(self.pytorch_model, input_size=input_shape)
    print('Load pytorch model success. Cost time: ' + str(time.time() - start_time) + 's.')
    return self.pytorch_model