def convert_tensorflow(nlp: Pipeline, opset: int, output: str):
    if not is_tf_available():
        raise Exception(
            "Cannot convert because TF is not installed. Please install tensorflow first."
        )

    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")

    try:
        import tensorflow as tf
        from keras2onnx import convert_keras, save_model, __version__ as k2ov

        print("Using framework TensorFlow: {}, keras2onnx: {}".format(tf.version.VERSION, k2ov))

        # Build
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")

        # Forward
        nlp.model.predict(tokens.data)
        onnx_model = convert_keras(nlp.model, nlp.model.name, target_opset=opset)
        save_model(onnx_model, output)
    except ImportError as e:
        raise Exception(
            "Cannot import {} required to convert TF model to ONNX. Please install {} first.".format(e.name, e.name)
        )
def kconversion():
    model = keras.models.load_model('model_keras')
    plot_model(model,
               to_file="model.png",
               show_shapes=True,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=True,
               dpi=96)

    onnx_model = keras2onnx.convert_keras(model, 'model0.onnx', debug_mode=True)
    output_model_path = "./model0.onnx"

    # and save the model in ONNX format
    keras2onnx.save_model(onnx_model, output_model_path)

    onnx_model = onnx.load("model0.onnx")
    s = MessageToJson(onnx_model)
    onnx_json = json.loads(s)

    # Convert JSON to String
    onnx_str = json.dumps(onnx_json)
    with open("model1.json", "w") as json_file:
        json_file.write(onnx_str)

    resp = make_response(onnx_str)
    resp.headers['Access-Control-Allow-Origin'] = '*'
    return resp
def convert(self, model, saved_path, model_name):
    """
    Convert a BERT model (transformers) to an optimized ONNX model.

    :param model: Trained model from transformers.
    :param saved_path: The path to save the onnx model.
    :param model_name: Model name to use when saving.
    :returns optimized_model: Optimized onnx model.
             optimized_model_saved_path: Path the optimized model was saved to.
    """
    if not os.path.exists(saved_path):
        os.makedirs(saved_path)

    unoptimized_model_saved_path = os.path.join(saved_path, '{}.onnx'.format(model_name))
    optimized_model_saved_path = os.path.join(saved_path, '{}_optimized.onnx'.format(model_name))

    self.sample_inputs = self.tokenizer.encode_plus("This is a sample input", return_tensors='tf')

    # Step 1: Convert the original transformers model to an unoptimized ONNX model.
    model.predict(self.sample_inputs.data)
    unoptimized_model = convert_keras(model, model.name, target_opset=self.target_opset)
    save_model(unoptimized_model, unoptimized_model_saved_path)

    # Step 2: Optimizations for a trained model converted from TensorFlow (tf.keras).
    optimized_model = optimizer.optimize_model(unoptimized_model_saved_path,
                                               model_type='bert_keras',
                                               num_heads=self.num_heads,
                                               hidden_size=self.hidden_size)
    optimized_model.save_model_to_file(optimized_model_saved_path)

    return optimized_model, optimized_model_saved_path
def convert_keras_model_to_onnx(f_name_keras_model: str,
                                f_name_model_weights: str,
                                f_name_onnx_model: str) -> None:
    ''' Convert a trained Keras model to ONNX Runtime format. This speeds the model up
    by roughly 10-30 times on the CPU (on an Intel i7-10510U, test phrase processing takes
    about 150 ms with TensorFlow v1.X and about 5 ms with ONNX Runtime).
    Only TensorFlow <= v2.2.2 is supported (source: https://github.com/onnx/keras-onnx)!

    1. f_name_keras_model - name of the .json file with the Keras model
    2. f_name_model_weights - name of the .hdf5 file with the Keras model weights
    3. f_name_onnx_model - name of the .onnx file to save the ONNX model to
    4. returns None '''

    print("[i] Loading keras model and its weights from '{}' and '{}'".format(
        f_name_keras_model, f_name_model_weights))
    with open(f_name_keras_model, 'r') as f_model:
        model_json = f_model.read()
    model = tf.keras.models.model_from_json(model_json)
    model.load_weights(str(f_name_model_weights))

    print('[i] Converting keras model to onnx...')
    onnx_model = keras2onnx.convert_keras(model, model.name)

    print("[i] Saving onnx model to '{}'".format(f_name_onnx_model))
    keras2onnx.save_model(onnx_model, f_name_onnx_model)
def main():
    A2B_generator = ResnetGenerator(input_shape=(FLAGS.img_size, FLAGS.img_size, 3))
    B2A_generator = ResnetGenerator(input_shape=(FLAGS.img_size, FLAGS.img_size, 3))

    ONNX_A2B_path = "C:/Users/Yuhwan/Documents/New/A2B_generator.onnx"
    ONNX_B2A_path = "C:/Users/Yuhwan/Documents/New/B2A_generator.onnx"

    image = tf.io.read_file("C:/Users/Yuhwan/Pictures/김유환.jpg")
    image = tf.image.decode_jpeg(image, 3)
    image = tf.image.resize(image, [256, 256]) / 127.5 - 1.
    image = tf.expand_dims(image, 0)

    if FLAGS.pre_checkpoint:
        # This is just an example (how to restore the previous weight files before exporting to ONNX)
        ckpt = tf.train.Checkpoint(A2B_generator)
        ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, 5)
        if ckpt_manager.latest_checkpoint:
            ckpt.restore(ckpt_manager.latest_checkpoint)
            print("Restored!!!")

    onnx_model = keras2onnx.convert_keras(A2B_generator, A2B_generator.name)
    content = onnx_model.SerializeToString()
    keras2onnx.save_model(onnx_model, ONNX_A2B_path)
def save_onnx(self, model_dir: str, version: int = 1):
    """Save/Export Critic model in ONNX format"""
    critic_model_save_path = os.path.join(model_dir, "critic", str(version), "model.onnx")
    onnx_model = keras2onnx.convert_keras(self.model, self.model.name)
    keras2onnx.save_model(onnx_model, critic_model_save_path)
    print(f"Critic model saved in ONNX format at: {critic_model_save_path}")
def main(): """Converts a keras model into ONNX format.""" # model = alexnet((224, 224, 3)) model = build_model( NASNetMobile(input_shape=(224, 224, 3), include_top=False, weights='imagenet')) model.load_weights(KERAS_MODEL_PATH) # If we have not specified explicitly image dimensions when creating # the model # # model = load_model(KERAS_MODEL_PATH) # model._layers[0].batch_input_shape = (batch_size, image_size, image_size, # channels) # # In order for the input_shape to be saved correctly we have to # clone the model into a new one # # model = clone_model(model) # # When cloning we loose the weights, load them again # # model.load_weights(KERAS_MODEL_PATH) onnx_model = keras2onnx.convert_keras(model, model.name) # target_opset=target_opset, # debug_mode=True keras2onnx.save_model(onnx_model, ONNX_MODEL_PATH)
def save_onnx(self, save_folder, save_name='base_model.onnx'):
    os.environ["TF_KERAS"] = '1'
    import efficientnet.tfkeras as efn
    import keras2onnx

    onnx_model = keras2onnx.convert_keras(self.base_model, self.base_model.name)
    keras2onnx.save_model(onnx_model, os.path.join(save_folder, save_name))
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
    """
    Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR).

    Args:
        nlp: The pipeline to be exported
        opset: The actual version of the ONNX operator set to use
        output: Path where the generated ONNX model will be stored

    Notes:
        TensorFlow cannot export models bigger than 2GB due to an internal constraint in TensorFlow.
    """
    if not is_tf_available():
        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")

    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")

    try:
        import tensorflow as tf

        from keras2onnx import __version__ as k2ov
        from keras2onnx import convert_keras, save_model

        print(f"Using framework TensorFlow: {tf.version.VERSION}, keras2onnx: {k2ov}")

        # Build
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")

        # Forward
        nlp.model.predict(tokens.data)
        onnx_model = convert_keras(nlp.model, nlp.model.name, target_opset=opset)
        save_model(onnx_model, output.as_posix())

    except ImportError as e:
        raise Exception(f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first.")
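# A hypothetical driver for convert_tensorflow() above (not part of the original
# snippet). It assumes the surrounding module also provides `infer_shapes` and
# `is_tf_available`, and that a TensorFlow-backed transformers pipeline is used;
# the model name, opset, and output path are illustrative only.
from pathlib import Path

from transformers import pipeline

nlp = pipeline("feature-extraction", model="distilbert-base-uncased", framework="tf")
convert_tensorflow(nlp, opset=11, output=Path("distilbert.onnx"))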
def export_onnx( self, name: str = "model.onnx", opset: int = DEFAULT_ONNX_OPSET, doc_string: str = "", debug_mode: bool = True, **kwargs, ): """ Export an ONNX file for the current model. :param name: name of the onnx file to save :param opset: onnx opset to use for exported model. Default is 11 :param doc_string: optional doc string for exported ONNX model :param debug_mode: debug mode, default to True, passed into `convert_keras` :param kwargs: additional parameters passed into `convert_keras` """ if keras2onnx_import_error is not None: raise keras2onnx_import_error model_name = self._model.name or name.split(".onnx")[0] onnx_model = keras2onnx.convert_keras( self._model, name=model_name, target_opset=opset, doc_string=doc_string, debug_mode=debug_mode, **kwargs, ) onnx_path = os.path.join(self._output_dir, name) create_parent_dirs(onnx_path) keras2onnx.save_model(onnx_model, onnx_path)
def export_onnx_model_from_tf(model_name, opset_version, use_external_data_format, model_type,
                              model_class, cache_dir, onnx_dir, input_names, use_gpu, precision,
                              optimize_onnx, validate_onnx, use_raw_attention_mask, overwrite,
                              model_fusion_statistics):
    config = AutoConfig.from_pretrained(model_name, cache_dir=cache_dir)
    model = load_pretrained_model(model_name,
                                  config=config,
                                  cache_dir=cache_dir,
                                  custom_model_class=model_class,
                                  if_tf_model=True)
    model._saved_model_inputs_spec = None

    tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
    max_input_size = tokenizer.max_model_input_sizes[
        model_name] if model_name in tokenizer.max_model_input_sizes else 1024

    example_inputs = tokenizer.encode_plus("This is a sample input",
                                           return_tensors="tf",
                                           max_length=max_input_size,
                                           pad_to_max_length=True,
                                           truncation=True)
    example_inputs = filter_inputs(example_inputs, input_names)
    example_outputs = model(example_inputs, training=False)

    # Flatten is needed for gpt2 and distilgpt2.
    example_outputs_flatten = flatten(example_outputs)
    example_outputs_flatten = update_flatten_list(example_outputs_flatten, [])

    onnx_model_path = get_onnx_file_path(onnx_dir, model_name, len(input_names), False, use_gpu,
                                         precision, False, use_external_data_format)

    if overwrite or not os.path.exists(onnx_model_path):
        logger.info("Exporting ONNX model to {}".format(onnx_model_path))

        import keras2onnx
        onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=opset_version)
        keras2onnx.save_model(onnx_model, onnx_model_path)
    else:
        logger.info(f"Skip export since model existed: {onnx_model_path}")

    model_type = model_type + '_keras'
    onnx_model_file, is_valid_onnx_model, vocab_size = validate_and_optimize_onnx(
        model_name, use_external_data_format, model_type, onnx_dir, input_names, use_gpu,
        precision, optimize_onnx, validate_onnx, use_raw_attention_mask, overwrite, config,
        model_fusion_statistics, onnx_model_path, example_inputs, example_outputs_flatten)

    return onnx_model_file, is_valid_onnx_model, vocab_size, max_input_size
def convertKerasToOnnx(fileName):
    model = load_model(fileName)

    # convert to onnx model
    onnx_model = keras2onnx.convert_keras(model, model.name)

    # onnx_model_file = './model/mask_recognition_v4_updated.onnx'
    fileName, fileExtension = os.path.splitext(fileName)
    onnx_model_file = fileName + ".onnx"
    keras2onnx.save_model(onnx_model, onnx_model_file)

    sess = onnxruntime.InferenceSession(onnx_model_file)
def export_to_onnx(model):
    # convert to onnx model
    onnx_model = keras2onnx.convert_keras(model, model.name)

    meta = onnx_model.metadata_props.add()
    meta.key = "creation_date"
    meta.value = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
    meta = onnx_model.metadata_props.add()
    meta.key = "author"
    meta.value = 'keithpij'
    onnx_model.doc_string = 'MNIST model converted from Keras'
    onnx_model.model_version = 3  # This must be an integer or long.

    keras2onnx.save_model(onnx_model, ONNX_MODEL_FILE)
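# A minimal companion sketch (not from the original snippet): read the metadata
# written by export_to_onnx() back through onnxruntime's ModelMetadata API.
# It assumes ONNX_MODEL_FILE is the same path used above.
import onnxruntime


def check_onnx_metadata(onnx_model_file=ONNX_MODEL_FILE):
    sess = onnxruntime.InferenceSession(onnx_model_file)
    meta = sess.get_modelmeta()
    print(meta.description)          # doc_string set during export
    print(meta.version)              # model_version set during export
    print(meta.custom_metadata_map)  # {'creation_date': ..., 'author': ...}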
def main(_argv):
    print("Loading", FLAGS.model)
    if not os.path.exists(FLAGS.model):
        print("h5 model not found at path: {}\nUse the --model flag to specify a path to your h5 model.".format(FLAGS.model))
        return None

    model = load_model(FLAGS.model)

    print("Converting to ONNX...")
    onnx_model = keras2onnx.convert_keras(model)
    keras2onnx.save_model(onnx_model, FLAGS.out)
    print("Success. Output at:", FLAGS.out)
def save_model(model, model_path, model_name):
    print("\nConvert model to ONNX format ...")
    onnx_model = keras2onnx.convert_keras(model, model.name)

    # create directory if it does not exist
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # save model and log to mlflow
    model_path = os.path.join(model_path, model_name)
    print("saving ONNX model to ", model_path)
    keras2onnx.save_model(onnx_model, model_path)
    mlflow.onnx.log_model(onnx_model, model_path)

    return onnx_model
def convert(input_ops_dict, output_ops, input_model, output_model):
    '''Convert keras h5 to tensorflow pb
       Args:
           input_ops_dict: input ops dict including names and shapes
           output_ops: output op names
           input_model: input keras h5 model name
           output_model: output pb model name
    '''
    onnx_name = ".tmp.onnx"
    pb_name = ".tmp.pb"

    # keras --> onnx --> pb --> onnx --> pb

    # keras --> onnx
    model = tf.keras.models.load_model(input_model)
    onnx_model = keras2onnx.convert_keras(model, model.name, target_opset=8)
    keras2onnx.save_model(onnx_model, onnx_name)

    # onnx --> tf
    onnx_model = onnx.load(onnx_name)
    tf_rep = prepare(onnx_model, input_shape_dict=input_ops_dict)
    tf_rep.export_graph(pb_name)

    # tf --> onnx (fold constants)
    inputs = input_ops_dict.keys()
    inputs = [i + ":0" for i in inputs]
    outputs = output_ops
    graph_def, inputs, outputs = tf_loader.from_graphdef(pb_name, inputs, outputs)
    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name="")
    g = tf2onnx.tfonnx.process_tf_graph(tf_graph, opset=8, input_names=inputs, output_names=outputs)
    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from %s" % pb_name)
    utils.save_protobuf(onnx_name, model_proto)

    # onnx --> tf
    onnx_model = onnx.load(onnx_name)
    tf_rep = prepare(onnx_model, input_shape_dict=input_ops_dict)
    tf_rep.export_graph(output_model)

    # remove tmp files
    if os.path.exists(onnx_name):
        os.remove(onnx_name)
    if os.path.exists(pb_name):
        os.remove(pb_name)
def export_to_onnx(model): print("Exporting...") # convert to onnx model onnx_model = keras2onnx.convert_keras(model, model.name) # Add metadata to the ONNX model. meta = onnx_model.metadata_props.add() meta.key = "creation_date" meta.value = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S") meta = onnx_model.metadata_props.add() meta.key = "author" meta.value = 'hardplant' onnx_model.doc_string = 'classifier' onnx_model.model_version = 1 # This must be an integer or long. keras2onnx.save_model(onnx_model, ONNX_MODEL_FILE) print("Exported")
def test_SRResNet(self):
    K.clear_session()
    keras_model = get_srresnet_model()
    data = np.random.rand(2, 32, 32, 3).astype(np.float32)
    expected = keras_model.predict(data)
    onnx_model = keras2onnx.convert_keras(keras_model, keras_model.name)
    keras2onnx.save_model(onnx_model, 'sr_resnet.onnx')
    self.assertTrue(
        run_keras_and_ort(onnx_model.graph.name, onnx_model, keras_model, data, expected,
                          self.model_files, rtol=1e-2, atol=1e-4))
def convert_to_onnx(input_model_path, model_name, output_model_path):
    model = load_model(input_model_path)
    onnx_model = keras2onnx.convert_keras(model, model_name, debug_mode=1)
    return keras2onnx.save_model(onnx_model, output_model_path)
def export_onnx( self, name: str = "model.onnx", opset: int = DEFAULT_ONNX_OPSET, doc_string: str = "", debug_mode: bool = True, raise_on_tf_support: bool = True, **kwargs, ): """ Export an ONNX file for the current model. :param name: name of the onnx file to save :param opset: onnx opset to use for exported model. Default is 11 :param doc_string: optional doc string for exported ONNX model :param debug_mode: debug mode, default to True, passed into `convert_keras` :param kwargs: additional parameters passed into `convert_keras` """ if keras2onnx_import_error is not None: raise keras2onnx_import_error if raise_on_tf_support: import tensorflow v = tensorflow.__version__ if v >= "2.3.0": raise ValueError( f"Tensorflow version {v} is greater than the currently supported " "version for keras2onnx. Please downgrade the Tensorflow <2.3.0 " "or set raise_on_tf_support to False to continue.") model_name = self._model.name or name.split(".onnx")[0] onnx_model = keras2onnx.convert_keras( self._model, name=model_name, target_opset=opset, doc_string=doc_string, debug_mode=debug_mode, **kwargs, ) onnx_path = os.path.join(self._output_dir, name) create_parent_dirs(onnx_path) keras2onnx.save_model(onnx_model, onnx_path)
def tf_keras_convert_to_onnx(models, paths, config):
    """
    Convert a Keras model to ONNX and optimize it for BERT.
    :param models: the Keras model to convert
    :param paths: path of the ONNX file to write
    :param config: model config providing num_attention_heads and hidden_size
    :return:
    """
    onnxNerBert = keras2onnx.convert_keras(models, models.name, target_opset=12)
    keras2onnx.save_model(onnxNerBert, paths)

    optimized_model = optimizer.optimize_model(paths,
                                               model_type='bert_keras',
                                               num_heads=config.num_attention_heads,
                                               hidden_size=config.hidden_size)
    optimized_model.use_dynamic_axes()
    optimized_model.save_model_to_file(paths)
def save(self, epoch):
    self.model.summary()

    # Force the batch dimension of every layer to 1 before exporting.
    self.model.inputs[0].shape.dims[0]._value = 1
    for index, layer in enumerate(self.model.layers):
        print(index)
        try:
            layer.batch_size = 1
        except:
            pass
        try:
            layer._batch_input_shape = ((1, layer._batch_input_shape[1],
                                         layer._batch_input_shape[2],
                                         layer._batch_input_shape[3]))
        except:
            pass
        try:
            layer.input_shape[0] = ((1, layer.input_shape[0][1],
                                     layer.input_shape[0][2],
                                     layer.input_shape[0][3]))
        except:
            pass
        try:
            layer.output_shape[0] = ((1, layer.output_shape[0][1],
                                      layer.output_shape[0][2],
                                      layer.output_shape[0][3]))
        except:
            layer.output._shape._dims[0]._value = 1

    self.model.compile(optimizer='rmsprop',
                       loss='binary_crossentropy',
                       metrics=['accuracy'])
    self.model.summary()

    self.model_save_path = os.path.join(self.config.checkpoint_dir,
                                        "generator_scale_{}.h5".format(epoch))
    self.model.save(self.model_save_path)

    restored_model = tf.keras.models.load_model(self.model_save_path, compile=False)
    onnx_model = keras2onnx.convert_keras(restored_model, restored_model.name)
    keras2onnx.save_model(onnx_model, '/root/optimization/ocrSecurity/model.onnx')
def export_model_to_onnx(self, fpath, quantize=False, target_opset=None, verbose=1):
    """
    Export model to ONNX.

    Args:
      fpath(str): String representing full path to model file where ONNX model will be saved.
                  Example: '/tmp/my_model.onnx'
      quantize(bool): If True, a total of three model files will be created using
                      transformers.convert_graph_to_onnx:
                      1) an ONNX model (created directly using keras2onnx)
                      2) an optimized ONNX model (created by the transformers library)
                      3) a quantized version of the optimized ONNX model (created by the transformers library)
                      All files will be created in the parent folder of fpath.
                      Example: If fpath='/tmp/model.onnx', then both /tmp/model-optimized.onnx and
                      /tmp/model-optimized-quantized.onnx will also be created.
      verbose(bool): verbosity
    Returns:
      str: string representing fpath. If quantize=True, the returned fpath will be different than the supplied fpath.
    """
    try:
        import onnxruntime, onnxruntime_tools, onnx, keras2onnx
    except ImportError:
        raise Exception('This method requires ONNX libraries to be installed: ' +
                        'pip install -q --upgrade onnxruntime==1.5.1 onnxruntime-tools onnx keras2onnx')
    from pathlib import Path

    if type(self.preproc).__name__ == 'BERTPreprocessor':
        raise Exception('currently_unsupported: BERT models created with text_classifier("bert",...) are not supported (i.e., keras_bert models). ' +
                        'Only BERT models created with Transformer(...) are supported.')

    if verbose:
        print('converting to ONNX format ... this may take a few moments...')

    if U.is_huggingface(model=self.model):
        tokenizer = self.preproc.get_tokenizer()
        maxlen = self.preproc.maxlen
        input_dict = tokenizer('Name',
                               return_tensors='tf',
                               padding='max_length',
                               max_length=maxlen)
        if version.parse(tf.__version__) < version.parse('2.2'):
            raise Exception('export_model_to_onnx requires tensorflow>=2.2')
            # self.model._set_inputs(input_spec, training=False)  # for tf < 2.2
        self.model._saved_model_inputs_spec = None  # for tf > 2.2
        self.model._set_save_spec(input_dict)       # for tf > 2.2
        self.model._get_save_spec()

    onnx_model = keras2onnx.convert_keras(self.model, self.model.name, target_opset=target_opset)
    keras2onnx.save_model(onnx_model, fpath)
    return_fpath = fpath

    if quantize:
        from transformers.convert_graph_to_onnx import optimize, quantize
        # opt_path = optimize(Path(fpath))

        if U.is_huggingface(model=self.model) and \
           type(self.model).__name__ in ['TFDistilBertForSequenceClassification',
                                         'TFBertForSequenceClassification']:
            try:
                from onnxruntime_tools import optimizer
                from onnxruntime_tools.transformers.onnx_model_bert import BertOptimizationOptions

                # disable embedding layer norm optimization for better model size reduction
                opt_options = BertOptimizationOptions('bert')
                opt_options.enable_embed_layer_norm = False
                opt_model = optimizer.optimize_model(
                    fpath,
                    'bert',  # 'bert_keras' causes an error with transformers
                    num_heads=12,
                    hidden_size=768,
                    optimization_options=opt_options)
                opt_model.save_model_to_file(fpath)
            except:
                warnings.warn('Could not run BERT-specific optimizations')
                pass
        quantize_path = quantize(Path(fpath))
        return_fpath = quantize_path.as_posix()

    if verbose:
        print('done.')
    return return_fpath
              activation='relu'))
model.add(Conv3D(32, 1, padding="same", activation="relu"))
model.add(
    Conv3DTranspose(32, 2, strides=(2, 2, 2), padding="same", activation='relu'))
model.add(Conv3D(3, 1, padding="same", activation="sigmoid"))

model.compile(loss='mse', optimizer="adam", metrics=[])
model.summary()

# Training
model.fit(x_train, x_train, batch_size=args.batch_size, epochs=args.epochs)

# Evaluation
eval_loss = model.evaluate(x_train, x_train)
print("Evaluation result: Loss:", eval_loss)

# In case an output metric file is provided, store the test mse value
if args.output_metric != "":
    with open(args.output_metric, 'w') as ofile:
        ofile.write(str(eval_loss))

# Convert to ONNX
onnx_model = keras2onnx.convert_keras(model, "convT3D_synthetic", debug_mode=1)

# Save ONNX to file
keras2onnx.save_model(onnx_model, args.output_path)
def train(lol="lal", epochs=1, metadata_file=r"./outputs/training_metadata.yaml", learning_rate=0.001): print(__name__) logger.info(lol) def load_data(path, num_images=5, image_size=28): f = gzip.open("./data/" + path, 'r') logger.info(path) if image_size < 28: f.read(8) else: f.read(16) buf = f.read(image_size * image_size * num_images) data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32) logger.info(data.shape) data = data.reshape(num_images, image_size, image_size) return data data_paths = os.listdir("./data") logger.info(data_paths) test_paths = list(filter(lambda x: "t10k" in x, data_paths)) test_images_path = list(filter(lambda x: "images" in x, test_paths))[0] test_labels_path = list(filter(lambda x: "labels" in x, test_paths))[0] train_paths = list(filter(lambda x: "train" in x, data_paths)) train_images_path = list(filter(lambda x: "images" in x, train_paths))[0] train_labels_path = list(filter(lambda x: "labels" in x, train_paths))[0] (x_train, y_train), (x_test, y_test) = (load_data( train_images_path, 60_000), load_data(train_labels_path, 60_000, 1)), (load_data(test_images_path, 10_000), load_data(test_labels_path, 10_000, 1)) x_train, x_test = x_train / 255.0, x_test / 255.0 # Add a channels dimension x_train = x_train[..., tf.newaxis].astype("float32") x_test = x_test[..., tf.newaxis].astype("float32") logger.info(x_train.shape) train_ds = tf.data.Dataset.from_tensor_slices( (x_train, y_train)).shuffle(10000).batch(32) test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32) class MyModel(Model): def __init__(self): super(MyModel, self).__init__() self.conv1 = Conv2D(32, 3, activation='relu') self.flatten = Flatten() self.d1 = Dense(128, activation='relu') self.d2 = Dense(10) def call(self, x): x = self.conv1(x) x = self.flatten(x) x = self.d1(x) return self.d2(x) # Create an instance of the model model = MyModel() loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') test_loss = tf.keras.metrics.Mean(name='test_loss') test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='test_accuracy') model.compile(optimizer=optimizer, loss=train_loss, metrics=[train_accuracy]) @tf.function def train_step(images, labels): with tf.GradientTape() as tape: # training=True is only needed if there are layers with different # behavior during training versus inference (e.g. Dropout). predictions = model(images, training=True) loss = loss_object(labels, predictions) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) train_loss(loss) train_accuracy(labels, predictions) @tf.function def test_step(images, labels): # training=False is only needed if there are layers with different # behavior during training versus inference (e.g. Dropout). 
        predictions = model(images, training=False)
        t_loss = loss_object(labels, predictions)

        test_loss(t_loss)
        test_accuracy(labels, predictions)

    EPOCHS = epochs
    for epoch in range(EPOCHS):
        # Reset the metrics at the start of the next epoch
        train_loss.reset_states()
        train_accuracy.reset_states()
        test_loss.reset_states()
        test_accuracy.reset_states()

        for images, labels in train_ds:
            train_step(images, labels)

        for test_images, test_labels in test_ds:
            test_step(test_images, test_labels)

        logger.info(
            json.dumps({
                "Epoch": epoch + 1,
                "Loss": train_loss.result().numpy().item(),
                "Accuracy": train_accuracy.result().numpy().item() * 100,
                "Test Loss": test_loss.result().numpy().item(),
                "Test Accuracy": test_accuracy.result().numpy().item() * 100
            }))

    onnx_model = keras2onnx.convert_keras(model, "mnist")
    temp_model_file = './outputs/model.onnx'
    keras2onnx.save_model(onnx_model, temp_model_file)

    metadata = {
        'training_statistics': {
            'train_accuracy': train_accuracy.result().numpy().item(),
            'test_accuracy': test_accuracy.result().numpy().item(),
            'train_loss': train_loss.result().numpy().item(),
            'test_loss': test_loss.result().numpy().item(),
            'Epochs': EPOCHS
        }
    }
    logger.info(metadata)
    with open(metadata_file, 'w') as file:
        documents = yaml.dump(metadata, file)

    logger.info("Checking the exported ONNX model with onnxruntime")
    session = rt.InferenceSession(temp_model_file)
    input_name = session.get_inputs()[0].name
    output_name = session.get_outputs()[0].name
            tf.train.latest_checkpoint(checkpoint_dir))
    else:
        for filename in sorted(os.listdir(checkpoint_dir)):
            if filename.endswith(".index") and checkpoint_name in filename:
                ckpt_name = os.path.splitext(filename)[0]
                ckpt_path = os.path.splitext(os.path.join(checkpoint_dir, filename))[0]
                checkpoint.restore(ckpt_path)
                return print("Checkpoint " + ckpt_name + " restored")
        print("No checkpoint named {} found".format(checkpoint_name))


# SAVE THE MODEL TO ONNX
saved_model_path = os.path.dirname(__file__) + '/saved_models/'
chosen_checkpoint = "ckpt-41"
LoadCheckpoint(chosen_checkpoint, latest=True)

# generator.summary()
# print(generator.output_names)
# print(generator.metrics_names)
print(generator.outputs)

# THIS EXPORT IS THE ONE WHICH WORKS!
output_onnx_path = os.path.dirname(__file__) + '/saved_models/onnx_models/pp_p2p_v4_k2o.onnx'
onnx_model = keras2onnx.convert_keras(generator, generator.name, target_opset=10)
keras2onnx.save_model(onnx_model, output_onnx_path)
def keras_to_onnx(keras_model, out_file):
    model = load_model(keras_model)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    keras2onnx.save_model(onnx_model, out_file)
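# A hedged verification sketch (not from the original source): after
# keras_to_onnx() runs, compare Keras and onnxruntime outputs on random input.
# It assumes a single-input model; file names and tolerance are illustrative.
import numpy as np
import onnxruntime
from tensorflow.keras.models import load_model


def verify_conversion(keras_file="model.h5", onnx_file="model.onnx", atol=1e-4):
    model = load_model(keras_file)
    x = np.random.rand(1, *model.input_shape[1:]).astype(np.float32)
    keras_out = model.predict(x)
    sess = onnxruntime.InferenceSession(onnx_file)
    onnx_out = sess.run(None, {sess.get_inputs()[0].name: x})[0]
    assert np.allclose(keras_out, onnx_out, atol=atol), "Keras and ONNX outputs diverge"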
print(f"{score:.5f} - {character} [{index}]") print() ################################################################################ # Save: Convert the model to ONNX and save. # print() print(f"Saving an ONNX model:") # convert onnx_model = keras2onnx.convert_keras(model, model.name) # save keras2onnx.save_model(onnx_model, ONNX_MODEL_FILE) ################################################################################ # Save: Save the model to a frozen TensorFlow model. # from tensorflow import TensorSpec from tensorflow.io import write_graph from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 print() print(f"Saving a frozen TensorFlow model:") # convert the Keras model to a concrete function spec = TensorSpec(shape=model.inputs[0].shape, dtype=model.inputs[0].dtype) full_model = tf.function(lambda x: model(x)).get_concrete_function(spec)
# Channels axis - shape (1,5,5,1)
x = x[..., np.newaxis]

# duplicate x to have 3 channels - shape (1,5,5,3)
# (the channel values are the same for each pixel)
x = np.repeat(x, NUM_CHANNELS, axis=3)

model = tf.keras.Sequential()
model.add(
    keras.layers.Conv2D(
        NUM_KERNELS,
        (2, 2),
        strides=1,
        kernel_initializer="ones",
        # For checking of biases, issue #7.
        bias_initializer="random_normal"))
# model.add(keras.layers.MaxPooling2D((2, 2), strides=2))

# flattening to 1d makes it easier to have a generic main()
model.add(keras.layers.Flatten())

optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
model.compile(optimizer=optimizer, loss="MSE")

predictions = model.predict(x)
print(predictions)

onnx_model = keras2onnx.convert_keras(model, "demo")
keras2onnx.save_model(onnx_model, "demo.onnx")
import sys

from constraints import ZeroSomeWeights
from keras.utils.generic_utils import get_custom_objects

get_custom_objects().update({"ZeroSomeWeights": ZeroSomeWeights})

model = load_model(sys.argv[1])

X = np.array(np.random.rand(10, 21), dtype=np.float32)
print(model.predict(X))

# convert to onnx model
onnx_model = keras2onnx.convert_keras(model, model.name)

temp_model_file = 'NN_model.onnx'
keras2onnx.save_model(onnx_model, temp_model_file)
sess = onnxruntime.InferenceSession(temp_model_file)

input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name

# This input name is needed in Classifier_cff as NNIdONNXInputName
print(sess.get_inputs()[0].name)
print(label_name)  # The name of the output is needed in Classifier_cff as NNIdONNXOutputName

# predict on random input and compare to the previous keras model
for i in range(len(X)):
    pred_onx = sess.run([label_name], {input_name: X[i:i + 1]})[0]
    print(pred_onx)