def run_compare_tf(
    graph,
    feed_dict,
    output_nodes,
    use_cpu_only=False,
    use_cpu_for_conversion=False,
    frontend_only=False,
    frontend="tensorflow",
    backend=("neuralnetwork", "fp32"),
    atol=1e-04,
    rtol=1e-05,
    validate_shapes_only=False,
    freeze_graph=False,
    tf_outputs=None,
):
    """
    Utility function to convert and compare a given TensorFlow 1.x model.

    Parameters
    ----------
    graph: tf.Graph
        TensorFlow 1.x model in tf.Graph format.
    feed_dict: dict of (tf.placeholder, np.array)
        Dict of placeholder and value pairs representing inputs.
    output_nodes: tf.node or list[tf.node]
        List of names representing outputs.
    use_cpu_only: bool
        If True, use the CPU only for prediction; otherwise, also use the GPU.
    use_cpu_for_conversion: bool
        If True, the converter is invoked as "ct.convert(..., useCPUOnly=True)",
        which forces the model to be loaded with the CPU context when the
        converter builds the ML model object from the proto spec via
        "ct.models.MLModel(proto_spec, useCPUOnly=True)".

        By contrast, "use_cpu_only" refers only to the compute engine used for
        prediction. A model loaded on a non-CPU context can still be forced to
        execute on the CPU at prediction time, so
        "use_cpu_for_conversion = False && use_cpu_only = True" is valid: the
        model is loaded for the GPU but executed on the CPU.

        "use_cpu_for_conversion = True && use_cpu_only = False" is invalid,
        however: once a model is loaded on a CPU context, its context cannot
        be changed to a non-CPU device at prediction time.
    frontend_only: bool
        If True, skip the prediction call and only validate the conversion.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    validate_shapes_only: bool
        If True, skip the element-wise value comparison.
    freeze_graph: bool
        If True, use the "tensorflow.python.tools.freeze_graph" function to
        freeze the TF graph prior to conversion. This ensures that all the
        variables in the graph have been converted to constants.
    tf_outputs: float or list[float]
        If present, use it as the TensorFlow predictions.

    Return:
        Proto, mlmodel, input dictionary, prediction (if possible).
    """
    if use_cpu_for_conversion and not use_cpu_only:
        # use_cpu_for_conversion = True && use_cpu_only = False
        raise ValueError(
            "use_cpu_for_conversion = True && use_cpu_only = False is an invalid test case"
        )

    if not isinstance(output_nodes, (tuple, list)):
        output_nodes = [output_nodes]

    if freeze_graph:
        with tempfile.TemporaryDirectory() as model_dir:
            graph_def_file = os.path.join(model_dir, "tf_graph.pb")
            checkpoint_file = os.path.join(model_dir, "tf_model.ckpt")
            static_model_file = os.path.join(model_dir, "tf_static.pb")

            with tf.Session(graph=graph) as sess:
                sess.run(tf.global_variables_initializer())
                if tf_outputs is None:
                    tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)

                tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text=False)
                saver = tf.train.Saver()
                saver.save(sess, checkpoint_file)

                output_node_names = get_tf_node_names(output_nodes, mode="outputs")
                output_node_names = [name.split(":")[0] for name in output_node_names]
                output_op_names = ",".join(output_node_names)

                freeze_g(
                    input_graph=graph_def_file,
                    input_saver="",
                    input_binary=True,
                    input_checkpoint=checkpoint_file,
                    output_node_names=output_op_names,
                    restore_op_name="save/restore_all",
                    filename_tensor_name="save/Const:0",
                    output_graph=static_model_file,
                    clear_devices=True,
                    initializer_nodes="",
                )

            graph = load_tf_pb(static_model_file)

    mlmodel, input_key_values, output_names, output_nodes = tf_graph_to_mlmodel(
        graph,
        feed_dict,
        output_nodes,
        frontend,
        backend,
        use_cpu_for_conversion=use_cpu_for_conversion,
    )

    if frontend_only or coremltoolsutils._macos_version() < (10, 13) \
            or (mlmodel.is_package and coremltoolsutils._macos_version() < (12, 0)):
        return mlmodel._spec, mlmodel, input_key_values, None

    if tf_outputs is None:
        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())
            tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)

    expected_outputs = {name: val for name, val in zip(output_names, tf_outputs)}

    for k, v in input_key_values.items():
        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
            # Core ML only accepts floats (the builtin float replaces np.float,
            # which was removed from NumPy).
            input_key_values[k] = v.astype(float)

    if validate_shapes_only:
        compare_shapes(mlmodel, input_key_values, expected_outputs, use_cpu_only)
    else:
        compare_backend(
            mlmodel,
            input_key_values,
            expected_outputs,
            use_cpu_only,
            atol=atol,
            rtol=rtol,
            also_compare_shapes=True,
            dtype=backend[1],
        )

    pred = None
    if not coremltoolsutils._has_custom_layer(mlmodel.get_spec()):
        pred = run_core_ml_predict(mlmodel, input_key_values, use_cpu_only)
    else:
        print("Skipping model prediction as it has a custom nn layer!")

    return mlmodel._spec, mlmodel, input_key_values, pred
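# A minimal usage sketch for the variant above (a hypothetical helper, not part
# of the original suite). It assumes a TensorFlow 1.x environment and the
# module-level imports (tf, np) already used by run_compare_tf; the toy ReLU
# graph and its shape are illustrative only.
def _example_run_compare_tf_relu():
    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(tf.float32, shape=(1, 4), name="input")
        out = tf.nn.relu(x, name="output")
    feed_dict = {x: np.random.rand(1, 4).astype(np.float32)}
    # The valid mixed case from the docstring: load with a GPU-capable
    # context, but force CPU execution at prediction time.
    spec, mlmodel, inputs, pred = run_compare_tf(
        graph,
        feed_dict,
        output_nodes=out,
        use_cpu_only=True,
        use_cpu_for_conversion=False,
    )
    return pred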
def run_compare_tf(
    graph,
    feed_dict,
    output_nodes,
    use_cpu_only=False,
    frontend_only=False,
    frontend="tensorflow",
    backend="nn_proto",
    atol=1e-04,
    rtol=1e-05,
    validate_shapes_only=False,
    freeze_graph=False,
    tf_outputs=None,
):
    """
    Utility function to convert and compare a given TensorFlow 1.x model.

    Parameters
    ----------
    graph: tf.Graph
        TensorFlow 1.x model in tf.Graph format.
    feed_dict: dict of (tf.placeholder, np.array)
        Dict of placeholder and value pairs representing inputs.
    output_nodes: tf.node or list[tf.node]
        List of names representing outputs.
    use_cpu_only: bool
        If True, use the CPU only for prediction; otherwise, also use the GPU.
    frontend_only: bool
        If True, skip the prediction call and only validate the conversion.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    validate_shapes_only: bool
        If True, skip the element-wise value comparison.
    freeze_graph: bool
        If True, use the "tensorflow.python.tools.freeze_graph" function to
        freeze the TF graph prior to conversion, so that all the variables in
        the graph are converted to constants.
    tf_outputs: float or list[float]
        If present, use it as the TensorFlow predictions.

    Return:
        Proto.
    """
    mlmodel, input_key_values, output_names, output_nodes = tf_graph_to_mlmodel(
        graph, feed_dict, output_nodes, frontend, backend
    )

    if frontend_only:
        return

    if not isinstance(output_nodes, (tuple, list)):
        output_nodes = [output_nodes]

    if freeze_graph:
        model_dir = tempfile.mkdtemp()
        graph_def_file = os.path.join(model_dir, "tf_graph.pb")
        checkpoint_file = os.path.join(model_dir, "tf_model.ckpt")
        static_model_file = os.path.join(model_dir, "tf_static.pb")
        coreml_model_file = os.path.join(model_dir, "coreml_model.mlmodel")

        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())
            tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)

            tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text=False)
            saver = tf.train.Saver()
            saver.save(sess, checkpoint_file)

            freeze_g(
                input_graph=graph_def_file,
                input_saver="",
                input_binary=True,
                input_checkpoint=checkpoint_file,
                output_node_names=",".join([n.op.name for n in output_nodes]),
                restore_op_name="save/restore_all",
                filename_tensor_name="save/Const:0",
                output_graph=static_model_file,
                clear_devices=True,
                initializer_nodes="",
            )

        graph = load_tf_pb(static_model_file)

        # Need to convert again using the frozen graph.
        mlmodel, input_key_values, output_names, output_nodes = tf_graph_to_mlmodel(
            graph, feed_dict, output_nodes, frontend, backend
        )
    else:
        if not tf_outputs:
            with tf.Session(graph=graph) as sess:
                sess.run(tf.global_variables_initializer())
                tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)

    expected_outputs = {name: val for name, val in zip(output_names, tf_outputs)}

    for k, v in input_key_values.items():
        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
            # Core ML only accepts floats (the builtin float replaces np.float,
            # which was removed from NumPy).
            input_key_values[k] = v.astype(float)

    if validate_shapes_only:
        compare_shapes(mlmodel, input_key_values, expected_outputs, use_cpu_only)
    else:
        compare_backend(
            mlmodel,
            input_key_values,
            expected_outputs,
            use_cpu_only,
            atol=atol,
            rtol=rtol,
            also_compare_shapes=True,
        )

    return mlmodel._spec
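# A minimal sketch of the freeze_graph path for this older variant (a
# hypothetical helper, illustrative values only). The tf.Variable exercises
# the branch that writes a checkpoint, freezes it with freeze_g so variables
# become constants, and reconverts the static graph; this variant returns
# only the proto spec.
def _example_run_compare_tf_frozen():
    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(tf.float32, shape=(1, 4), name="input")
        w = tf.Variable(tf.ones((4, 2)), name="weights")
        out = tf.matmul(x, w, name="output")
    feed_dict = {x: np.random.rand(1, 4).astype(np.float32)}
    spec = run_compare_tf(
        graph,
        feed_dict,
        output_nodes=out,
        freeze_graph=True,
    )
    return spec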
def run_compare_tf(
    graph,
    feed_dict,
    output_nodes,
    inputs_for_conversion=None,
    use_cpu_for_conversion=False,
    frontend_only=False,
    frontend="tensorflow",
    backend=("neuralnetwork", "fp32"),
    atol=1e-04,
    rtol=1e-05,
    validate_shapes_only=False,
    freeze_graph=False,
    tf_outputs=None,
    minimum_deployment_target=None,
):
    """
    Utility function to convert and compare a given TensorFlow 1.x model.

    Parameters
    ----------
    graph: tf.Graph
        TensorFlow 1.x model in tf.Graph format.
    feed_dict: dict of (tf.placeholder, np.array)
        Dict of placeholder and value pairs representing inputs.
    output_nodes: tf.node or list[tf.node]
        List of names representing outputs.
    inputs_for_conversion: list of coremltools.TensorType() or coremltools.ImageType() objects
        Defaults to None. It is passed as-is to the "inputs" argument of the
        converter.
    use_cpu_for_conversion: bool
        If True, the model is loaded with the CPU context.
    frontend_only: bool
        If True, skip the prediction call and only validate the conversion.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.
    atol: float
        The absolute tolerance parameter.
    rtol: float
        The relative tolerance parameter.
    validate_shapes_only: bool
        If True, skip the element-wise value comparison.
    freeze_graph: bool
        If True, use the "tensorflow.python.tools.freeze_graph" function to
        freeze the TF graph prior to conversion. This ensures that all the
        variables in the graph have been converted to constants.
    tf_outputs: float or list[float]
        If present, use it as the TensorFlow predictions.
    minimum_deployment_target: coremltools.target enumeration
        Sets the minimum_deployment_target argument in the coremltools.convert
        function.

    Return:
        Proto, mlmodel, input dictionary, prediction (if possible).
    """
    if not isinstance(output_nodes, (tuple, list)):
        output_nodes = [output_nodes]

    if freeze_graph:
        with tempfile.TemporaryDirectory() as model_dir:
            graph_def_file = os.path.join(model_dir, "tf_graph.pb")
            checkpoint_file = os.path.join(model_dir, "tf_model.ckpt")
            static_model_file = os.path.join(model_dir, "tf_static.pb")

            with tf.Session(graph=graph) as sess:
                sess.run(tf.global_variables_initializer())
                if tf_outputs is None:
                    tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)

                tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text=False)
                saver = tf.train.Saver()
                saver.save(sess, checkpoint_file)

                output_node_names = get_tf_node_names(output_nodes, mode="outputs")
                output_node_names = [name.split(":")[0] for name in output_node_names]
                output_op_names = ",".join(output_node_names)

                freeze_g(
                    input_graph=graph_def_file,
                    input_saver="",
                    input_binary=True,
                    input_checkpoint=checkpoint_file,
                    output_node_names=output_op_names,
                    restore_op_name="save/restore_all",
                    filename_tensor_name="save/Const:0",
                    output_graph=static_model_file,
                    clear_devices=True,
                    initializer_nodes="",
                )

            graph = load_tf_pb(static_model_file)

    mlmodel, input_key_values, output_names, output_nodes = tf_graph_to_mlmodel(
        graph,
        feed_dict,
        output_nodes,
        frontend,
        backend,
        use_cpu_for_conversion=use_cpu_for_conversion,
        inputs_for_conversion=inputs_for_conversion,
        minimum_deployment_target=minimum_deployment_target,
    )

    if frontend_only or coremltoolsutils._macos_version() < (10, 13) \
            or (mlmodel.is_package and coremltoolsutils._macos_version() < (12, 0)):
        return mlmodel._spec, mlmodel, input_key_values, None

    if tf_outputs is None:
        with tf.Session(graph=graph) as sess:
            sess.run(tf.global_variables_initializer())
            tf_outputs = sess.run(output_nodes, feed_dict=feed_dict)

    expected_outputs = {name: val for name, val in zip(output_names, tf_outputs)}

    for k, v in input_key_values.items():
        if isinstance(v, np.ndarray) and issubclass(v.dtype.type, np.integer):
            # Core ML only accepts floats (the builtin float replaces np.float,
            # which was removed from NumPy).
            input_key_values[k] = v.astype(float)

    pred = None
    if validate_shapes_only:
        compare_shapes(mlmodel, input_key_values, expected_outputs)
    elif not coremltoolsutils._has_custom_layer(mlmodel._spec):
        pred = compare_backend(
            mlmodel,
            input_key_values,
            expected_outputs,
            atol=atol,
            rtol=rtol,
            also_compare_shapes=True,
            dtype=backend[1],
        )
    else:
        print("Skipping model prediction as it has a custom nn layer!")

    return mlmodel._spec, mlmodel, input_key_values, pred
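# A minimal usage sketch for the variant above (a hypothetical helper,
# illustrative values only). It shows "inputs_for_conversion", which is
# forwarded as-is to the converter's "inputs" argument, together with
# "minimum_deployment_target"; coremltools.TensorType and coremltools.target
# are public coremltools APIs.
def _example_run_compare_tf_with_input_types():
    import coremltools as ct

    graph = tf.Graph()
    with graph.as_default():
        x = tf.placeholder(tf.float32, shape=(1, 4), name="input")
        out = tf.nn.relu(x, name="output")
    feed_dict = {x: np.random.rand(1, 4).astype(np.float32)}
    spec, mlmodel, inputs, pred = run_compare_tf(
        graph,
        feed_dict,
        output_nodes=out,
        inputs_for_conversion=[ct.TensorType(name="input", shape=(1, 4))],
        backend=("mlprogram", "fp16"),
        minimum_deployment_target=ct.target.iOS15,
    )
    return pred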