def test_dynamic_slice(self, backend):
    """Convert and numerically compare a torch model whose slice start index
    is itself a model input (dynamic slicing along dim 0)."""

    class DynamicSlicer(torch.nn.Module):
        def forward(self, x, context_length):
            # Drop the first `context_length` rows along dim 0.
            return x[context_length:, :, :]

    class Model(torch.nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.tokens_embedding = torch.nn.Embedding(10, 10, 0)
            self.context_embedding = torch.nn.Embedding(10, 10, 0)
            self.dynamic_slicer = DynamicSlicer()

        def forward(self, tokens, context, context_length):
            token_emb = self.tokens_embedding(tokens)
            ctx_emb = self.context_embedding(context)
            # Context rows come first so slicing them off leaves the tokens.
            combined = torch.cat((ctx_emb, token_emb), dim=0)
            return self.dynamic_slicer(combined, context_length)

    batch_size = 5
    input_specs = [
        TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64),
        TensorType(name="context", shape=(3, batch_size), dtype=np.int64),
        TensorType(name="context_length", shape=(), dtype=np.int32),
    ]
    run_compare_torch(
        input_specs,
        Model(),
        rand_range=(0, 8),
        backend=backend,
        use_scripting=False,
    )
def test_graph_def(self):
    """A tf.Graph object should be accepted directly by the converter."""
    with tf.Graph().as_default() as graph:
        placeholder = tf.placeholder(tf.float32, shape=(3, 4, 5))
        relu_out = tf.nn.relu(placeholder)
        mlmodel = converter.convert(
            graph,
            inputs=[TensorType(placeholder.op.name, (3, 4, 5))],
            outputs=[relu_out.op.name],
        )
    assert mlmodel is not None
def _convert_to_inputtype(inputs): if isinstance(inputs, list): return [_convert_to_inputtype(x) for x in inputs] elif isinstance(inputs, tuple): return tuple([_convert_to_inputtype(x) for x in inputs]) elif isinstance(inputs, torch.Tensor): return TensorType(shape=inputs.shape) else: raise ValueError("Unable to parse type {} into InputType.".format( type(inputs)))
def test_model_metadata(self):
    """Converted models should carry coremltools version and source metadata."""
    with tf.Graph().as_default() as graph:
        placeholder = tf.placeholder(tf.float32, shape=(3, 4, 5))
        relu_out = tf.nn.relu(placeholder)
        mlmodel = converter.convert(
            graph,
            inputs=[TensorType(placeholder.op.name, (3, 4, 5))],
            outputs=[relu_out.op.name],
        )
    user_defined = mlmodel.get_spec().description.metadata.userDefined
    assert "com.github.apple.coremltools.version" in user_defined
    assert "com.github.apple.coremltools.source" in user_defined
    assert "tensorflow==1." in user_defined["com.github.apple.coremltools.source"]
def test_tf_keras(self):
    """A tf.keras Sequential model object should convert directly."""
    model = tf.keras.Sequential(
        [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
    )
    in_names, _ = get_tf_keras_io_names(model)
    mlmodel = converter.convert(
        model,
        inputs=[TensorType(in_names[0], (3, 4, 5))],
        outputs=["Identity"],
        source=frontend,
    )
    assert mlmodel is not None
def test_graph_def_file(self):
    """A graph serialized to a .pb file should convert from its file path."""
    with tf.Graph().as_default() as graph:
        placeholder = tf.placeholder(tf.float32, shape=(3, 4, 5))
        relu_out = tf.nn.relu(placeholder)
        # Serialize first, then convert from the on-disk path.
        tf.io.write_graph(
            graph, self.saved_model_dir, self.model_path_pb, as_text=False
        )
        mlmodel = converter.convert(
            self.model_path_pb,
            inputs=[TensorType(placeholder.op.name, (3, 4, 5))],
            outputs=[relu_out.op.name],
        )
    assert mlmodel is not None
def test_tf_keras_hdf5_file(self):
    """A tf.keras model saved as an HDF5 (.h5) file should convert from its path."""
    keras_model = tf.keras.Sequential(
        [tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
    )
    keras_model.save(self.model_path_h5)
    input_names, output_names = get_tf_keras_io_names(keras_model)
    mlmodel = converter.convert(
        self.model_path_h5,
        inputs=[TensorType(input_names[0], (3, 4, 5))],
        outputs=["Identity"],
        source=frontend,
    )
    # Consistent with sibling tests (was: `if mlmodel is None: raise AssertionError`).
    assert mlmodel is not None
def test_invalid_input_names(self):
    """Supplying an input name absent from the graph must raise ValueError."""
    x_shape = (3, 4, 5)

    @make_tf_graph([x_shape])
    def build_model(x):
        return tf.nn.relu(x)

    model, inputs, outputs = build_model
    with pytest.raises(ValueError) as excinfo:
        converter.convert(model, inputs=[TensorType("invalid_name", x_shape)])
    excinfo.match(
        r"Input \(invalid_name\) provided is not found in given tensorflow graph. Placeholders in graph are: .*"
    )
def test_infer_outputs(self):
    """Conversion should succeed with no explicit outputs (they are inferred)."""
    x_shape = (3, 4, 5)

    @make_tf_graph([x_shape])
    def build_model(x):
        return tf.nn.relu(x)

    model, inputs, outputs = build_model
    first_input = inputs[0]
    # Inputs may come back as names or as tensors; normalize to a name.
    if isinstance(first_input, six.string_types):
        input_name = first_input
    else:
        input_name = first_input.op.name
    mlmodel = converter.convert(model, inputs=[TensorType(input_name, (3, 4, 5))])
    assert mlmodel is not None
    input_values = [random_gen(x_shape, -10.0, 10.0)]
    run_compare_tf(model, dict(zip(inputs, input_values)), outputs)
def tf_graph_to_proto(
    graph, feed_dict, output_nodes, frontend="tensorflow", backend="nn_proto"
):
    """
    Convert a TensorFlow 1.x graph and return its serialized Core ML spec.

    Parameters
    ----------
    graph: tf.Graph
        TensorFlow 1.x model in tf.Graph format.
    feed_dict: dict of (tf.placeholder, np.array)
        Dict of placeholder and value pairs representing inputs.
    output_nodes: tf.node or list[tf.node]
        List of names representing outputs.
    frontend: str
        Frontend to convert from.
    backend: str
        Backend to convert to.

    Returns
    -------
    Proto, Input Values, Output Names, Output Nodes
    """
    # Normalize outputs to a plain list of nodes.
    if isinstance(output_nodes, tuple):
        output_nodes = list(output_nodes)
    elif not isinstance(output_nodes, list):
        output_nodes = [output_nodes]

    input_names = get_tf_node_names(list(feed_dict.keys()), mode="inputs")
    output_names = get_tf_node_names(output_nodes, mode="outputs")
    # Pair each resolved input name with its feed value (order-aligned).
    input_values = dict(zip(input_names, feed_dict.values()))

    input_types = [TensorType(name=name) for name in input_names]
    mlmodel = converter.convert(
        graph,
        inputs=input_types,
        outputs=output_names,
        source=frontend,
        convert_to=backend,
    )
    return mlmodel.get_spec(), input_values, output_names, output_nodes
def test_shaping_utils(self):
    """Exercise flexible-shape inputs three ways: shape taken from the TF graph
    (None, 4, 5), an EnumeratedShapes set, and a RangeDim range.

    The predict/compare sequence was duplicated five times in the original;
    it is factored into local helpers with identical behavior.
    """

    @make_tf_graph([(None, 4, 5)])
    def build_flexible_model(x):
        return tf.nn.relu(x)

    model, inputs, outputs = build_flexible_model
    input_name = TFConverter._get_tensor_name(inputs[0])
    output_name = TFConverter._get_tensor_name(outputs[0])

    def check_relu(mlmodel, shape):
        # Predict with a random input of `shape` and compare against relu(x).
        # NOTE(review): the np.allclose result was discarded in the original
        # (not asserted); behavior is preserved here — confirm intent.
        if not _IS_MACOS:
            return
        values = random_gen(shape, -10.0, 10.0)
        ret = mlmodel.predict({input_name: values})
        np.allclose(ret[output_name], np.maximum(values, 0.0))

    def check_rejected(mlmodel, shape):
        # A shape outside the declared flexibility must fail at predict time.
        if not _IS_MACOS:
            return
        with pytest.raises(RuntimeError):
            mlmodel.predict({input_name: random_gen(shape, -10.0, 10.0)})

    # Static/flexible shape taken from the TF graph itself: (None, 4, 5).
    mlmodel = converter.convert(
        model, inputs=[TensorType(name=input_name)], outputs=[output_name]
    )
    assert mlmodel is not None
    check_relu(mlmodel, (3, 4, 5))

    # Enumerated shapes: only (3, 4, 5) and (4, 4, 5) are accepted.
    enumerated = [
        TensorType(input_name, EnumeratedShapes(shapes=[(3, 4, 5), (4, 4, 5)]))
    ]
    mlmodel = converter.convert(model, inputs=enumerated, outputs=[output_name])
    assert mlmodel is not None
    check_relu(mlmodel, (3, 4, 5))
    check_relu(mlmodel, (4, 4, 5))
    check_rejected(mlmodel, (5, 4, 5))

    # Ranged shape: leading dimension may vary within [3, 5].
    ranged = [TensorType(input_name, [RangeDim(3, 5), 4, 5])]
    mlmodel = converter.convert(model, inputs=ranged, outputs=[output_name])
    assert mlmodel is not None
    check_relu(mlmodel, (3, 4, 5))
    check_relu(mlmodel, (4, 4, 5))
    check_rejected(mlmodel, (2, 4, 5))