# Imports the snippets below rely on (not shown in the original excerpt).
import logging

import numpy as np

import coremltools as ct
from coremltools import models
from coremltools.converters.mil import Builder as mb
from coremltools.converters.mil import converter
from coremltools.converters.mil.input_types import ImageType


def test_single_layer_example():
    batch_size, input_dim, output_dim = 2, 4, 2

    @mb.program(input_specs=[
        mb.TensorSpec(shape=(batch_size, input_dim)),
    ])
    def prog(x):
        # Weight
        W_val = (
            np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
            .reshape(input_dim, output_dim)
            .T.astype(np.float32)
        )
        W = mb.const(val=W_val, mode="file_value", name="const_W")

        # Bias
        b_val = np.array([-0.5, 0.5]).astype(np.float32)
        b = mb.const(val=b_val, mode="file_value", name="const_b")
        return mb.linear(x=x, weight=W, bias=b, name="lin")

    logging.info("prog:\n%s", prog)

    proto = converter._convert(prog, convert_from="mil", convert_to="nn_proto")

    feed_dict = {
        "x": np.random.rand(batch_size, input_dim).astype(np.float32),
    }
    model = models.MLModel(proto)
    if model is None:
        raise AssertionError
    if ct.utils._is_macos():
        prediction = model.predict(feed_dict)
        if len(prediction) != 1:
            raise AssertionError
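
# A minimal sketch (not part of the original test) of how the prediction above
# could be sanity-checked against numpy: a linear layer with weight of shape
# (output_dim, input_dim) computes x @ W.T + b. The output key lookup in the
# usage note is hypothetical and depends on how the converter names the "lin"
# op's output.
def _expected_linear_output(x, W_val, b_val):
    # x: (batch, input_dim), W_val: (output_dim, input_dim), b_val: (output_dim,)
    return x @ W_val.T + b_val

# Example usage (on macOS, inside test_single_layer_example):
#     expected = _expected_linear_output(feed_dict["x"], W_val, b_val)
#     np.testing.assert_allclose(list(prediction.values())[0], expected, atol=1e-4)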
def test_while_example():
    def body(a, b):
        return mb.add(x=a, y=b), b

    def cond(a, b):
        a_mean = mb.reduce_mean(x=a, axes=[0, 1])
        b_mean = mb.reduce_mean(x=b, axes=[0, 1])
        return mb.less(x=a_mean, y=b_mean)

    @mb.program(input_specs=[
        mb.TensorSpec(shape=(1, 2)),
        mb.TensorSpec(shape=(1, 2)),
    ])
    def prog(a, b):
        return mb.while_loop(_cond=cond, _body=body, loop_vars=(a, b))

    logging.info("prog:\n%s", prog)

    proto = converter._convert(prog, convert_from="mil", convert_to="nn_proto")

    feed_dict = {
        "a": np.random.rand(1, 2).astype(np.float32),
        "b": np.random.rand(1, 2).astype(np.float32),
    }
    model = models.MLModel(proto)
    if model is None:
        raise AssertionError
    if ct.utils._is_macos():
        prediction = model.predict(feed_dict)
        if len(prediction) != 2:
            raise AssertionError
def test_tutorial():
    from coremltools.converters.mil import Builder as mb

    @mb.program(
        input_specs=[mb.TensorSpec(shape=(1, 100, 100, 3))]
    )
    def prog(x):
        x = mb.relu(x=x, name="relu")
        x = mb.transpose(x=x, perm=[0, 3, 1, 2], name="transpose")
        x = mb.reduce_mean(x=x, axes=[2, 3], keep_dims=False, name="reduce")
        x = mb.log(x=x, name="log")
        y = mb.add(x=1, y=2)  # unused; expected to be dropped by dead-code elimination
        return x

    print("prog:\n", prog)

    # Convert and verify
    from coremltools.converters.mil.converter import _convert
    from coremltools import models

    proto = _convert(prog, convert_from="mil")
    model = models.MLModel(proto)

    # running predict() is only supported on macOS
    if ct.utils._is_macos():
        prediction = model.predict(
            {"x": np.random.rand(1, 100, 100, 3).astype(np.float32)}
        )
        assert len(prediction) == 1
def test_fusion_with_image_full(self):
    @mb.program(input_specs=[mb.TensorSpec(shape=(10, 20, 30, 3))])
    def prog(x):
        x1 = mb.transpose(x=x, perm=[0, 3, 1, 2])
        x2 = mb.relu(x=x)
        x3 = mb.transpose(x=x2, perm=[0, 3, 1, 2])
        x4 = mb.add(x=x1, y=x3)
        return mb.relu(x=x4)

    proto = converter._convert(
        prog,
        inputs=[ImageType(name="x", shape=(10, 20, 30, 3), channel_first=False)],
        convert_from="mil",
        convert_to="nn_proto",
    )
    model = models.MLModel(proto)
    assert model is not None
    assert len(model._spec.neuralNetwork.layers) == 3
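
# A small inspection sketch (an assumption, not part of the original test) for
# seeing which three layers survive the transpose fusion above. Each
# NeuralNetworkLayer proto stores its payload in a oneof named "layer", so
# WhichOneof("layer") returns the active layer type.
def _layer_types(mlmodel):
    return [layer.WhichOneof("layer") for layer in mlmodel._spec.neuralNetwork.layers]

# Example: print(_layer_types(model)) after the assertions in
# test_fusion_with_image_full to list the remaining layer kinds.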
def test_conv_example():
    batch, C_in, C_out, H, W = 2, 2, 3, 7, 10
    kH, kW = 3, 5
    img_shape, seq_shape = (batch, C_in, H, W), (batch, C_in, H)

    @mb.program(input_specs=[
        mb.TensorSpec(shape=img_shape),
        mb.TensorSpec(shape=seq_shape),
    ])
    def prog(img, seq):
        ## 2D convolution
        # Weight
        W_2d = np.random.rand(C_out, C_in, kH, kW).astype(np.float32)
        W_2d = mb.const(val=W_2d, mode="file_value", name="const_W")

        # Test 1: provide only required arguments.
        conv1 = mb.conv(x=img, weight=W_2d, pad_type="valid")
        logging.info("conv1 shape: {}".format(conv1.shape))

        # Test 2: stride > 1
        conv2 = mb.conv(x=img, weight=W_2d, pad_type="valid", strides=[2, 3])
        logging.info("conv2 shape: {}".format(conv2.shape))

        # Test 3: same padding
        conv3 = mb.conv(x=img, weight=W_2d, pad_type="same", strides=[2, 3])
        logging.info("conv3 shape: {}".format(conv3.shape))

        # Test max_pool with valid padding
        pool1 = mb.max_pool(x=img, kernel_sizes=[kH, kW], pad_type="valid", strides=[2, 3])
        logging.info("pool1 shape: {}".format(pool1.shape))

        # Test max_pool with same padding
        pool2 = mb.max_pool(x=img, kernel_sizes=[kH, kW], pad_type="same", strides=[2, 3])
        logging.info("pool2 shape: {}".format(pool2.shape))

        ## 1D convolution
        W_1d = np.random.rand(C_out, C_in, kH).astype(np.float32)
        W_1d = mb.const(val=W_1d, mode="file_value", name="const_W_1d")
        logging.info("W_1d val: {}".format(W_1d.val))

        # Test 4: provide only required arguments for 1D.
        conv4 = mb.conv(x=seq, weight=W_1d, pad_type="valid")
        logging.info("conv4 shape: {}".format(conv4.shape))

        return conv1, conv2, conv3, pool1, pool2, conv4

    proto = converter._convert(prog, convert_from="mil", convert_to="nn_proto")

    feed_dict = {
        "img": np.random.rand(*img_shape).astype(np.float32),
        "seq": np.random.rand(*seq_shape).astype(np.float32),
    }
    model = models.MLModel(proto)
    if model is None:
        raise AssertionError
    if ct.utils._is_macos():
        prediction = model.predict(feed_dict)
        if len(prediction) != 6:
            raise AssertionError
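
# A short sketch (an assumption, not from the original test) of the standard
# output-shape formulas behind the shapes logged above; MIL's shape inference
# for "valid" and "same" padding is expected to match these:
#   valid: out = floor((in - kernel) / stride) + 1
#   same:  out = ceil(in / stride)
import math

def _conv2d_out_hw(in_hw, kernel_hw, strides, pad_type):
    if pad_type == "valid":
        return tuple((i - k) // s + 1 for i, k, s in zip(in_hw, kernel_hw, strides))
    return tuple(math.ceil(i / s) for i, s in zip(in_hw, strides))

# With H, W = 7, 10 and kH, kW = 3, 5 as in test_conv_example:
#   conv1 (valid, stride 1)       -> (5, 6)
#   conv2 (valid, strides [2, 3]) -> (3, 2)
#   conv3 (same,  strides [2, 3]) -> (4, 4)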
import coremltools.models as core
import turicreate as tc

# Collect model metadata and the training data file name from the user.
model_name = input('Model Name: ')
file_name = input('Name of training data: ')
author = input('Author: ')
license = input('License: ')
short_description = input('Short Description: ')

# Load the training data into an SFrame.
data = tc.SFrame(file_name)

# Train a sentence classifier that predicts the 'Sentiment' label
# from the 'SentimentText' column.
model = tc.sentence_classifier.create(data, 'Sentiment', features=['SentimentText'])

# Save the Turi Create model and export it as a Core ML model.
model.save(model_name + '.model')
model.export_coreml(model_name + '.mlmodel')

# Attach metadata to the exported Core ML model and re-save it.
mlmodel = core.MLModel(model_name + '.mlmodel')
mlmodel.author = author
mlmodel.license = license
mlmodel.short_description = short_description
mlmodel.save(model_name + '.mlmodel')
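
# A quick follow-up sketch (not part of the original script) to confirm that
# the metadata written above persisted in the saved .mlmodel file.
def print_mlmodel_metadata(path):
    m = core.MLModel(path)
    print("author:", m.author)
    print("license:", m.license)
    print("short description:", m.short_description)

# Example:
#     print_mlmodel_metadata(model_name + '.mlmodel')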