# Example #1
def verify_min(input_dim):
    """Build a CoreML elementwise MIN network over three inputs and check
    the TVM-converted output against numpy's reference on every enabled
    target/device pair."""
    dtype = "float32"

    # Three random operands of identical shape; reference is elementwise min.
    data = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(3)]
    expected = np.min(data, axis=0)

    names = ["input1", "input2", "input3"]
    inputs = [(n, datatypes.Array(*input_dim)) for n in names]
    output = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(
        name="Min", input_names=names, output_name="output", mode="MIN"
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        out = run_tvm_graph(model, target, dev, data, names, expected.shape, dtype)
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
# Example #2
def verify_image_scaler(input_dim, blue_bias=0.0, green_bias=0.0, red_bias=0.0, image_scale=1.0):
    """Check the CoreML image-scaler preprocessing against a numpy reference.

    The network itself is a single ADD layer (input1 + input2); input1 is
    declared an image input so the scaler (scale plus per-channel bias) is
    applied to it before the add.
    """
    dtype = "float32"
    img = np.random.uniform(size=input_dim).astype(dtype)
    # make sure it is valid image format CHW.
    assert len(img.shape) == 3 and img.shape[0] == 3
    scaled = np.zeros(img.shape, dtype=dtype)
    # BGR channel order: channel 0 gets blue_bias, 1 green, 2 red.
    for channel, bias in enumerate((blue_bias, green_bias, red_bias)):
        scaled[channel, :, :] = image_scale * img[channel, :, :] + bias
    # Reference: preprocessed input1 plus raw input2 (both fed the same array).
    expected = np.add(img, scaled)
    inputs = [("input1", datatypes.Array(*input_dim)), ("input2", datatypes.Array(*input_dim))]
    output = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.set_pre_processing_parameters(
        image_input_names=["input1"],
        is_bgr=True,
        blue_bias=blue_bias,
        green_bias=green_bias,
        red_bias=red_bias,
        image_scale=image_scale,
    )
    # One add layer keeps the CoreML model format valid; ADD itself is
    # exercised by other tests.
    builder.add_elementwise(
        name="add", input_names=["input1", "input2"], output_name="output", alpha=0, mode="ADD"
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        out = run_tvm_graph(
            model, target, dev, [img, img], ["input1", "input2"], expected.shape, dtype
        )
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
    def test_simple_loop_fixed_iterations(self):
        """A copy -> loop(5 iterations of +2) -> copy network maps 0 to 10."""
        top = NeuralNetworkBuilder([('data', datatypes.Array(1))],
                                   [('output', None)],
                                   disable_rank5_shape_mapping=True)
        # Seed "output" with the input so the loop body can accumulate on it.
        top.add_copy('copy_1', input_name='data', output_name='output')

        loop_layer = top.add_loop('loop_layer')
        loop_layer.loop.maxLoopIterations = 5
        # The loop body lives in its own nested network spec.
        body = NeuralNetworkBuilder(input_features=None,
                                    output_features=None,
                                    spec=None,
                                    nn_spec=loop_layer.loop.bodyNetwork)
        body.add_elementwise('add',
                             input_names=['output'],
                             output_name='x',
                             mode='ADD',
                             alpha=2)
        body.add_copy('copy_2', input_name='x', output_name='output')

        coremltools.models.utils.save_spec(
            top.spec, '/tmp/simple_loop_fixed_iterations.mlmodel')
        mlmodel = MLModel(top.spec)

        # Five iterations of +2 starting from 0 give 10.
        feed = {'data': np.array([0], dtype='float')}
        want = {'output': np.array([10], dtype='float')}
        self._test_model(mlmodel, feed, want)
# Example #4
def verify_min(input_dim):
    """Verify the CoreML three-input elementwise MIN layer via TVM.

    Builds the network, runs it through run_tvm_graph for every
    (target, ctx) pair from ctx_list(), and compares with np.min.
    """
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.min((a_np1, a_np2, a_np3), axis=0)

    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim)),
              ('input3', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Min',
                            input_names=['input1', 'input2', 'input3'],
                            output_name='output',
                            mode='MIN')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        # Fix: forward target/ctx — the loop variables were previously
        # unused, so every iteration ran on the default device. This now
        # matches the other ctx_list-based verify_* helpers in this file.
        out = run_tvm_graph(model, target, ctx,
                            [a_np1, a_np2, a_np3],
                            ['input1', 'input2', 'input3'],
                            b_np.shape,
                            dtype)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
    def test_simple_loop_fixed_iterations(self):
        """Build a copy/loop/copy network whose body adds 2 per pass for 5
        fixed iterations, and check that it turns 0 into 10."""
        in_feats = [("data", datatypes.Array(1))]
        out_feats = [("output", None)]

        top = NeuralNetworkBuilder(
            in_feats, out_feats, disable_rank5_shape_mapping=True
        )
        # Seed "output" with the input so the body can accumulate on it.
        top.add_copy("copy_1", input_name="data", output_name="output")

        loop_layer = top.add_loop("loop_layer")
        loop_layer.loop.maxLoopIterations = 5

        # The loop body is a nested network spec.
        body = NeuralNetworkBuilder(
            input_features=None,
            output_features=None,
            spec=None,
            nn_spec=loop_layer.loop.bodyNetwork,
        )
        body.add_elementwise(
            "add", input_names=["output"], output_name="x", mode="ADD", alpha=2
        )
        body.add_copy("copy_2", input_name="x", output_name="output")

        coremltools.models.utils.save_spec(
            top.spec, "/tmp/simple_loop_fixed_iterations.mlmodel"
        )
        mlmodel = MLModel(top.spec)

        # Five iterations of +2 starting from 0 give 10.
        feed = {"data": np.array([0], dtype="float")}
        want = {"output": np.array([10], dtype="float")}
        self._test_model(mlmodel, feed, want)
    def test_simple_branch(self):
        """Test a simple if-else branch network."""
        in_feats = [('data', datatypes.Array(3)), ('cond', datatypes.Array(1))]
        out_feats = [('output', None)]

        top = NeuralNetworkBuilder(in_feats, out_feats,
                                   disable_rank5_shape_mapping=True)
        layer = top.add_branch('branch_layer', 'cond')

        # if-branch: output = data * 10
        if_builder = NeuralNetworkBuilder(input_features=None,
                                          output_features=None,
                                          spec=None,
                                          nn_spec=layer.branch.ifBranch)
        if_builder.add_elementwise('mult_layer', input_names=['data'],
                                   output_name='output', mode='MULTIPLY',
                                   alpha=10)
        # else-branch: output = data + 10
        else_builder = NeuralNetworkBuilder(input_features=None,
                                            output_features=None,
                                            spec=None,
                                            nn_spec=layer.branch.elseBranch)
        else_builder.add_elementwise('add_layer', input_names=['data'],
                                     output_name='output', mode='ADD',
                                     alpha=10)
        coremltools.models.utils.save_spec(top.spec, '/tmp/simple_branch.mlmodel')
        mlmodel = MLModel(top.spec)

        # cond truthy -> if-branch (multiply).
        feed = {'data': np.array(range(1, 4), dtype='float'),
                'cond': np.array([1], dtype='float')}
        want = {'output': feed['data'] * 10}
        self._test_model(mlmodel, feed, want)

        # cond falsy -> else-branch (add).
        feed['cond'] = np.array([0], dtype='float')
        want['output'] = feed['data'] + 10
        self._test_model(mlmodel, feed, want)
 def test_concat_converter(self):
     """CONCAT elementwise layer should convert to a non-None ONNX model."""
     in_shape = (5, 1, 1)
     out_shape = (10, 1, 1)
     feeds = [('input1', datatypes.Array(*in_shape)),
              ('input2', datatypes.Array(*in_shape))]
     outs = [('output', datatypes.Array(*out_shape))]
     builder = NeuralNetworkBuilder(feeds, outs)
     builder.add_elementwise(name='Concate', input_names=['input1', 'input2'],
                             output_name='output', mode='CONCAT')
     self.assertTrue(convert_coreml(builder.spec) is not None)
 def test_dot_product_converter(self):
     """DOT elementwise layer should convert to a non-None ONNX model."""
     in_dim, out_dim = (3,), (1,)
     feeds = [('input1', datatypes.Array(*in_dim)),
              ('input2', datatypes.Array(*in_dim))]
     outs = [('output', datatypes.Array(*out_dim))]
     builder = NeuralNetworkBuilder(feeds, outs)
     builder.add_elementwise(name='Dot', input_names=['input1', 'input2'],
                             output_name='output', mode='DOT')
     self.assertTrue(convert_coreml(builder.spec) is not None)
 def test_multiply_converter(self):
     """MULTIPLY elementwise layer should convert to a non-None ONNX model."""
     dim = (1, 2, 2)
     feeds = [('input1', datatypes.Array(*dim)),
              ('input2', datatypes.Array(*dim))]
     outs = [('output', datatypes.Array(*dim))]
     builder = NeuralNetworkBuilder(feeds, outs)
     builder.add_elementwise(name='Mul', input_names=['input1', 'input2'],
                             output_name='output', mode='MULTIPLY')
     self.assertTrue(convert_coreml(builder.spec) is not None)
    def test_simple_branch(self):
        """Test a simple if-else branch network."""
        in_feats = [("data", datatypes.Array(3)), ("cond", datatypes.Array(1))]
        out_feats = [("output", None)]

        top = NeuralNetworkBuilder(in_feats, out_feats,
                                   disable_rank5_shape_mapping=True)
        layer = top.add_branch("branch_layer", "cond")

        # if-branch computes data * 10.
        if_builder = NeuralNetworkBuilder(
            input_features=None, output_features=None, spec=None,
            nn_spec=layer.branch.ifBranch)
        if_builder.add_elementwise(
            "mult_layer", input_names=["data"], output_name="output",
            mode="MULTIPLY", alpha=10)

        # else-branch computes data + 10.
        else_builder = NeuralNetworkBuilder(
            input_features=None, output_features=None, spec=None,
            nn_spec=layer.branch.elseBranch)
        else_builder.add_elementwise(
            "add_layer", input_names=["data"], output_name="output",
            mode="ADD", alpha=10)

        coremltools.models.utils.save_spec(top.spec, "/tmp/simple_branch.mlmodel")
        mlmodel = MLModel(top.spec)

        # cond truthy -> if-branch (multiply).
        feed = {"data": np.array(range(1, 4), dtype="float"),
                "cond": np.array([1], dtype="float")}
        want = {"output": feed["data"] * 10}
        self._test_model(mlmodel, feed, want)

        # cond falsy -> else-branch (add).
        feed["cond"] = np.array([0], dtype="float")
        want["output"] = feed["data"] + 10
        self._test_model(mlmodel, feed, want)
 def test_dot_product_converter(self):
     """DotProductLayerConverter should emit a node for a DOT layer."""
     in_shape, out_shape = (3, ), (1, )
     feeds = [('input1', datatypes.Array(*in_shape)),
              ('input2', datatypes.Array(*in_shape))]
     outs = [('output', datatypes.Array(*out_shape))]
     builder = NeuralNetworkBuilder(feeds, outs)
     builder.add_elementwise(name='Dot',
                             input_names=['input1', 'input2'],
                             output_name='output',
                             mode='DOT')
     node = DotProductLayerConverter.convert(
         ConvertContext(), builder.spec.neuralNetwork.layers[0],
         ['input'], ['output'])
     self.assertTrue(node is not None)
 def test_concat_converter(self):
     """ConcatLayerConverter should emit a node for a CONCAT layer."""
     in_shape = (5, 1, 1)
     out_shape = (10, 1, 1)
     feeds = [('input1', datatypes.Array(*in_shape)),
              ('input2', datatypes.Array(*in_shape))]
     outs = [('output', datatypes.Array(*out_shape))]
     builder = NeuralNetworkBuilder(feeds, outs)
     builder.add_elementwise(name='Concate',
                             input_names=['input1', 'input2'],
                             output_name='output',
                             mode='CONCAT')
     node = ConcatLayerConverter.convert(
         ConvertContext(), builder.spec.neuralNetwork.layers[0],
         ['input'], ['output'])
     self.assertTrue(node is not None)
 def test_average_converter(self):
     """AverageLayerConverter should emit a node for an AVE layer."""
     dim = (1, 2, 2)
     feeds = [('input1', datatypes.Array(*dim)),
              ('input2', datatypes.Array(*dim))]
     outs = [('output', datatypes.Array(*dim))]
     builder = NeuralNetworkBuilder(feeds, outs)
     builder.add_elementwise(name='MEAN',
                             input_names=['input1', 'input2'],
                             output_name='output',
                             mode='AVE')
     node = AverageLayerConverter.convert(
         ConvertContext(), builder.spec.neuralNetwork.layers[0],
         ['input'], ['output'])
     self.assertTrue(node is not None)
# Example #14
    def test_image_input_type_converter(self):
        """Converting a spec whose input is an image must yield ONNX inputs
        with the right channel count and, on onnx >= 1.2.1, image metadata
        plus per-dimension channel denotations."""
        dim = (3, 15, 25)
        inputs = [('input', datatypes.Array(*dim))]
        outputs = [('output', datatypes.Array(*dim))]
        builder = NeuralNetworkBuilder(inputs, outputs)
        # Identity network: ADD with alpha 0.0 leaves the input unchanged.
        builder.add_elementwise(name='Identity',
                                input_names=['input'],
                                output_name='output',
                                mode='ADD',
                                alpha=0.0)
        spec = builder.spec
        # Rewrite the first input from an array into an image of the same
        # height/width (shadows the `input` builtin, kept as-is).
        input = spec.description.input[0]
        input.type.imageType.height = dim[1]
        input.type.imageType.width = dim[2]
        for coreml_colorspace, onnx_colorspace in (('RGB', 'Rgb8'), ('BGR',
                                                                     'Bgr8'),
                                                   ('GRAYSCALE', 'Gray8')):
            input.type.imageType.colorSpace = ImageFeatureType.ColorSpace.Value(
                coreml_colorspace)
            model_onnx = convert_coreml(spec)
            # Collect N, C, H, W; dim_param carries a symbolic batch dim.
            dims = [
                (d.dim_param or d.dim_value)
                for d in model_onnx.graph.input[0].type.tensor_type.shape.dim
            ]
            # Grayscale collapses to a single channel; RGB/BGR keep three.
            self.assertEqual(
                dims, ['None', 1 if onnx_colorspace == 'Gray8' else 3, 15, 25])

            # Image metadata and denotations only exist from onnx 1.2.1 on.
            if StrictVersion(onnx.__version__) >= StrictVersion('1.2.1'):
                metadata = {
                    prop.key: prop.value
                    for prop in model_onnx.metadata_props
                }
                self.assertEqual(metadata,
                                 {'Image.BitmapPixelFormat': onnx_colorspace})
                self.assertEqual(model_onnx.graph.input[0].type.denotation,
                                 'IMAGE')
                channel_denotations = [
                    d.denotation for d in
                    model_onnx.graph.input[0].type.tensor_type.shape.dim
                ]
                self.assertEqual(channel_denotations, [
                    'DATA_BATCH', 'DATA_CHANNEL', 'DATA_FEATURE',
                    'DATA_FEATURE'
                ])
# Example #15
def verify_ConcatLayerParams(input1_dim, input2_dim):
    """Check the CoreML CONCAT layer against np.concatenate on axis 1,
    on every enabled TVM target/device pair."""
    dtype = "float32"

    x1 = np.random.uniform(size=input1_dim).astype(dtype)
    x2 = np.random.uniform(size=input2_dim).astype(dtype)
    expected = np.concatenate((x1, x2), axis=1)

    inputs = [("input1", datatypes.Array(*input1_dim)),
              ("input2", datatypes.Array(*input2_dim))]
    output = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name="Concate",
                            input_names=["input1", "input2"],
                            output_name="output",
                            mode="CONCAT")
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        out = run_tvm_graph(model, target, dev, [x1, x2],
                            ["input1", "input2"], expected.shape, dtype)
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
# Example #16
def verify_ConcatLayerParams(input1_dim, input2_dim):
    """Check the CoreML CONCAT layer against np.concatenate along axis 1."""
    dtype = 'float32'

    x1 = np.random.uniform(size=input1_dim).astype(dtype)
    x2 = np.random.uniform(size=input2_dim).astype(dtype)
    expected = np.concatenate((x1, x2), axis=1)

    inputs = [('input1', datatypes.Array(*input1_dim)),
              ('input2', datatypes.Array(*input2_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Concate',
                            input_names=['input1', 'input2'],
                            output_name='output',
                            mode='CONCAT')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [x1, x2],
                            ['input1', 'input2'], expected.shape, dtype)
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
# Example #17
def verify_ConcatLayerParams(input1_dim, input2_dim):
    """Concatenate two random tensors along axis 1 through CoreML -> TVM
    and compare the result with numpy."""
    dtype = 'float32'

    shapes = (input1_dim, input2_dim)
    data = [np.random.uniform(size=d).astype(dtype) for d in shapes]
    ref = np.concatenate(data, axis=1)

    names = ['input1', 'input2']
    inputs = [(n, datatypes.Array(*d)) for n, d in zip(names, shapes)]
    output = [('output', datatypes.Array(*ref.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Concate', input_names=names,
                            output_name='output', mode='CONCAT')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, data, names, ref.shape, dtype)
        tvm.testing.assert_allclose(out, ref, rtol=1e-5)
# Example #18
    def create_coreml_model():
        """Build a two-output CoreML model:
        output0 = input0 + input1 and output1 = input0 * 2."""
        shape = (2,)
        alpha = 2

        arr = coremltools.models.datatypes.Array
        inputs = [("input0", arr(*shape)), ("input1", arr(*shape))]
        outputs = [("output0", arr(*shape)), ("output1", arr(*shape))]
        builder = NeuralNetworkBuilder(inputs, outputs)
        builder.add_elementwise(
            name="Add", input_names=["input0", "input1"], output_name="output0", mode="ADD"
        )
        builder.add_elementwise(
            name="Mul", alpha=alpha, input_names=["input0"], output_name="output1", mode="MULTIPLY"
        )
        return coremltools.models.MLModel(builder.spec)
# Example #19
def verify_MultiplyLayerParams(input_dim, alpha):
    """Check the CoreML MULTIPLY elementwise layer (scaled by alpha)
    against np.multiply(x1, x2) * alpha."""
    dtype = 'float32'

    x1 = np.random.uniform(size=input_dim).astype(dtype)
    x2 = np.random.uniform(size=input_dim).astype(dtype)
    expected = np.multiply(x1, x2) * alpha

    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Mul', alpha=alpha,
                            input_names=['input1', 'input2'],
                            output_name='output', mode='MULTIPLY')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [x1, x2],
                            ['input1', 'input2'], expected.shape, dtype)
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
# Example #20
def verify_average(input_dim1, input_dim2, axis=0):
    """Check the CoreML AVE elementwise layer against np.mean over *axis*."""
    dtype = 'float32'

    x1 = np.random.uniform(size=input_dim1).astype(dtype)
    x2 = np.random.uniform(size=input_dim2).astype(dtype)
    expected = np.mean((x1, x2), axis=axis)

    inputs = [('input1', datatypes.Array(*input_dim1)),
              ('input2', datatypes.Array(*input_dim2))]
    output = [('output', datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='MEAN', input_names=['input1', 'input2'],
                            output_name='output', mode='AVE')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, [x1, x2],
                            ['input1', 'input2'], expected.shape, dtype)
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
# Example #21
def verify_MultiplyLayerParams(input_dim, alpha):
    """CoreML MULTIPLY with scalar alpha must equal np.multiply(a, b) * alpha
    on every ctx_list() context."""
    dtype = 'float32'

    data = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(2)]
    ref = np.multiply(data[0], data[1]) * alpha

    names = ['input1', 'input2']
    inputs = [(n, datatypes.Array(*input_dim)) for n in names]
    output = [('output', datatypes.Array(*ref.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Mul', alpha=alpha, input_names=names,
                            output_name='output', mode='MULTIPLY')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        out = run_tvm_graph(model, target, ctx, data, names, ref.shape, dtype)
        tvm.testing.assert_allclose(out, ref, rtol=1e-5)
# Example #22
def verify_AddLayerParams(input_dim, alpha=2):
    """Check the CoreML ADD elementwise layer (with constant alpha) against
    numpy on every enabled TVM target."""
    dtype = "float32"

    x1 = np.random.uniform(size=input_dim).astype(dtype)
    x2 = np.random.uniform(size=input_dim).astype(dtype)
    expected = np.add(x1, x2) + alpha

    inputs = [("input1", datatypes.Array(*input_dim)),
              ("input2", datatypes.Array(*input_dim))]
    output = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name="Add", alpha=alpha,
                            input_names=["input1", "input2"],
                            output_name="output", mode="ADD")
    model = cm.models.MLModel(builder.spec)
    for target, ctx in tvm.testing.enabled_targets():
        out = run_tvm_graph(model, target, ctx, [x1, x2],
                            ["input1", "input2"], expected.shape, dtype)
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
    def create_coreml_model():
        """Build a two-output CoreML model:
        output0 = input0 + input1 and output1 = input0 * 2."""
        shape = (2, )
        alpha = 2

        arr = coremltools.models.datatypes.Array
        inputs = [('input0', arr(*shape)), ('input1', arr(*shape))]
        outputs = [('output0', arr(*shape)), ('output1', arr(*shape))]
        builder = NeuralNetworkBuilder(inputs, outputs)
        builder.add_elementwise(name='Add',
                                input_names=['input0', 'input1'],
                                output_name='output0',
                                mode='ADD')
        builder.add_elementwise(name='Mul', alpha=alpha,
                                input_names=['input0'],
                                output_name='output1',
                                mode='MULTIPLY')
        return coremltools.models.MLModel(builder.spec)
# Example #24
def verify_average(input_dim1, input_dim2, axis=0):
    """Check the CoreML AVE layer against np.mean over *axis* on every
    enabled TVM target."""
    dtype = "float32"

    x1 = np.random.uniform(size=input_dim1).astype(dtype)
    x2 = np.random.uniform(size=input_dim2).astype(dtype)
    expected = np.mean((x1, x2), axis=axis)

    inputs = [("input1", datatypes.Array(*input_dim1)),
              ("input2", datatypes.Array(*input_dim2))]
    output = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name="MEAN", input_names=["input1", "input2"],
                            output_name="output", mode="AVE")
    model = cm.models.MLModel(builder.spec)
    for target, ctx in tvm.testing.enabled_targets():
        out = run_tvm_graph(model, target, ctx, [x1, x2],
                            ["input1", "input2"], expected.shape, dtype)
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
# Example #25
def verify_min(input_dim):
    """Verify the CoreML three-input elementwise MIN layer via TVM.

    Builds the network, runs it through run_tvm_graph for every
    (target, ctx) pair from ctx_list(), and compares with np.min.
    """
    dtype = 'float32'

    a_np1 = np.random.uniform(size=input_dim).astype(dtype)
    a_np2 = np.random.uniform(size=input_dim).astype(dtype)
    a_np3 = np.random.uniform(size=input_dim).astype(dtype)

    b_np = np.min((a_np1, a_np2, a_np3), axis=0)

    inputs = [('input1', datatypes.Array(*input_dim)),
              ('input2', datatypes.Array(*input_dim)),
              ('input3', datatypes.Array(*input_dim))]
    output = [('output', datatypes.Array(*b_np.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name='Min',
                            input_names=['input1', 'input2', 'input3'],
                            output_name='output',
                            mode='MIN')
    model = cm.models.MLModel(builder.spec)
    for target, ctx in ctx_list():
        # Fix: forward target/ctx — the loop variables were previously
        # unused, so every iteration ran on the default device. This now
        # matches the other ctx_list-based verify_* helpers in this file.
        out = run_tvm_graph(model, target, ctx, [a_np1, a_np2, a_np3],
                            ['input1', 'input2', 'input3'], b_np.shape, dtype)
        np.testing.assert_allclose(out, b_np, rtol=1e-5)
# Example #26
def verify_multiply_layer_params(input_dim, alpha):
    """Verify multiply layer params: the CoreML MULTIPLY layer (scaled by
    alpha) must match np.multiply(x1, x2) * alpha on every enabled target."""
    dtype = "float32"

    data = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(2)]
    expected = np.multiply(data[0], data[1]) * alpha

    names = ["input1", "input2"]
    inputs = [(n, datatypes.Array(*input_dim)) for n in names]
    output = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_elementwise(name="Mul", alpha=alpha, input_names=names,
                            output_name="output", mode="MULTIPLY")
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        out = run_tvm_graph(model, target, dev, data, names,
                            expected.shape, dtype)
        tvm.testing.assert_allclose(out, expected, rtol=1e-5)
# Example #27
def make_mlmodel(variables):
    """Build an MLModel for gesture recognition from trained TF weights.

    *variables* is a mapping of names ('W_conv1', 'b_conv1', 'W_fc1',
    'b_fc1', ...) to TensorFlow variables; .eval() is called on each, so a
    default TF session must be active when this runs.
    Returns the assembled coremltools MLModel.
    """
    # Specify the inputs and outputs (there can be multiple).
    # Each name corresponds to the input_name/output_name of a layer in the network so
    # that Core ML knows where to insert and extract data.
    input_features = [('image', datatypes.Array(1, IMAGE_HEIGHT, IMAGE_WIDTH))]
    output_features = [('labelValues', datatypes.Array(NUM_LABEL_INDEXES))]
    builder = NeuralNetworkBuilder(input_features, output_features, mode=None)

    # The "name" parameter has no effect on the function of the network. As far as I know
    # it's only used when Xcode fails to load your mlmodel and gives you an error telling
    # you what the problem is.
    # The input_names and output_name are used to link layers to each other and to the
    # inputs and outputs of the model. When adding or removing layers, or renaming their
    # outputs, always make sure you correct the input and output names of the layers
    # before and after them.
    builder.add_elementwise(name='add_layer',
                            input_names=['image'],
                            output_name='add_layer',
                            mode='ADD',
                            alpha=-0.5)

    # Although Core ML internally uses weight matrices of shape
    # (outputChannels, inputChannels, height, width) (as can be found by looking at the
    # protobuf specification comments), add_convolution takes the shape
    # (height, width, inputChannels, outputChannels) (as can be found in the coremltools
    # documentation). The latter shape matches what TensorFlow uses so we don't need to
    # reorder the matrix axes ourselves.
    # NOTE(review): groups=0 looks suspicious — grouped-convolution counts
    # normally start at 1; confirm coremltools treats 0 as "use default".
    builder.add_convolution(name='conv2d_1',
                            kernel_channels=1,
                            output_channels=32,
                            height=3,
                            width=3,
                            stride_height=1,
                            stride_width=1,
                            border_mode='same',
                            groups=0,
                            W=variables['W_conv1'].eval(),
                            b=variables['b_conv1'].eval(),
                            has_bias=True,
                            is_deconv=False,
                            output_shape=None,
                            input_name='add_layer',
                            output_name='conv2d_1')

    builder.add_activation(name='relu_1',
                           non_linearity='RELU',
                           input_name='conv2d_1',
                           output_name='relu_1',
                           params=None)

    builder.add_pooling(name='maxpool_1',
                        height=2,
                        width=2,
                        stride_height=2,
                        stride_width=2,
                        layer_type='MAX',
                        padding_type='SAME',
                        input_name='relu_1',
                        output_name='maxpool_1')

    # ... (intermediate conv/relu/pool layers elided in this snippet;
    # 'maxpool_3' below is assumed to be produced by them)

    builder.add_flatten(name='maxpool_3_flat',
                        mode=1,
                        input_name='maxpool_3',
                        output_name='maxpool_3_flat')

    # We must swap the axes of the weight matrix because add_inner_product takes the shape
    # (outputChannels, inputChannels) whereas TensorFlow uses
    # (inputChannels, outputChannels). Unlike with add_convolution (see the comment
    # above), the shape add_inner_product expects matches what the protobuf specification
    # requires for inner products.
    builder.add_inner_product(name='fc1',
                              W=tf_fc_weights_order_to_mlmodel(
                                  variables['W_fc1'].eval()).flatten(),
                              b=variables['b_fc1'].eval().flatten(),
                              input_channels=6 * 6 * 64,
                              output_channels=1024,
                              has_bias=True,
                              input_name='maxpool_3_flat',
                              output_name='fc1')

    # ... (fc2 elided; the softmax below reads from it)

    builder.add_softmax(name='softmax',
                        input_name='fc2',
                        output_name='labelValues')

    model = MLModel(builder.spec)

    # Human-readable metadata shown by Xcode / model inspectors.
    model.short_description = 'Model for recognizing a variety of images drawn on screen with one\'s finger'

    model.input_description['image'] = 'A gesture image to classify'
    model.output_description[
        'labelValues'] = 'The "probability" of each label, in a dense array'

    return model
# Example #28
#!/usr/bin/env python3
# Minimal CoreML demo script: build a two-input elementwise ADD network with
# NeuralNetworkBuilder, compile it into an MLModel, run one prediction, and
# save the .mlmodel to disk.
import numpy as np
import coremltools as ct
from coremltools.models.neural_network import datatypes, NeuralNetworkBuilder

# KxK GEMM with bias
K = 64

input_features = [('image', datatypes.Array(K))]
input_features2 = [('image2', datatypes.Array(K))]
output_features = [('probs', datatypes.Array(K))]

# NOTE(review): weights/bias are only referenced by the commented-out
# inner-product/bias layers below; unused by the active ADD-only network.
weights = np.zeros((K, K)) + 3
bias = np.ones(K)

builder = NeuralNetworkBuilder(input_features+input_features2, output_features)

# Alternative single-input pipelines kept for experimentation:
#builder.add_inner_product(name='ip_layer', W=weights, b=None, input_channels=K, output_channels=K, has_bias=False, input_name='image', output_name='med')
#builder.add_inner_product(name='ip_layer_2', W=weights, b=None, input_channels=3, output_channels=3, has_bias=False, input_name='med', output_name='probs')
builder.add_elementwise(name='element', input_names=['image', 'image2'], output_name='probs', mode='ADD')
#builder.add_bias(name='bias', b=bias, input_name='med', output_name='probs', shape_bias=(K,))
#builder.add_activation(name='act_layer', non_linearity='SIGMOID', input_name='med', output_name='probs')

# compile the spec
mlmodel = ct.models.MLModel(builder.spec)

# trigger the ANE!
# Feeds ones and twos, so 'probs' should come back as all threes.
out = mlmodel.predict({"image": np.zeros(K, dtype=np.float32)+1, "image2": np.zeros(K, dtype=np.float32)+2})
print(out)
mlmodel.save('test.mlmodel')