        def get_custom_model_spec():
            from coremltools.models.neural_network import NeuralNetworkBuilder
            from coremltools.models.datatypes import Array

            input_name = "output1"
            input_length = self._feature_extractor.output_length
            builder = NeuralNetworkBuilder(
                [(input_name, Array(input_length,))],
                [(prob_name, Array(self.num_classes,))],
                "classifier",
            )
            # Mutable counter so next_layer_name() can update it from the closure.
            layer_counter = [0]
            builder.set_input([input_name], [(input_length,)])

            def next_layer_name():
                layer_counter[0] += 1
                return "layer_%d" % layer_counter[0]

            for i, cur_layer in enumerate(self._custom_classifier.export_weights()):
                W = cur_layer["weight"]
                nC, nB = W.shape
                Wb = cur_layer["bias"]

                output_name = next_layer_name()
                builder.add_inner_product(
                    name="inner_product_" + str(i),
                    W=W,
                    b=Wb,
                    input_channels=nB,
                    output_channels=nC,
                    has_bias=True,
                    input_name=input_name,
                    output_name=output_name,
                )

                input_name = output_name

                if cur_layer["act"]:
                    output_name = next_layer_name()
                    builder.add_activation(
                        "activation" + str(i), "RELU", input_name, output_name
                    )
                    input_name = output_name

            builder.add_softmax("softmax", input_name, prob_name)
            builder.set_class_labels(
                self.classes,
                predicted_feature_name=self.target,
                prediction_blob=prob_name,
            )
            return builder.spec
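
The spec returned above can be wrapped into a runnable Core ML model. A minimal sketch, assuming get_custom_model_spec is invoked where the enclosing object is available; the output path is illustrative:

from coremltools.models import MLModel

# Wrap the raw protobuf spec in an MLModel and save it to disk.
spec = get_custom_model_spec()
mlmodel = MLModel(spec)
mlmodel.save("custom_classifier.mlmodel")  # illustrative path
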
Example 2
        def get_custom_model_spec():
            import numpy as _np
            from coremltools.models.neural_network import NeuralNetworkBuilder
            from coremltools.models.datatypes import Array, Dictionary, String

            # `prob_name` and `_mxnet_utils` are defined in the enclosing
            # scope (not shown in this snippet).
            input_name = 'output1'
            input_length = self._feature_extractor.output_length
            builder = NeuralNetworkBuilder(
                [(input_name, Array(input_length, ))],
                [(prob_name, Dictionary(String))], 'classifier')

            ctx = _mxnet_utils.get_mxnet_context()[0]
            output_name = 0
            import mxnet as _mx
            for i, cur_layer in enumerate(self._custom_classifier):
                output_name = str(i)
                if type(cur_layer) == _mx.gluon.nn.basic_layers.Dense:
                    W = cur_layer.weight.data(ctx).asnumpy()
                    nC, nB = W.shape
                    Wb = cur_layer.bias.data(ctx).asnumpy()

                    builder.add_inner_product(name='inner_product_' + str(i),
                                              W=W,
                                              b=Wb,
                                              input_channels=nB,
                                              output_channels=nC,
                                              has_bias=True,
                                              input_name=input_name,
                                              output_name='inner_product_' +
                                              output_name)
                    # Assumes every Dense layer has an activation; otherwise
                    # the next layer's input name would not match this
                    # layer's output blob.
                    if cur_layer.act:
                        builder.add_activation("activation" + str(i), 'RELU',
                                               'inner_product_' + output_name,
                                               output_name)
                elif type(cur_layer) == _mx.gluon.nn.basic_layers.BatchNorm:
                    # Identity batchnorm; `nC` is the output width of the most
                    # recent Dense layer.
                    zeros = _np.zeros(nC)
                    ones = _np.ones(nC)
                    builder.add_batchnorm(name='bn_layer_' + str(i),
                                          channels=nC,
                                          gamma=ones,
                                          beta=zeros,
                                          mean=zeros,
                                          variance=ones,
                                          input_name=input_name,
                                          output_name=output_name)
                elif type(cur_layer) == _mx.gluon.nn.basic_layers.Dropout:
                    continue
                input_name = output_name

            last_output = builder.spec.neuralNetworkClassifier.layers[
                -1].output[0]
            builder.add_softmax('softmax', last_output, self.target)

            builder.set_class_labels(self.classes)
            # The spec's declared input is the original feature blob, not the
            # loop's final input_name.
            builder.set_input(['output1'], [(input_length, )])
            builder.set_output([self.target], [(self.num_classes, )])

            return builder.spec
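
The protobuf layer list accessed above (spec.neuralNetworkClassifier.layers) is also handy for checking the generated network. A minimal sketch, assuming `spec` is the value returned by get_custom_model_spec():

# Print each layer's name and its input/output blob names to verify wiring.
for layer in spec.neuralNetworkClassifier.layers:
    print(layer.name, list(layer.input), "->", list(layer.output))
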
Example 3
        def get_custom_model_spec():
            from coremltools.models.neural_network import NeuralNetworkBuilder
            from coremltools.models.datatypes import Array, Dictionary, String

            # `prob_name` is the probability output name defined in the
            # enclosing scope (not shown in this snippet).
            input_name = 'output1'
            input_length = self._feature_extractor.output_length
            builder = NeuralNetworkBuilder(
                [(input_name, Array(input_length, ))],
                [(prob_name, Dictionary(String))], 'classifier')

            input_name, output_name = input_name, 0
            for i, cur_layer in enumerate(
                    self._custom_classifier.export_weights()):
                W = cur_layer['weight']
                nC, nB = W.shape
                Wb = cur_layer['bias']

                builder.add_inner_product(name="inner_product_" + str(i),
                                          W=W,
                                          b=Wb,
                                          input_channels=nB,
                                          output_channels=nC,
                                          has_bias=True,
                                          input_name=str(input_name),
                                          output_name='inner_product_' +
                                          str(output_name))

                # Assumes every exported layer has an activation; without one
                # the next inner product's input name would not resolve.
                if cur_layer['act']:
                    builder.add_activation("activation" + str(i), 'RELU',
                                           'inner_product_' + str(output_name),
                                           str(output_name))

                input_name = i
                output_name = i + 1

            last_output = builder.spec.neuralNetworkClassifier.layers[
                -1].output[0]
            builder.add_softmax('softmax', last_output, self.target)

            builder.set_class_labels(self.classes,
                                     predicted_feature_name=self.target)
            # The spec's declared input is the original feature blob, not the
            # loop's final input_name (which ends as an int index).
            builder.set_input(['output1'], [(input_length, )])
            builder.set_output([self.target], [(self.num_classes, )])

            return builder.spec
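
The pattern shared by the three examples above (inner product layers, ReLU activations, a softmax, and class labels) reduces to a self-contained sketch; the feature names, weights, and labels below are illustrative:

import numpy as np
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models.datatypes import Array

num_inputs, num_classes = 4, 3
builder = NeuralNetworkBuilder(
    [("features", Array(num_inputs))],
    [("probs", Array(num_classes))],
    "classifier",
)
# Single dense layer: W has shape (output_channels, input_channels).
builder.add_inner_product(
    name="dense",
    W=np.random.rand(num_classes, num_inputs),
    b=np.zeros(num_classes),
    input_channels=num_inputs,
    output_channels=num_classes,
    has_bias=True,
    input_name="features",
    output_name="dense_out",
)
builder.add_softmax("softmax", "dense_out", "probs")
builder.set_class_labels(
    ["cat", "dog", "bird"],
    predicted_feature_name="label",
    prediction_blob="probs",
)
spec = builder.spec
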
Example 4
# Helpers referenced below (Array, types, neural_network, the `_convert_to_*`
# and `_set_*` functions, etc.) come from the surrounding converter module.
def load(prog, **kwargs):
    if "main" not in prog.functions:
        msg = "main function not found in program {}"
        raise ValueError(msg.format(prog))
    if len(prog.functions) != 1:
        msg = ("Program must have exactly one `main` function to "
               "convert to NN. Program: {}")
        raise ValueError(msg.format(prog))

    nn_backend_passes(prog)
    input_types = prog.main_input_types
    output_types = prog.main_output_types

    v1_inputs = []
    symbolic_inputs = {}
    for name, var in prog.functions["main"].inputs.items():
        if types.is_tensor(var.sym_type):
            sym_shape = var.sym_type.get_shape()
            if any_variadic(sym_shape):
                raise NotImplementedError("Variadic rank is not supported")
            if any_symbolic(sym_shape):
                user_specified = False
                for input_type in input_types:
                    if name == input_type.name:
                        sym_shape = input_type.shape.default
                        user_specified = True
                        break
                # Use a dummy static shape here; the real shape is set later.
                shape = [1 if is_symbolic(d) else d for d in sym_shape]
                if not user_specified:
                    symbolic_inputs[name] = sym_shape
            else:
                shape = sym_shape
            v1_inputs.append((name, Array(*shape)))
        elif types.is_scalar(var.sym_type):
            v1_inputs.append((name, Array(1)))
        else:
            raise NotImplementedError()

    v1_outputs = []
    for var in prog.functions["main"].outputs:
        if types.is_tensor(var.sym_type) or types.is_primitive(var.sym_type):
            # Disregard the output types
            v1_outputs.append((var.name, None))
        else:
            raise NotImplementedError()

    # create neural network builder
    builder = neural_network.NeuralNetworkBuilder(
        v1_inputs,
        v1_outputs,
        disable_rank5_shape_mapping=True,
        use_float_arraytype=True,
    )

    # Consts in V2 are added lazily to V1 by each op whenever needed.
    # `const_context` stores the const names added so far and avoids adding a
    # const more than once.
    # const_context: list[set of str] (the const name is the same for V1 and
    # V2). Note that in NN an outer layer is visible from the inner layer, so
    # const_context is simply a stack of sets.
    const_context = []
    # Iterate through ops and add to builder
    convert_ops(
        const_context,
        builder,
        prog.functions["main"].operations,
        prog.functions["main"].outputs,
    )

    proto = builder.spec
    # image input
    has_image_input = any([isinstance(s, ImageType) for s in input_types])
    if has_image_input:
        proto = _convert_to_image_input(proto,
                                        input_types,
                                        skip_model_load=kwargs.get(
                                            "skip_model_load", False))

    # image output
    if output_types is not None:
        assert len(output_types) == len(prog.functions["main"].outputs), \
                "number of mil program outputs do not match the number of outputs provided by the user"
        for i, output_proto_desc in enumerate(proto.description.output):
            output_var = prog.functions["main"].outputs[i]
            if isinstance(output_types[i], ImageType):
                if not types.is_tensor(output_var.sym_type):
                    raise ValueError(
                        "Image output, '{}', is a scalar, but it should be a tensor of rank 4"
                        .format(output_var.name))
                shape = output_var.sym_type.get_shape()
                if any_variadic(shape):
                    raise ValueError(
                        "Variable rank model outputs, that are ImageTypes, are not supported"
                    )
                if any([is_symbolic(d) for d in shape]):
                    raise NotImplementedError(
                        "Image output '{}' has symbolic dimensions in its shape"
                        .format(output_var.name))
                _validate_image_input_output_shapes(
                    output_types[i].color_layout,
                    shape,
                    output_var.name,
                    is_input=False)
                clr_space = _get_colorspace_enum(output_types[i].color_layout)
                output_proto_desc.type.imageType.colorSpace = clr_space
                output_proto_desc.type.imageType.width = shape[-1]
                output_proto_desc.type.imageType.height = shape[-2]

    # classifier flag
    classifier_config = kwargs.get("classifier_config", None)
    if classifier_config is not None:
        # Verify classifier_config.predicted_probabilities_output if it
        # exists; if it is empty/None, fill it with the last non-const op's
        # output. This is done in _get_probability_var_for_classifier().
        probability_var = _get_probability_var_for_classifier(
            prog, classifier_config)
        if classifier_config.predicted_probabilities_output != probability_var.name:
            classifier_config.predicted_probabilities_output = probability_var.name
        # add classifier related fields to the proto spec
        proto = _convert_to_classifier(proto,
                                       classifier_config,
                                       skip_model_load=kwargs.get(
                                           "skip_model_load", False))

    _set_user_inputs(proto, input_types)
    _set_symbolic_inputs(proto, symbolic_inputs)
    _set_optional_inputs(proto, input_types)

    return proto
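
In practice this backend load() is reached through the public converter API rather than called directly. A minimal sketch, assuming a PyTorch model is available to trace; convert_to="neuralnetwork" selects the NN backend:

import coremltools as ct
import torch

class Tiny(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x)

# Trace a toy model and convert it; the converter lowers the traced graph to
# a MIL program and then to the NN proto via a load() like the one above.
traced = torch.jit.trace(Tiny().eval(), torch.rand(1, 3))
mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="x", shape=(1, 3))],
    convert_to="neuralnetwork",
)
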
Example 5
    def test_random_sparse_data(self):

        n_columns = 8
        n_categories = 20

        import numpy.random as rn
        rn.seed(0)
        categories = rn.randint(50000, size=(n_columns, n_categories))

        for dt in ['int32', 'float32', 'float64']:

            _X = np.array([[
                categories[j, rn.randint(n_categories)]
                for j in range(n_columns)
            ] for i in range(100)],
                          dtype=dt)

            # Test this data on a bunch of possible inputs.
            for sparse in (True, False):
                for categorical_features in [
                        'all', [3], [4],
                        range(2, 8),
                        range(0, 4),
                        range(0, 8)
                ]:
                    X = _X.copy()

                    # This appears to be the only type now working.
                    assert X.dtype == np.dtype(dt)

                    model = OneHotEncoder(
                        categorical_features=categorical_features,
                        sparse=sparse)
                    model.fit(X)

                    # Convert the model
                    spec = sklearn.convert(model, [('data', Array(n_columns))],
                                           'out')

                    if macos_version() >= (10, 13):
                        X_out = model.transform(X)
                        if sparse:
                            X_out = X_out.todense()

                        input_data = [{'data': row} for row in X]
                        output_data = [{"out": row} for row in X_out]

                        result = evaluate_transformer(spec, input_data,
                                                      output_data)

                        assert result["num_errors"] == 0

            # Test normal data inside a pipeline
            for sparse in (True, False):
                for categorical_features in [
                        'all', [3], [4],
                        range(2, 8),
                        range(0, 4),
                        range(0, 8)
                ]:
                    X = _X.copy()

                    model = Pipeline([
                        ("OHE",
                         OneHotEncoder(
                             categorical_features=categorical_features,
                             sparse=sparse)), ("Normalizer", Normalizer())
                    ])

                    model.fit(X)

                    # Convert the model
                    spec = sklearn.convert(model, [('data', Array(n_columns))],
                                           'out').get_spec()

                    if macos_version() >= (10, 13):
                        X_out = model.transform(X)
                        if sparse:
                            X_out = X_out.todense()

                        input_data = [{'data': row} for row in X]
                        output_data = [{"out": row} for row in X_out]

                        result = evaluate_transformer(spec, input_data,
                                                      output_data)

                        assert result["num_errors"] == 0