Code example #1
 def test_batchnorm_converter(self):
     """Convert a minimal 3-channel batchnorm layer and check a node is produced.

     Builds a one-layer Core ML spec with NeuralNetworkBuilder and runs it
     through BatchnormLayerConverter.convert.
     """
     input_dim = (3, )
     output_dim = (3, )
     input = [('input', datatypes.Array(*input_dim))]
     output = [('output', datatypes.Array(*output_dim))]
     builder = NeuralNetworkBuilder(input, output)
     # Build the parameter vectors in one initialized step; the previous
     # numpy.ndarray(shape=...) call returned uninitialized memory and relied
     # on a separate fill assignment.
     gamma = numpy.array([-1.0, 0.0, 1.0])
     beta = numpy.array([10.0, 20.0, 30.0])
     mean = numpy.array([0.0, 0.0, 0.0])
     variance = numpy.array([1.0, 1.0, 1.0])
     builder.add_batchnorm(name='BatchNormalize',
                           channels=3,
                           gamma=gamma,
                           beta=beta,
                           mean=mean,
                           variance=variance,
                           input_name='input',
                           output_name='output')
     context = ConvertContext()
     node = BatchnormLayerConverter.convert(
         context, builder.spec.neuralNetwork.layers[0], ['input'],
         ['output'])
     # assertIsNotNone gives a clearer failure message than
     # assertTrue(node is not None).
     self.assertIsNotNone(node)
Code example #2
        def get_custom_model_spec():
            """Translate the custom gluon classifier head into a Core ML spec.

            Walks ``self._custom_classifier`` layer by layer, emitting the
            matching NeuralNetworkBuilder layer for Dense / BatchNorm and
            skipping Dropout (an inference-time no-op), then appends the
            softmax and classifier metadata and returns the finished spec.
            """
            from coremltools.models.neural_network import NeuralNetworkBuilder
            from coremltools.models.datatypes import Array, Dictionary, String

            input_name = 'output1'
            input_length = self._feature_extractor.output_length
            # NOTE(review): prob_name comes from the enclosing scope — confirm
            # it is defined before this closure runs.
            builder = NeuralNetworkBuilder(
                [(input_name, Array(input_length, ))],
                [(prob_name, Dictionary(String))], 'classifier')

            ctx = _mxnet_utils.get_mxnet_context()[0]
            import mxnet as _mx
            # Track the current channel count so a BatchNorm layer appearing
            # before any Dense layer still has a defined size (nC was
            # previously referenced before assignment in that case).
            nC = input_length
            output_name = input_name
            for i, cur_layer in enumerate(self._custom_classifier):
                output_name = str(i)
                if type(cur_layer) == _mx.gluon.nn.basic_layers.Dense:
                    W = cur_layer.weight.data(ctx).asnumpy()
                    nC, nB = W.shape
                    Wb = cur_layer.bias.data(ctx).asnumpy()

                    builder.add_inner_product(name='inner_product_' + str(i),
                                              W=W,
                                              b=Wb,
                                              input_channels=nB,
                                              output_channels=nC,
                                              has_bias=True,
                                              input_name=input_name,
                                              output_name='inner_product_' +
                                              output_name)
                    if cur_layer.act:
                        builder.add_activation("activation" + str(i), 'RELU',
                                               'inner_product_' + output_name,
                                               output_name)
                    else:
                        # Bug fix: without an activation the Dense layer's real
                        # output blob is 'inner_product_<i>'; previously the
                        # next layer was wired to the non-existent blob '<i>'.
                        output_name = 'inner_product_' + output_name
                elif type(cur_layer) == _mx.gluon.nn.basic_layers.BatchNorm:
                    zeros = _np.zeros(nC)
                    ones = _np.ones(nC)
                    builder.add_batchnorm(name='bn_layer_' + str(i),
                                          channels=nC,
                                          gamma=ones,
                                          beta=zeros,
                                          mean=zeros,
                                          variance=ones,
                                          input_name=input_name,
                                          output_name=output_name)
                elif type(cur_layer) == _mx.gluon.nn.basic_layers.Dropout:
                    # Dropout is identity at inference time; keep the previous
                    # blob name flowing to the next layer.
                    continue
                input_name = output_name

            last_output = builder.spec.neuralNetworkClassifier.layers[
                -1].output[0]
            builder.add_softmax('softmax', last_output, self.target)

            builder.set_class_labels(self.classes)
            # NOTE(review): set_input is called with the last layer's blob
            # name rather than the original 'output1' — confirm this renaming
            # is intended.
            builder.set_input([input_name], [(input_length, )])
            builder.set_output([self.target], [(self.num_classes, )])

            return builder.spec
Code example #3
 def test_batchnorm_converter(self):
     """Round-trip a minimal 3-channel batchnorm layer through convert_coreml.

     Builds a one-layer Core ML spec and checks that the ONNX conversion
     yields a model object.
     """
     input_dim = (3,)
     output_dim = (3,)
     input = [('input', datatypes.Array(*input_dim))]
     output = [('output', datatypes.Array(*output_dim))]
     builder = NeuralNetworkBuilder(input, output)
     # numpy.array builds an initialized vector in one step; the previous
     # numpy.ndarray(shape=...) call allocated uninitialized memory that had
     # to be filled afterwards.
     gamma = numpy.array([-1.0, 0.0, 1.0])
     beta = numpy.array([10.0, 20.0, 30.0])
     mean = numpy.array([0.0, 0.0, 0.0])
     variance = numpy.array([1.0, 1.0, 1.0])
     builder.add_batchnorm(name='BatchNormalize', channels=3, gamma=gamma, beta=beta, mean=mean, variance=variance,
                           input_name='input', output_name='output')
     model_onnx = convert_coreml(builder.spec)
     # assertIsNotNone gives a clearer failure message than
     # assertTrue(model_onnx is not None).
     self.assertIsNotNone(model_onnx)