Example No. 1
    def test_convolution_layer(self):
        """Test a model with a single CNTK Convolution layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a Convolution CNTK layer with no bias or activation,
        # auto-padding, stride of 1
        convolutionLayer = Convolution((3, 3), 5, pad=(
            True, True), strides=1, bias=False, init=0)
        x = input((2, 3, 4))  # Input order for CNTK is channels, rows, columns
        cntkModel = convolutionLayer(x)

        # Create a test set of weights to use for both CNTK and ELL layers
        # CNTK has these in filters, channels, rows, columns order
        weightValues = np.arange(90, dtype=np.float_).reshape(5, 2, 3, 3)

        # Set the weights
        convolutionLayer.parameters[0].value = weightValues

        # create an ELL Tensor from the cntk weights, which re-orders the
        # weights and produces an appropriately dimensioned tensor
        weightTensor = cntk_converters.\
            get_float_tensor_from_cntk_convolutional_weight_parameter(
                convolutionLayer.parameters[0])

        # Create the equivalent ELL predictor
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels. Account for
            # padding.
            ell.TensorShape(3 + 2, 4 + 2, 2),
            ell.ZeroPadding(1),
            ell.TensorShape(3, 4, 5),
            ell.NoPadding())

        convolutionalParameters = ell.ConvolutionalParameters(3, 1, 0, 5)

        layer = ell.FloatConvolutionalLayer(
            layerParameters, convolutionalParameters, weightTensor)
        predictor = ell.FloatNeuralNetworkPredictor([layer])

        # Get the results for both
        inputValues = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for Convolution layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults, "convolution",
            "test")
Example No. 2
def process_network(network, weightsData, convolutionOrder):
    """Returns an ell.FloatNeuralNetworkPredictor as a result of parsing the network layers"""
    ellLayers = []

    for layer in network:
        if layer['type'] == 'net':
            pass
        elif layer['type'] == 'convolutional':
            ellLayers += process_convolutional_layer(layer, weightsData,
                                                     convolutionOrder)
        elif layer['type'] == 'connected':
            ellLayers += process_fully_connected_layer(layer, weightsData)
        elif layer['type'] == 'maxpool':
            ellLayers.append(get_pooling_layer(layer, ell.PoolingType.max))
        elif layer['type'] == 'avgpool':
            ellLayers.append(get_pooling_layer(layer, ell.PoolingType.mean))
        elif layer['type'] == 'softmax':
            ellLayers.append(get_softmax_layer(layer))
        else:
            print("Skipping, ", layer['type'], "layer")
            print()

    if ellLayers:
        # Darknet expects the input to be between 0 and 1, so prepend
        # a scaling layer with a scale factor of 1/255
        parameters = ellLayers[0].parameters
        ellLayers = [get_first_scaling_layer(parameters)] + ellLayers

    predictor = ell.FloatNeuralNetworkPredictor(ellLayers)
    return predictor
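The scaling comment above boils down to dividing raw 8-bit pixel values by 255 before they reach the first Darknet layer. A plain NumPy equivalent of what the prepended scaling layer does (the values here are illustrative):

import numpy as np

# Raw image data in the 0-255 range, in (rows, columns, channels) order
rawPixels = np.random.uniform(low=0, high=255, size=(10, 10, 3)).astype(np.float32)

# Darknet-trained layers expect inputs in [0, 1], so the importer prepends a
# scaling layer with a factor of 1/255; this is the same transformation
scaledPixels = rawPixels * (1.0 / 255.0)
assert scaledPixels.min() >= 0.0 and scaledPixels.max() <= 1.0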
Example No. 3
def predictor_from_cntk_model(modelFile, plotModel=False):
    """Loads a CNTK model and returns an ell.NeuralNetworkPredictor"""

    print("Loading...")
    z = load_model(modelFile)
    print("\nFinished loading.")

    if plotModel:
        filename = os.path.join(os.path.dirname(modelFile),
                                os.path.basename(modelFile) + ".png")
        cntk_utilities.plot_model(z, filename)

    print("Pre-processing...")
    modelLayers = cntk_utilities.get_model_layers(z)

    # Get the relevant CNTK layers that we will convert to ELL
    layersToConvert = cntk_layers.get_filtered_layers_list(modelLayers)
    print("\nFinished pre-processing.")

    predictor = None

    try:
        # Create a list of ELL layers from the CNTK layers
        ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(
            layersToConvert)
        # Create an ELL neural network predictor from the layers
        predictor = ell.FloatNeuralNetworkPredictor(ellLayers)
    except BaseException:
        print("Error occurred while converting CNTK layers to ELL layers")
        raise

    return predictor
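A short usage sketch for predictor_from_cntk_model, built only from calls that appear in these examples (the model path is illustrative):

import numpy as np

# Convert the CNTK model to an ELL predictor (path is illustrative)
predictor = predictor_from_cntk_model("model.cntk")

# Build a random test input shaped to the predictor's expected
# (rows, columns, channels) input, as in compare_predictor_output below
inputShape = predictor.GetInputShape()
testInput = np.random.uniform(
    low=0, high=255,
    size=(inputShape.rows, inputShape.columns, inputShape.channels)
).astype(np.float32)

# Predict takes a flattened (rows, columns, channels) vector
results = predictor.Predict(testInput.ravel())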
Example No. 4
    def get_predictor(self, layer):

        ell_layers = []
        # Remove output padding because CNTK doesn't support output padding.
        layer.layer.ell_outputPaddingParameters = ell.PaddingParameters(
            ell.PaddingScheme.zeros, 0)
        layer.layer.ell_outputShape = cntk_utilities.get_adjusted_shape(
            layer.layer.output.shape, layer.layer.ell_outputPaddingParameters)
        layer.process(ell_layers)
        # Create an ELL neural network predictor from the relevant CNTK layers
        return ell.FloatNeuralNetworkPredictor(ell_layers)
Example No. 5
    def test_prelu_activation_layer(self):
        """Test a model with a single CNTK PReLU activation layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a test set of alpha parameters to use for both CNTK and ELL
        # layers
        # Input order for CNTK is channels, rows, columns
        alphaValues = np.linspace(
            1, 2, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)

        # create an ELL Tensor from the alpha parameters, which re-orders and
        # produces an appropriately dimensioned tensor
        alphaTensor = cntk_converters.\
            get_float_tensor_from_cntk_convolutional_weight_value_shape(
                alphaValues, alphaValues.shape)

        inputValues = np.linspace(
            -5, 5, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)

        # Evaluate a PReLU CNTK layer
        x = input((16, 10, 10))
        p = parameter(shape=x.shape, init=alphaValues, name="prelu")
        cntkModel = param_relu(p, x)

        # Create the equivalent ELL predictor
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels
            ell.TensorShape(10, 10, 16),
            ell.NoPadding(),
            ell.TensorShape(10, 10, 16),
            ell.NoPadding())
        layer = ell.FloatPReLUActivationLayer(layerParameters, alphaTensor)
        predictor = ell.FloatNeuralNetworkPredictor([layer])

        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for PReLU Activation layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults,
            "prelu_activation", "test")
Example No. 6
    def test_dense_layer(self):
        """Test a model with a single CNTK Dense layer against the equivalent
        ELL predictor. This verifies that the import functions reshape and
        reorder values appropriately and that the equivalent ELL layer
        produces comparable output
        """

        # Create a Dense CNTK layer with no bias or activation
        denseLayer = Dense(5, bias=False)
        x = input((2, 3, 4))  # Input order for CNTK is channels, rows, columns
        cntkModel = denseLayer(x)

        # Create a test set of weights to use for both CNTK and ELL layers
        # CNTK has these in channels, rows, columns, [output shape] order
        weightValues = np.arange(120, dtype=np.float_).reshape(2, 3, 4, 5)

        # Set the weights
        denseLayer.parameters[0].value = weightValues

        # create an ELL Tensor from the cntk weights, which re-orders the
        # weights and produces an appropriately dimensioned tensor
        weightTensor = cntk_converters.\
            get_float_tensor_from_cntk_dense_weight_parameter(
                denseLayer.parameters[0])

        # Create the equivalent ELL predictor
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels
            ell.TensorShape(3, 4, 2),
            ell.NoPadding(),
            ell.TensorShape(1, 1, 5),
            ell.NoPadding())

        layer = ell.FloatFullyConnectedLayer(layerParameters, weightTensor)
        predictor = ell.FloatNeuralNetworkPredictor([layer])

        # Get the results for both
        inputValues = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for Dense layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(predictor, orderedInputValues, orderedCntkResults,
                             "dense", "test")
Example No. 7
    def compare_model(self, layers):
        ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(layers)
        # Create an ELL neural network predictor from the layers
        predictor = ell.FloatNeuralNetworkPredictor(ellLayers)
        shape = predictor.GetInputShape()
        # Convert to CNTK (channels, rows, columns) order
        self.input_shape = (shape.channels, shape.rows, shape.columns)
        self.data = self.get_input_data()

        if (len(self.cntk_model.arguments) > 1):
            output = np.zeros(self.cntk_model.arguments[1].shape).astype(
                np.float32)
            predictions = self.cntk_model.eval({
                self.cntk_model.arguments[0]: [self.data],
                self.cntk_model.arguments[1]:
                output
            })
        else:
            predictions = self.cntk_model.eval(
                {self.cntk_model.arguments[0]: [self.data]})

        size = 0
        output = None
        if isinstance(predictions, dict):
            for key in self.cntk_model.outputs:
                shape = key.shape
                if len(shape) > 0:
                    s = np.max(shape)
                    if (s > size):
                        size = s
                        output = predictions[key][0] / 100
        else:
            output = predictions[0]

        self.verify_ell("Softmax", predictor, self.data, output)
        self.data = output  # make this the input to the next layer.
        self.save_layer_outputs("Softmax")
Example No. 8
    def compare_model(self, layers):
        ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(layers)
        # Create an ELL neural network predictor from the layers
        predictor = ell.FloatNeuralNetworkPredictor(ellLayers)
        shape = predictor.GetInputShape()

        # to CNTK (channel, rows, columns) order
        self.input_shape = (shape.channels, shape.rows, shape.columns)
        self.data = self.get_input_data()

        if len(self.cntk_model.arguments) > 1:
            output = np.zeros(self.cntk_model.arguments[1].shape,
                              dtype=np.float32)
            predictions = self.cntk_model.eval({
                self.cntk_model.arguments[0]: [self.data],
                self.cntk_model.arguments[1]: [list(range(len(self.categories)))] })
        else:
            predictions = self.cntk_model.eval({
                self.cntk_model.arguments[0]: [self.data]})

        size = 0
        cntk_output = None
        if isinstance(predictions, dict):
            for key in self.cntk_model.outputs:
                shape = key.shape
                if shape:
                    s = np.max(shape)
                    if s > size:
                        size = s
                        # These CNTK models don't include a softmax operation,
                        # so we work around that by applying it explicitly
                        cntk_output = softmax(predictions[key][0]).eval()
        else:
            cntk_output = softmax(predictions).eval()

        self.verify_ell("Softmax", predictor, self.data, cntk_output)
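The softmax applied explicitly above is the standard one. For reference, a numerically stable NumPy equivalent for a 1-D score vector (the example itself uses the CNTK softmax op):

import numpy as np

def softmax_1d(scores):
    """Numerically stable softmax over a 1-D array of scores."""
    shifted = scores - np.max(scores)  # guard against overflow in exp
    exps = np.exp(shifted)
    return exps / np.sum(exps)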
Example No. 9
def compare_predictor_output(modelFile, labels, modelTestInput=None,
                             maxLayers=None):
    """Compares an ell.NeuralNetworkPredictor against its equivalent CNTK
    model.

    Parameters:
    modelFile -- path to the CNTK model file
    labels -- array of labels
    modelTestInput -- input data in row, column, channel ordering
    maxLayers -- integer to indicate how many layers to run before stopping.
                 Setting to None will run all layers and compare against the
                 original model.

    """

    z = load_model(modelFile)
    modelLayers = cntk_utilities.get_model_layers(z)

    # Get the relevant CNTK layers that we will convert to ELL
    layersToConvert = cntk_layers.get_filtered_layers_list(
        modelLayers, maxLayers)

    if not layersToConvert:
        raise RuntimeError("No layers are converted, nothing to test")

    # Create a list of ELL layers from the relevant CNTK layers
    print("\nCreating ELL predictor...")
    ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(
        layersToConvert)

    # Create an ELL neural network predictor from the relevant CNTK layers
    predictor = ell.FloatNeuralNetworkPredictor(ellLayers)

    if modelTestInput is None:
        inputShape = predictor.GetInputShape()
        modelTestInput = np.random.uniform(
            low=0, high=255, size=(
                inputShape.rows, inputShape.columns, inputShape.channels)
                ).astype(np.float_)

    ellTestInput = modelTestInput.ravel()  # rows, columns, channels
    ellResults = predictor.Predict(ellTestInput)

    # rows, columns, channels => channels, rows, columns
    cntkTestInput = np.moveaxis(modelTestInput, -1, 0).astype(np.float32)
    cntkTestInput = np.ascontiguousarray(cntkTestInput)

    # Get the equivalent CNTK model
    if not maxLayers:
        print("\nRunning original CNTK model...")

        _, out = z.forward(
           {z.arguments[0]: [cntkTestInput],
            z.arguments[1]: [list(range(len(labels)))]})
        for output in z.outputs:
            if (output.shape == (len(labels),)):
                out = out[output]
        cntkResults = softmax(out[0]).eval()

        # For the full model, we compare prediction output instead of layers
        np.testing.assert_array_almost_equal(
            cntkResults, ellResults, 5,
            'prediction outputs do not match! (for model ' + modelFile + ')')
    else:
        print("\nRunning partial CNTK model...")

        if (layersToConvert[-1].layer.op_name == 'CrossEntropyWithSoftmax' and
                len(layersToConvert) > 2):
            # ugly hack for CrossEntropyWithSoftmax
            zz = as_composite(layersToConvert[-2].layer)
            zz = softmax(zz)
        else:
            zz = as_composite(layersToConvert[-1].layer)
            zz = softmax(zz)

        out = zz(cntkTestInput)
        orderedCntkModelResults = cntk_converters.\
            get_float_vector_from_cntk_array(out)

        np.testing.assert_array_almost_equal(
            orderedCntkModelResults, ellResults, 5,
            ('prediction outputs do not match! (for partial model ' +
                modelFile + ')'))
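A hypothetical invocation of compare_predictor_output; the file paths and the label loading are illustrative, and the behavior described matches the docstring above:

# Load the category labels, one per line (path is illustrative)
with open("categories.txt", "r") as f:
    labels = [line.strip() for line in f]

# Compare only the first 5 converted layers against a partial CNTK model
compare_predictor_output("model.cntk", labels, maxLayers=5)

# Compare full-model prediction output; a random test input is generated
# internally when modelTestInput is None
compare_predictor_output("model.cntk", labels)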
Example No. 10
    def test_batch_normalization_layer(self):
        """Test a model with a single CNTK BatchNormalization layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a test set of scales and biases to use for both CNTK and ELL
        # layers
        scaleValues = np.linspace(0.1, 0.5, num=16, dtype=np.float32)
        scaleVector = cntk_converters.get_float_vector_from_cntk_array(
            scaleValues)

        biasValues = np.linspace(1, 2, num=16, dtype=np.float32)
        biasVector = cntk_converters.get_float_vector_from_cntk_array(
            biasValues)

        meanValues = np.linspace(-0.5, 0.5, num=16, dtype=np.float32)
        meanVector = cntk_converters.get_float_vector_from_cntk_array(
            meanValues)

        varianceValues = np.linspace(-1, 1, num=16, dtype=np.float32)
        varianceVector = cntk_converters.get_float_vector_from_cntk_array(
            varianceValues)

        # Create a BatchNormalization CNTK layer
        # CNTK's BatchNormalization layer does not support setting the running
        # mean and variance, so we use a wrapper function around the
        # batch_normalization op
        batchNorm = BatchNormalizationTester(
            init_scale=scaleValues, norm_shape=scaleValues.shape,
            init_bias=biasValues, init_mean=meanValues,
            init_variance=varianceValues)

        # Input order for CNTK is channels, rows, columns
        x = input((16, 10, 10))
        cntkModel = batchNorm(x)

        # Create the equivalent ELL predictor
        layers = []
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels
            ell.TensorShape(10, 10, 16),
            ell.NoPadding(),
            ell.TensorShape(10, 10, 16),
            ell.NoPadding())

        # CNTK BatchNorm = ELL's BatchNorm + Scaling + Bias
        # 1e-5 is the default epsilon for CNTK's BatchNormalization Layer
        epsilon = 1e-5
        layers.append(ell.FloatBatchNormalizationLayer(
            layerParameters, meanVector, varianceVector, epsilon,
            ell.EpsilonSummand_variance))
        layers.append(ell.FloatScalingLayer(layerParameters, scaleVector))
        layers.append(ell.FloatBiasLayer(layerParameters, biasVector))

        predictor = ell.FloatNeuralNetworkPredictor(layers)

        inputValues = np.linspace(
            -5, 5, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)
        cntkResults = cntkModel(inputValues)

        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            # Note that cntk inserts an extra dimension of 1 in the front
            cntkResults)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results (precision is one decimal place less, due to epsilon)
        np.testing.assert_array_almost_equal(
            orderedCntkResults, ellResults, 6,
            'results for BatchNormalization layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults, "batch_norm",
            "test", precision=6)
Example No. 11
    def test_binary_convolution_layer(self):
        """Test a model with a single CNTK Binary Convolution layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a test set of weights to use for both CNTK and ELL layers
        # CNTK has these in filters, channels, rows, columns order
        weightValues = np.random.uniform(
            low=-5, high=5, size=(5, 2, 3, 3)).astype(np.float32)

        # create an ELL Tensor from the cntk weights, which re-orders the
        # weights and produces an appropriately dimensioned tensor
        weightTensor = cntk_converters.\
            get_float_tensor_from_cntk_convolutional_weight_value_shape(
                weightValues, weightValues.shape)

        # Create a Binary Convolution CNTK layer with no bias, no activation,
        # stride 1
        # Input order for CNTK is channels, rows, columns
        x = input((2, 10, 10))
        cntkModel = CustomSign(x)

        cntkModel = BinaryConvolution(
            (10, 10), num_filters=5, channels=2, init=weightValues,
            pad=True, bias=False, init_bias=0, activation=False)(cntkModel)

        # Create the equivalent ELL predictor
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels. Account for
            # padding.
            ell.TensorShape(10 + 2, 10 + 2, 2),
            ell.ZeroPadding(1),
            ell.TensorShape(10, 10, 5),
            ell.NoPadding())

        convolutionalParameters = ell.BinaryConvolutionalParameters(
            3, 1, ell.BinaryConvolutionMethod.bitwise,
            ell.BinaryWeightsScale.none)

        layer = ell.FloatBinaryConvolutionalLayer(
            layerParameters, convolutionalParameters, weightTensor)

        predictor = ell.FloatNeuralNetworkPredictor([layer])

        # Get the results for both
        inputValues = np.random.uniform(
            low=-50, high=50, size=(2, 10, 10)).astype(np.float32)

        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)

        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for Binary Convolution layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults,
            "binary_convolution", "test")
Example No. 12
    def test_max_pooling_layer(self):
        """Test a model with a single CNTK MaxPooling layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        x = input((2, 15, 15))
        count = 0
        inputValues = np.random.uniform(
            low=-5, high=5, size=(2, 15, 15)).astype(np.float32)

        for pool_size, stride_size in product(range(2, 4), range(2, 3)):
            count += 1
            print("test pooling size ({0},{0}) and stride {1}".format(
                pool_size, stride_size))

            # Create a MaxPooling CNTK layer
            poolingLayer = MaxPooling(
                (pool_size, pool_size), pad=True, strides=stride_size)
            # Input order for CNTK is channels, rows, columns
            cntkModel = poolingLayer(x)
            # Get the results for both
            cntkResults = cntkModel(inputValues)[0]
            outputShape = cntkResults.shape

            padding = int((pool_size - 1) / 2)
            rows = int(inputValues.shape[1] + 2*padding)
            columns = int(inputValues.shape[2] + 2*padding)
            channels = int(inputValues.shape[0])

            # Create the equivalent ELL predictor
            layerParameters = ell.LayerParameters(
                # Input order for ELL is rows, columns, channels
                ell.TensorShape(rows, columns, channels),
                ell.MinPadding(padding),
                ell.TensorShape(
                    outputShape[1], outputShape[2], outputShape[0]),
                ell.NoPadding())

            poolingParameters = ell.PoolingParameters(
                pool_size, stride_size)
            layer = ell.FloatPoolingLayer(
                layerParameters, poolingParameters, ell.PoolingType.max)
            predictor = ell.FloatNeuralNetworkPredictor([layer])

            # Note that cntk inserts an extra dimension of 1 in the front
            orderedCntkResults = cntk_converters.\
                get_float_vector_from_cntk_array(cntkResults)
            orderedInputValues = cntk_converters.\
                get_float_vector_from_cntk_array(inputValues)
            ellResults = predictor.Predict(orderedInputValues)

            # Compare them
            np.testing.assert_array_almost_equal(
                orderedCntkResults, ellResults, 5,
                ('results for MaxPooling layer do not match! (poolsize = '
                    '{}, stride = {})').format(pool_size, stride_size))

            # now run same over ELL compiled model
            self.verify_compiled(
                predictor, orderedInputValues, orderedCntkResults,
                'max_pooling{}_{}'.format(pool_size, stride_size),
                'test_' + str(count))