Example #1
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Note that a single CNTK Batch Normalization layer is equivalent to the following 3 ELL layers:
        # - BatchNormalizationLayer
        # - ScalingLayer
        # - BiasLayer
        #
        # Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
        # padding requirements.

        scaleVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.scale)
        biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.bias)
        meanVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.mean)
        varianceVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.variance)

        # Create the ell.LayerParameters for the various ELL layers
        firstLayerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        middleLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        lastLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Create the layers
        ellLayers.append(ell.FloatBatchNormalizationLayer(
            firstLayerParameters, meanVector, varianceVector, self.epsilon, ell.EpsilonSummand_variance))
        ellLayers.append(ell.FloatScalingLayer(
            middleLayerParameters, scaleVector))
        ellLayers.append(ell.FloatBiasLayer(lastLayerParameters, biasVector))
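For reference, these three emitted layers together compute the standard inference-time batch normalization formula. A minimal NumPy sketch of the combined computation (illustrative only, not importer code):

    import numpy as np

    def batch_norm_decomposed(x, mean, variance, scale, bias, epsilon=1e-5):
        normalized = (x - mean) / np.sqrt(variance + epsilon)  # BatchNormalizationLayer
        scaled = normalized * scale                            # ScalingLayer
        return scaled + bias                                   # BiasLayer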
Example #2
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Note that a single CNTK Linear function block is equivalent to the following 3 ELL layers:
        # - FullyConnectedLayer
        # - BiasLayer
        # - ActivationLayer. This layer is sometimes missing, depending on activation type.
        #
        # Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
        # padding requirements.

        weightsParameter = utilities.find_parameter_by_name(
            self.layer.parameters, 'W', 0)
        biasParameter = utilities.find_parameter_by_name(
            self.layer.parameters, 'b', 1)
        weightsTensor = converters.get_float_tensor_from_cntk_dense_weight_parameter(
            weightsParameter)
        biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
            biasParameter)

        # Create the ell.LayerParameters for the various ELL layers
        firstLayerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        middleLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        lastLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        layerParameters = firstLayerParameters

        internalNodes = utilities.get_model_layers(self.layer.block_root)
        activationType = utilities.get_ell_activation_type(internalNodes)

        # Create the ELL fully connected layer
        ellLayers.append(ell.FloatFullyConnectedLayer(
            layerParameters, weightsTensor))

        # Create the ELL bias layer
        isSoftmaxActivation = utilities.is_softmax_activation(internalNodes)
        hasActivation = isSoftmaxActivation or activationType is not None
        if hasActivation:
            layerParameters = middleLayerParameters
        else:
            layerParameters = lastLayerParameters
        ellLayers.append(ell.FloatBiasLayer(layerParameters, biasVector))

        # Create the ELL activation layer
        if hasActivation:
            layerParameters = lastLayerParameters

            # Special case: if this is softmax activation, create an ELL Softmax layer.
            # Else, insert an ELL ActivationLayer
            if isSoftmaxActivation:
                ellLayers.append(ell.FloatSoftmaxLayer(layerParameters))
            else:
                ellLayers.append(ell.FloatActivationLayer(
                    layerParameters, activationType))
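The emitted chain computes a standard affine map followed by an optional nonlinearity. A NumPy sketch of the equivalent computation (the helper name is illustrative):

    import numpy as np

    def linear_block(x, weights, bias, activation=None):
        y = weights @ x + bias    # FullyConnectedLayer followed by BiasLayer
        if activation is not None:
            y = activation(y)     # optional ActivationLayer (or SoftmaxLayer)
        return y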
Example #3
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        if self.layer.op_name == 'CrossEntropyWithSoftmax':
            # ugly hack for CrossEntropyWithSoftmax
            # CrossEntropyWithSoftmax outputs to a Tensor[1], but we just need Softmax
            layerParameters = ell.LayerParameters(
                self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters)
        else:
            layerParameters = ell.LayerParameters(
                self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Create the ELL softmax layer
        ellLayers.append(ell.FloatSoftmaxLayer(layerParameters))
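For reference, the emitted FloatSoftmaxLayer computes the standard softmax function. A numerically stable NumPy sketch:

    import numpy as np

    def softmax(x):
        shifted = x - np.max(x)  # subtract the max for numerical stability
        exps = np.exp(shifted)
        return exps / np.sum(exps)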
Example #4
    def test_convolution_layer(self):
        """Test a model with a single CNTK Convolution layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a Convolution CNTK layer with no bias or activation,
        # auto-padding, stride of 1
        convolutionLayer = Convolution((3, 3), 5, pad=(True, True),
                                       strides=1, bias=False, init=0)
        x = input((2, 3, 4))  # Input order for CNTK is channels, rows, columns
        cntkModel = convolutionLayer(x)

        # Create a test set of weights to use for both CNTK and ELL layers
        # CNTK has these in filters, channels, rows, columns order
        weightValues = np.arange(90, dtype=np.float_).reshape(5, 2, 3, 3)

        # Set the weights
        convolutionLayer.parameters[0].value = weightValues

        # create an ELL Tensor from the cntk weights, which re-orders the
        # weights and produces an appropriately dimensioned tensor
        weightTensor = cntk_converters.\
            get_float_tensor_from_cntk_convolutional_weight_parameter(
                convolutionLayer.parameters[0])

        # Create the equivalent ELL predictor
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels. Account for
            # padding.
            ell.TensorShape(3 + 2, 4 + 2, 2),
            ell.ZeroPadding(1),
            ell.TensorShape(3, 4, 5),
            ell.NoPadding())

        convolutionalParameters = ell.ConvolutionalParameters(3, 1, 0, 5)

        layer = ell.FloatConvolutionalLayer(
            layerParameters, convolutionalParameters, weightTensor)
        predictor = ell.FloatNeuralNetworkPredictor([layer])

        # Get the results for both
        inputValues = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for Convolution layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults, "convolution",
            "test")
Example #5
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Create the ell.LayerParameters for the ELL layer
        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Create the ELL activation layer
        ellLayers.append(ell.FloatActivationLayer(
            layerParameters, ell.ActivationType.leaky))
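A leaky activation keeps a small slope for negative inputs. A NumPy sketch (0.01 is shown only as the conventional default slope; ELL fixes its own constant internally):

    import numpy as np

    def leaky_relu(x, alpha=0.01):
        return np.where(x > 0, x, alpha * x)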
Example #6
def create_layer_parameters(inputShape, inputPadding, inputPaddingScheme,
                            outputShape, outputPadding, outputPaddingScheme):
    """Helper function to return ell.LayerParameters given input and output shapes/padding/paddingScheme"""
    inputPaddingParameters = ell.PaddingParameters(inputPaddingScheme,
                                                   inputPadding)
    outputPaddingParameters = ell.PaddingParameters(outputPaddingScheme,
                                                    outputPadding)

    return ell.LayerParameters(inputShape, inputPaddingParameters, outputShape,
                               outputPaddingParameters)
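Hypothetical usage of the helper above. The shapes and padding values are illustrative, and ell.PaddingScheme.zeros is assumed to be the zero-padding scheme member of the bindings:

    params = create_layer_parameters(
        ell.TensorShape(12, 12, 2), 1, ell.PaddingScheme.zeros,
        ell.TensorShape(10, 10, 5), 0, ell.PaddingScheme.zeros)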
Example #7
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.layer.parameters[0])

        # Create the ell.LayerParameters for the ELL layer
        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Create the ELL bias layer
        ellLayers.append(ell.FloatBiasLayer(layerParameters, biasVector))
Example #8
    def test_prelu_activation_layer(self):
        """Test a model with a single CNTK PReLU activation layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a test set of alpha parameters to use for both CNTK and ELL
        # layers
        # Input order for CNTK is channels, rows, columns
        alphaValues = np.linspace(
            1, 2, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)

        # create an ELL Tensor from the alpha parameters, which re-orders and
        # produces an appropriately dimensioned tensor
        alphaTensor = cntk_converters.\
            get_float_tensor_from_cntk_convolutional_weight_value_shape(
                alphaValues, alphaValues.shape)

        inputValues = np.linspace(
            -5, 5, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)

        # Evaluate a PReLU CNTK layer
        x = input((16, 10, 10))
        p = parameter(shape=x.shape, init=alphaValues, name="prelu")
        cntkModel = param_relu(p, x)

        # Create the equivalent ELL predictor
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels
            ell.TensorShape(10, 10, 16),
            ell.NoPadding(),
            ell.TensorShape(10, 10, 16),
            ell.NoPadding())
        layer = ell.FloatPReLUActivationLayer(layerParameters, alphaTensor)
        predictor = ell.FloatNeuralNetworkPredictor([layer])

        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for PReLU Activation layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults,
            "prelu_activation", "test")
Example #9
    def test_dense_layer(self):
        """Test a model with a single CNTK Dense layer against the equivalent
        ELL predictor. This verifies that the import functions reshape and
        reorder values appropriately and that the equivalent ELL layer
        produces comparable output
        """

        # Create a Dense CNTK layer with no bias or activation
        denseLayer = Dense(5, bias=False)
        x = input((2, 3, 4))  # Input order for CNTK is channels, rows, columns
        cntkModel = denseLayer(x)

        # Create a test set of weights to use for both CNTK and ELL layers
        # CNTK has these in channels, rows, columns, [output shape] order
        weightValues = np.arange(120, dtype=np.float_).reshape(2, 3, 4, 5)

        # Set the weights
        denseLayer.parameters[0].value = weightValues

        # create an ELL Tensor from the cntk weights, which re-orders the
        # weights and produces an appropriately dimensioned tensor
        weightTensor = cntk_converters.\
            get_float_tensor_from_cntk_dense_weight_parameter(
                denseLayer.parameters[0])

        # Create the equivalent ELL predictor
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels
            ell.TensorShape(3, 4, 2),
            ell.NoPadding(),
            ell.TensorShape(1, 1, 5),
            ell.NoPadding())

        layer = ell.FloatFullyConnectedLayer(layerParameters, weightTensor)
        predictor = ell.FloatNeuralNetworkPredictor([layer])

        # Get the results for both
        inputValues = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for Dense layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(predictor, orderedInputValues, orderedCntkResults,
                             "dense", "test")
Example #10
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        preluTensor = converters.get_float_tensor_from_cntk_convolutional_weight_parameter(
            self.prelu_parameter)

        # Create the ell.LayerParameters for the ELL layer
        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Create the ELL PReLU activation layer
        ellLayers.append(ell.FloatPReLUActivationLayer(
            layerParameters, preluTensor))
Example #11
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Create the ell.LayerParameters for the ELL layer
        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        bias = -1.0 * self.layer.constants[0].value
        if len(bias.shape) == 0:
            biasVector = converters.get_float_vector_from_constant(bias, layerParameters.outputShape.channels)
        else:
            biasVector = converters.get_float_vector_from_cntk_array(bias)

        # Create the ELL bias layer
        ellLayers.append(ell.FloatBiasLayer(layerParameters, biasVector))
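In the scalar branch, the 0-d constant is expanded to one bias value per output channel. A sketch of the presumed expansion (the channel count and constant value are illustrative):

    import numpy as np

    channels = 16   # illustrative channel count
    constant = 0.5  # illustrative 0-d constant value
    biasVector = np.full(channels, -1.0 * constant, dtype=np.float32)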
Example #12
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Create the ell.LayerParameters for the ELL layer
        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Fill in the pooling parameters
        poolingSize = self.attributes['poolingWindowShape'][0]
        stride = self.attributes['strides'][0]

        poolingParameters = ell.PoolingParameters(poolingSize, stride)

        # Create the ELL pooling layer
        ellLayers.append(ell.FloatPoolingLayer(
            layerParameters, poolingParameters, self.pooling_type))
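For a pooling window, stride, and padding, the output spatial size follows the usual formula; a sketch (edge rounding may differ slightly between CNTK and ELL):

    def pooled_size(input_size, pool_size, stride, padding):
        return (input_size + 2 * padding - pool_size) // stride + 1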
Example #13
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Create the ell.LayerParameters for the ELL layer
        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Create ELL scaling layer
        if self.scale.value.size == 1:
            scalesVector = converters.get_float_vector_from_constant(
                self.scale.value, layerParameters.outputShape.channels)
        else:
            scalesVector = converters.get_float_vector_from_cntk_array(
                self.scale.value)

        ellLayers.append(ell.FloatScalingLayer(
            layerParameters, scalesVector))
Example #14
    def process(self, ellLayers):
        """Helper to convert a binary convolutional layer to the ELL equivalent."""

        # A CNTK Binary Convolutional layer is a single function.
        # Bias and Activation are separate layers (processed outside of this class).
        weightsTensor = converters.get_float_tensor_from_cntk_convolutional_weight_parameter(
            self.weights_parameter)

        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
            self.layer.ell_outputPaddingParameters)

        # Fill in the convolutional parameters
        weightsShape = self.weights_parameter.shape
        receptiveField = weightsShape[2]
        stride = self.attributes['strides'][2]

        convolutionalParameters = ell.BinaryConvolutionalParameters(
            receptiveField, stride, self.convolution_method, self.weights_scale)

        ellLayers.append(ell.FloatBinaryConvolutionalLayer(
            layerParameters, convolutionalParameters, weightsTensor))
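Conceptually, binary convolution reduces each weight to its sign, optionally rescaled per filter by the mean absolute value (whether rescaling applies depends on weights_scale). An XNOR-net-style NumPy sketch:

    import numpy as np

    weights = np.random.uniform(-5, 5, size=(5, 2, 3, 3)).astype(np.float32)
    binary = np.sign(weights)                     # values in {-1, 0, +1}
    scale = np.abs(weights).mean(axis=(1, 2, 3))  # one mean-abs scale per filter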
Example #15
    def test_batch_normalization_layer(self):
        """Test a model with a single CNTK BatchNormalization layer against the
        equivalent ELL predictor This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a test set of scales and biases to use for both CNTK and ELL
        # layers
        scaleValues = np.linspace(0.1, 0.5, num=16, dtype=np.float32)
        scaleVector = cntk_converters.get_float_vector_from_cntk_array(
            scaleValues)

        biasValues = np.linspace(1, 2, num=16, dtype=np.float32)
        biasVector = cntk_converters.get_float_vector_from_cntk_array(
            biasValues)

        meanValues = np.linspace(-0.5, 0.5, num=16, dtype=np.float32)
        meanVector = cntk_converters.get_float_vector_from_cntk_array(
            meanValues)

        varianceValues = np.linspace(-1, 1, num=16, dtype=np.float32)
        varianceVector = cntk_converters.get_float_vector_from_cntk_array(
            varianceValues)

        # Create a BatchNormalization CNTK layer
        # CNTK's BatchNormalization layer does not support setting the running
        # mean and variance, so we use a wrapper function around the
        # batch_normalization op
        batchNorm = BatchNormalizationTester(
            init_scale=scaleValues, norm_shape=scaleValues.shape,
            init_bias=biasValues, init_mean=meanValues,
            init_variance=varianceValues)

        # Input order for CNTK is channels, rows, columns
        x = input((16, 10, 10))
        cntkModel = batchNorm(x)

        # Create the equivalent ELL predictor
        layers = []
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels
            ell.TensorShape(10, 10, 16),
            ell.NoPadding(),
            ell.TensorShape(10, 10, 16),
            ell.NoPadding())

        # CNTK BatchNorm = ELL's BatchNorm + Scaling + Bias
        # 1e-5 is the default epsilon for CNTK's BatchNormalization Layer
        epsilon = 1e-5
        layers.append(ell.FloatBatchNormalizationLayer(
            layerParameters, meanVector, varianceVector, epsilon,
            ell.EpsilonSummand_variance))
        layers.append(ell.FloatScalingLayer(layerParameters, scaleVector))
        layers.append(ell.FloatBiasLayer(layerParameters, biasVector))

        predictor = ell.FloatNeuralNetworkPredictor(layers)

        inputValues = np.linspace(
            -5, 5, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)
        cntkResults = cntkModel(inputValues)

        # Note that cntk inserts an extra dimension of 1 in the front
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results (precision is 1 less decimal place from epsilon)
        np.testing.assert_array_almost_equal(
            orderedCntkResults, ellResults, 6,
            'results for BatchNormalization layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults, "batch_norm",
            "test", precision=6)
Example #16
    def test_binary_convolution_layer(self):
        """Test a model with a single CNTK Binary Convolution layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a test set of weights to use for both CNTK and ELL layers
        # CNTK has these in filters, channels, rows, columns order
        weightValues = np.random.uniform(
            low=-5, high=5, size=(5, 2, 3, 3)).astype(np.float32)

        # create an ELL Tensor from the cntk weights, which re-orders the
        # weights and produces an appropriately dimensioned tensor
        weightTensor = cntk_converters.\
            get_float_tensor_from_cntk_convolutional_weight_value_shape(
                weightValues, weightValues.shape)

        # Create a Binary Convolution CNTK layer with no bias, no activation,
        # stride 1
        # Input order for CNTK is channels, rows, columns
        x = input((2, 10, 10))
        cntkModel = CustomSign(x)

        cntkModel = BinaryConvolution(
            (10, 10), num_filters=5, channels=2, init=weightValues,
            pad=True, bias=False, init_bias=0, activation=False)(cntkModel)

        # Create the equivalent ELL predictor
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels. Account for
            # padding.
            ell.TensorShape(10 + 2, 10 + 2, 2),
            ell.ZeroPadding(1),
            ell.TensorShape(10, 10, 5),
            ell.NoPadding())

        convolutionalParameters = ell.BinaryConvolutionalParameters(
            3, 1, ell.BinaryConvolutionMethod.bitwise,
            ell.BinaryWeightsScale.none)

        layer = ell.FloatBinaryConvolutionalLayer(
            layerParameters, convolutionalParameters, weightTensor)

        predictor = ell.FloatNeuralNetworkPredictor([layer])

        # Get the results for both
        inputValues = np.random.uniform(
            low=-50, high=50, size=(2, 10, 10)).astype(np.float32)

        cntkResults = cntkModel(inputValues)
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)

        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results
        np.testing.assert_array_equal(
            orderedCntkResults, ellResults,
            'results for Binary Convolution layer do not match!')

        # now run same over ELL compiled model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults,
            "binary_convolution", "test")
Example #17
    def test_max_pooling_layer(self):
        """Test a model with a single CNTK MaxPooling layer against the
        equivalent ELL predictor. This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        x = input((2, 15, 15))
        count = 0
        inputValues = np.random.uniform(
            low=-5, high=5, size=(2, 15, 15)).astype(np.float32)

        for pool_size, stride_size in product(range(2, 4), range(2, 3)):
            count += 1
            print("test pooling size ({0},{0}) and stride {1}".format(
                pool_size, stride_size))

            # Create a MaxPooling CNTK layer
            poolingLayer = MaxPooling(
                (pool_size, pool_size), pad=True, strides=stride_size)
            # Input order for CNTK is channels, rows, columns
            cntkModel = poolingLayer(x)
            # Get the results for both
            cntkResults = cntkModel(inputValues)[0]
            outputShape = cntkResults.shape

            padding = int((pool_size - 1) / 2)
            rows = int(inputValues.shape[1] + 2*padding)
            columns = int(inputValues.shape[2] + 2*padding)
            channels = int(inputValues.shape[0])

            # Create the equivalent ELL predictor
            layerParameters = ell.LayerParameters(
                # Input order for ELL is rows, columns, channels
                ell.TensorShape(rows, columns, channels),
                ell.MinPadding(padding),
                ell.TensorShape(
                    outputShape[1], outputShape[2], outputShape[0]),
                ell.NoPadding())

            poolingParameters = ell.PoolingParameters(
                pool_size, stride_size)
            layer = ell.FloatPoolingLayer(
                layerParameters, poolingParameters, ell.PoolingType.max)
            predictor = ell.FloatNeuralNetworkPredictor([layer])

            # Note that cntk inserts an extra dimension of 1 in the front
            orderedCntkResults = cntk_converters.\
                get_float_vector_from_cntk_array(cntkResults)
            orderedInputValues = cntk_converters.\
                get_float_vector_from_cntk_array(inputValues)
            ellResults = predictor.Predict(orderedInputValues)

            # Compare them
            np.testing.assert_array_almost_equal(
                orderedCntkResults, ellResults, 5,
                ('results for MaxPooling layer do not match! (poolsize = '
                 '{}, stride = {})').format(pool_size, stride_size))

            # now run same over ELL compiled model
            self.verify_compiled(
                predictor, orderedInputValues, orderedCntkResults,
                'max_pooling{}_{}'.format(pool_size, stride_size),
                'test_' + str(count))
Example #18
    def process(self, ellLayers):
        """Helper to convert a convolutional layer to the ELL equivalent."""

        # Note that a single CNTK Convolutional function block is equivalent to the following 3 ELL layers:
        # - ConvolutionalLayer
        # - BiasLayer. This layer is sometimes missing, depending on whether bias is included.
        # - ActivationLayer. This layer is sometimes missing, depending on activation type.
        #
        # Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
        # padding requirements.

        weightsTensor = converters.get_float_tensor_from_cntk_convolutional_weight_parameter(
            self.weights_parameter)

        internalNodes = utilities.get_model_layers(self.layer.block_root)
        activationType = utilities.get_ell_activation_type(internalNodes)
        isSoftmaxActivation = utilities.is_softmax_activation(internalNodes)
        hasActivation = isSoftmaxActivation or activationType is not None
        hasBias = self.bias_parameter is not None

        # Create the ell.LayerParameters for the various ELL layers
        onlyLayerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)
        firstLayerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        middleLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        lastLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Choose the layer parameters for the convolutional layer. If there is 
        # bias or activation, then the convolution is the first of two or more,
        # otherwise it is the only layer
        if hasActivation or hasBias:
            layerParameters = firstLayerParameters
        else:
            layerParameters = onlyLayerParameters

        # Fill in the convolutional parameters
        weightsShape = self.weights_parameter.shape
        receptiveField = weightsShape[2]
        stride = self.attributes['strides'][2]

        filterBatchSize = layerParameters.outputShape.channels

        convolutionalParameters = ell.ConvolutionalParameters(
            receptiveField, stride, self.convolution_method, filterBatchSize)

        # Create the ELL convolutional layer
        ellLayers.append(ell.FloatConvolutionalLayer(
            layerParameters, convolutionalParameters, weightsTensor))

        # Create the ELL bias layer
        if hasBias:
            if hasActivation:
                layerParameters = middleLayerParameters
            else:
                layerParameters = lastLayerParameters
            biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
                self.bias_parameter)
            ellLayers.append(ell.FloatBiasLayer(layerParameters, biasVector))

        # Create the ELL activation layer
        if hasActivation:
            layerParameters = lastLayerParameters

            # Special case: if this is softmax activation, create an ELL Softmax layer.
            # Else, insert an ELL ActivationLayer
            if isSoftmaxActivation:
                ellLayers.append(ell.FloatSoftmaxLayer(layerParameters))
            else:
                ellLayers.append(ell.FloatActivationLayer(
                    layerParameters, activationType))
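The first/middle/last bookkeeping used here (and in the batch normalization and linear converters above) follows one pattern: only the first emitted layer consumes the block's input padding, and only the last emitted layer produces the block's output padding. A schematic distillation (illustrative helper, not importer code):

    def select_layer_parameters(is_first, is_last, only, first, middle, last):
        if is_first:
            return only if is_last else first
        return last if is_last else middle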