Example 1
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Note that a single CNTK Batch Normalization layer is equivalent to the following 3 ELL layers:
        # - BatchNormalizationLayer
        # - ScalingLayer
        # - BiasLayer
        #
        # Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
        # padding requirements.

        scaleVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.scale)
        biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.bias)
        meanVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.mean)
        varianceVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.variance)

        # Create the ell.LayerParameters for the various ELL layers
        firstLayerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        middleLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        lastLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Create the layers
        ellLayers.append(ell.FloatBatchNormalizationLayer(
            firstLayerParameters, meanVector, varianceVector, self.epsilon, ell.EpsilonSummand_variance))
        ellLayers.append(ell.FloatScalingLayer(
            middleLayerParameters, scaleVector))
        ellLayers.append(ell.FloatBiasLayer(lastLayerParameters, biasVector))
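For reference, these three layers together reproduce the single fused batch-normalization formula scale * (x - mean) / sqrt(variance + epsilon) + bias, with epsilon added to the variance (the EpsilonSummand_variance option above). A minimal NumPy sketch of the decomposition (illustrative only, not part of the importer):

import numpy as np

def batch_norm_decomposed(x, mean, variance, scale, bias, epsilon=1e-5):
    """Apply the three emitted ELL layers in sequence."""
    normalized = (x - mean) / np.sqrt(variance + epsilon)  # BatchNormalizationLayer
    scaled = normalized * scale                            # ScalingLayer
    return scaled + bias                                   # BiasLayer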
Example 2
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Note that a single CNTK Linear function block is equivalent to the following 3 ELL layers:
        # - FullyConnectedLayer
        # - BiasLayer
        # - ActivationLayer. This layer is sometimes missing, depending on activation type.
        #
        # Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
        # padding requirements.

        weightsParameter = utilities.find_parameter_by_name(
            self.layer.parameters, 'W', 0)
        biasParameter = utilities.find_parameter_by_name(
            self.layer.parameters, 'b', 1)
        weightsTensor = converters.get_float_tensor_from_cntk_dense_weight_parameter(
            weightsParameter)
        biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
            biasParameter)

        # Create the ell.LayerParameters for the various ELL layers
        firstLayerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        middleLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        lastLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        layerParameters = firstLayerParameters

        internalNodes = utilities.get_model_layers(self.layer.block_root)
        activationType = utilities.get_ell_activation_type(internalNodes)

        # Create the ELL fully connected layer
        ellLayers.append(ell.FloatFullyConnectedLayer(
            layerParameters, weightsTensor))

        # Create the ELL bias layer
        isSoftmaxActivation = utilities.is_softmax_activation(internalNodes)
        hasActivation = isSoftmaxActivation or activationType is not None
        if hasActivation:
            layerParameters = middleLayerParameters
        else:
            layerParameters = lastLayerParameters
        ellLayers.append(ell.FloatBiasLayer(layerParameters, biasVector))

        # Create the ELL activation layer
        if hasActivation:
            layerParameters = lastLayerParameters

            # Special case: if this is softmax activation, create an ELL Softmax layer.
            # Else, insert an ELL ActivationLayer
            if isSoftmaxActivation:
                ellLayers.append(ell.FloatSoftmaxLayer(layerParameters))
            else:
                ellLayers.append(ell.FloatActivationLayer(
                    layerParameters, activationType))
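Taken together, the emitted layers compute the familiar dense-layer formula activation(W @ x + b). A minimal NumPy sketch of what the sequence does (illustrative names; activation stands in for whichever ELL activation the importer selected):

import numpy as np

def linear_block_decomposed(x, weights, bias, activation=None):
    """Apply FullyConnectedLayer, BiasLayer, then the optional ActivationLayer."""
    y = weights @ x               # FullyConnectedLayer
    y = y + bias                  # BiasLayer
    if activation is not None:    # ActivationLayer (or SoftmaxLayer)
        y = activation(y)
    return y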
Example 3
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.layer.parameters[0])

        # Create the ell.LayerParameters for the ELL layer
        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Create the ELL bias layer
        ellLayers.append(ell.FloatBiasLayer(layerParameters, biasVector))
Example 4
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Create the ell.LayerParameters for the ELL layer
        layerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Negate the constant so that subtracting it can be expressed as a bias
        bias = -1.0 * self.layer.constants[0].value
        if len(bias.shape) == 0:
            biasVector = converters.get_float_vector_from_constant(bias, layerParameters.outputShape.channels)
        else:
            biasVector = converters.get_float_vector_from_cntk_array(bias)

        # Create the ELL bias layer
        ellLayers.append(ell.FloatBiasLayer(layerParameters, biasVector))
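The shape check distinguishes a 0-dimensional scalar constant, which has to be replicated across every output channel, from a full per-channel array. An illustrative NumPy equivalent of that dispatch (a sketch, not the actual converter functions):

import numpy as np

def bias_vector_from_constant(bias, channels):
    """Sketch of the scalar-versus-array branch above."""
    if len(bias.shape) == 0:
        # 0-d scalar: broadcast to one value per output channel
        return np.full(channels, bias, dtype=np.float32)
    # Already per-channel: flatten to a vector
    return np.asarray(bias, dtype=np.float32).ravel()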
Example 5
def get_bias_layer(layer, apply_padding, bias_vals):
    """Return an ELL bias layer from a darknet layer"""

    if apply_padding:
        layerParameters = create_layer_parameters(
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros,
            layer['outputShape'], layer['outputPadding'],
            layer['outputPaddingScheme'])
    else:
        layerParameters = create_layer_parameters(
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros,
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros)

    biasVector = ell.FloatVector(bias_vals.ravel())

    return ell.FloatBiasLayer(layerParameters, biasVector)
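A hypothetical call site, assuming a darknet layer dictionary that carries the keys the function reads (all shapes and padding values below are placeholders):

import numpy as np
# assumes the ell module from the examples above is in scope

layer = {
    'outputShapeMinusPadding': ell.TensorShape(10, 10, 16),
    'outputShape': ell.TensorShape(12, 12, 16),
    'outputPadding': 1,
    'outputPaddingScheme': ell.PaddingScheme.zeros,
}
bias_vals = np.zeros(16, dtype=np.float32)
biasLayer = get_bias_layer(layer, apply_padding=True, bias_vals=bias_vals)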
Example 6
    def test_batch_normalization_layer(self):
        """Test a model with a single CNTK BatchNormalization layer against the
        equivalent ELL predictor This verifies that the import functions
        reshape and reorder values appropriately and that the equivalent ELL
        layer produces comparable output
        """

        # Create a test set of scales and biases to use for both CNTK and ELL
        # layers
        scaleValues = np.linspace(0.1, 0.5, num=16, dtype=np.float32)
        scaleVector = cntk_converters.get_float_vector_from_cntk_array(
            scaleValues)

        biasValues = np.linspace(1, 2, num=16, dtype=np.float32)
        biasVector = cntk_converters.get_float_vector_from_cntk_array(
            biasValues)

        meanValues = np.linspace(-0.5, 0.5, num=16, dtype=np.float32)
        meanVector = cntk_converters.get_float_vector_from_cntk_array(
            meanValues)

        # Variance must be positive, or sqrt(variance + epsilon) produces NaNs
        varianceValues = np.linspace(0.1, 1, num=16, dtype=np.float32)
        varianceVector = cntk_converters.get_float_vector_from_cntk_array(
            varianceValues)

        # Create a BatchNormalization CNTK layer
        # CNTK's BatchNormalization layer does not support setting the running
        # mean and variance, so we use a wrapper function around the
        # batch_normalization op
        batchNorm = BatchNormalizationTester(
            init_scale=scaleValues, norm_shape=scaleValues.shape,
            init_bias=biasValues, init_mean=meanValues,
            init_variance=varianceValues)

        # Input order for CNTK is channels, rows, columns
        x = input((16, 10, 10))
        cntkModel = batchNorm(x)

        # Create the equivalent ELL predictor
        layers = []
        layerParameters = ell.LayerParameters(
            # Input order for ELL is rows, columns, channels
            ell.TensorShape(10, 10, 16),
            ell.NoPadding(),
            ell.TensorShape(10, 10, 16),
            ell.NoPadding())

        # CNTK BatchNorm = ELL's BatchNorm + Scaling + Bias
        # 1e-5 is the default epsilon for CNTK's BatchNormalization Layer
        epsilon = 1e-5
        layers.append(ell.FloatBatchNormalizationLayer(
            layerParameters, meanVector, varianceVector, epsilon,
            ell.EpsilonSummand_variance))
        layers.append(ell.FloatScalingLayer(layerParameters, scaleVector))
        layers.append(ell.FloatBiasLayer(layerParameters, biasVector))

        predictor = ell.FloatNeuralNetworkPredictor(layers)

        inputValues = np.linspace(
            -5, 5, num=16 * 10 * 10, dtype=np.float32).reshape(16, 10, 10)
        cntkResults = cntkModel(inputValues)

        # Note that CNTK inserts an extra dimension of 1 in front of the results
        orderedCntkResults = cntk_converters.get_float_vector_from_cntk_array(
            cntkResults)
        orderedInputValues = cntk_converters.get_float_vector_from_cntk_array(
            inputValues)
        ellResults = predictor.Predict(orderedInputValues)

        # Compare the results to 6 decimal places (epsilon is 1e-5)
        np.testing.assert_array_almost_equal(
            orderedCntkResults, ellResults, 6,
            'results for BatchNormalization layer do not match!')

        # Now run the same input through the compiled ELL model
        self.verify_compiled(
            predictor, orderedInputValues, orderedCntkResults, "batch_norm",
            "test", precision=6)
Example 7
    def process(self, ellLayers):
        """Helper to convert a convolutional layer to the ELL equivalent."""

        # Note that a single CNTK Convolutional function block is equivalent to the following 3 ELL layers:
        # - ConvolutionalLayer
        # - BiasLayer. This layer is sometimes missing, depending on whether bias is included.
        # - ActivationLayer. This layer is sometimes missing, depending on activation type.
        #
        # Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
        # padding requirements.

        weightsTensor = converters.get_float_tensor_from_cntk_convolutional_weight_parameter(
            self.weights_parameter)

        internalNodes = utilities.get_model_layers(self.layer.block_root)
        activationType = utilities.get_ell_activation_type(internalNodes)
        isSoftmaxActivation = utilities.is_softmax_activation(internalNodes)
        hasActivation = isSoftmaxActivation or activationType is not None
        hasBias = self.bias_parameter is not None

        # Create the ell.LayerParameters for the various ELL layers
        onlyLayerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)
        firstLayerParameters = ell.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        middleLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding())
        lastLayerParameters = ell.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        # Choose the layer parameters for the convolutional layer. If there is
        # bias or activation, the convolution is the first of two or more
        # layers; otherwise it is the only layer.
        if hasActivation or hasBias:
            layerParameters = firstLayerParameters
        else:
            layerParameters = onlyLayerParameters

        # Fill in the convolutional parameters. CNTK convolution weights are
        # laid out as filters x channels x rows x columns, so index 2 gives
        # the filter's spatial extent; the strides attribute is indexed the
        # same way.
        weightsShape = self.weights_parameter.shape
        receptiveField = weightsShape[2]
        stride = self.attributes['strides'][2]

        filterBatchSize = layerParameters.outputShape.channels

        convolutionalParameters = ell.ConvolutionalParameters(
            receptiveField, stride, self.convolution_method, filterBatchSize)

        # Create the ELL convolutional layer
        ellLayers.append(ell.FloatConvolutionalLayer(
            layerParameters, convolutionalParameters, weightsTensor))

        # Create the ELL bias layer
        if hasBias:
            if hasActivation:
                layerParameters = middleLayerParameters
            else:
                layerParameters = lastLayerParameters
            biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
                self.bias_parameter)
            ellLayers.append(ell.FloatBiasLayer(layerParameters, biasVector))

        # Create the ELL activation layer
        if hasActivation:
            layerParameters = lastLayerParameters

            # Special case: if this is softmax activation, create an ELL Softmax layer.
            # Else, insert an ELL ActivationLayer
            if isSoftmaxActivation:
                ellLayers.append(ell.FloatSoftmaxLayer(layerParameters))
            else:
                ellLayers.append(ell.FloatActivationLayer(
                    layerParameters, activationType))