Example 1
    def process(self, ellLayers):
        """Appends the ELL representation of the current layer to ellLayers."""

        # Note that a single CNTK Linear function block is equivalent to the following 3 ELL layers:
        # - FullyConnectedLayer
        # - BiasLayer
        # - ActivationLayer. This layer is sometimes missing, depending on activation type.
        #
        # Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
        # padding requirements.

        weightsParameter = utilities.find_parameter_by_name(
            self.layer.parameters, 'W', 0)
        biasParameter = utilities.find_parameter_by_name(
            self.layer.parameters, 'b', 1)
        weightsTensor = converters.get_float_tensor_from_cntk_dense_weight_parameter(
            weightsParameter)
        biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
            biasParameter)

        # Create the ELL.LayerParameters for the various ELL layers
        firstLayerParameters = ELL.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShapeMinusPadding, ELL.NoPadding())
        middleLayerParameters = ELL.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ELL.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ELL.NoPadding())
        lastLayerParameters = ELL.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ELL.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        layerParameters = firstLayerParameters

        internalNodes = utilities.get_model_layers(self.layer.block_root)
        activationType = utilities.get_ell_activation_type(internalNodes)

        # Create the ELL fully connected layer
        ellLayers.append(
            ELL.FloatFullyConnectedLayer(layerParameters, weightsTensor))

        # Create the ELL bias layer
        isSoftmaxActivation = utilities.is_softmax_activation(internalNodes)
        hasActivation = isSoftmaxActivation or activationType is not None
        if hasActivation:
            layerParameters = middleLayerParameters
        else:
            layerParameters = lastLayerParameters
        ellLayers.append(ELL.FloatBiasLayer(layerParameters, biasVector))

        # Create the ELL activation layer
        if hasActivation:
            layerParameters = lastLayerParameters

            # Special case: if this is softmax activation, create an ELL Softmax layer.
            # Else, insert an ELL ActivationLayer
            if isSoftmaxActivation:
                ellLayers.append(ELL.FloatSoftmaxLayer(layerParameters))
            else:
                ellLayers.append(
                    ELL.FloatActivationLayer(layerParameters, activationType))
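
The splitting logic above can be restated without any ELL dependencies. A toy sketch of the decision, grounded in the comments at the top of process() (the string names are illustrative, not ELL types):

def split_dense_block(has_activation):
    # A CNTK Dense block expands to 2 or 3 ELL layers; only the final layer
    # carries the block's output padding, so the split depends on whether an
    # activation layer is emitted.
    layers = ['FullyConnectedLayer', 'BiasLayer']
    if has_activation:
        layers.append('ActivationLayer')
    return layers

assert split_dense_block(True) == ['FullyConnectedLayer', 'BiasLayer', 'ActivationLayer']
assert split_dense_block(False) == ['FullyConnectedLayer', 'BiasLayer']
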
Example 2
def predictor_from_cntk_model(modelFile, plotModel=False):
    """Loads a CNTK model and returns an ELL.NeuralNetworkPredictor"""

    print("Loading...")
    z = load_model(modelFile)
    print("\nFinished loading.")

    if plotModel:
        filename = os.path.join(os.path.dirname(modelFile),
                                os.path.basename(modelFile) + ".png")
        cntk_utilities.plot_model(z, filename)

    print("Pre-processing...")
    modelLayers = cntk_utilities.get_model_layers(z)

    # Get the relevant CNTK layers that we will convert to ELL
    layersToConvert = cntk_layers.get_filtered_layers_list(modelLayers)
    print("\nFinished pre-processing.")

    predictor = None

    try:
        # Create a list of ELL layers from the CNTK layers
        ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(
            layersToConvert)
        # Create an ELL neural network predictor from the layers
        predictor = ELL.FloatNeuralNetworkPredictor(ellLayers)
    except BaseException:
        print("Error occurred attempting to convert CNTK layers to ELL layers")
        raise

    return predictor
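
A minimal usage sketch, assuming the predictor API exercised in Example 11 (GetInputShape() and Predict()); the model path is hypothetical:

import numpy as np

predictor = predictor_from_cntk_model('myModel.cntk')  # hypothetical path
shape = predictor.GetInputShape()
testInput = np.random.uniform(
    0, 255, (shape.rows, shape.columns, shape.channels)).astype(np.float32)
results = predictor.Predict(testInput.ravel())  # row, column, channel order
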
Example 3
    def __init__(self, layer):
        if not layer.is_block:
            raise ValueError(
                "Error: Convolution layer node is not a block node")

        self.op_name = 'Convolution'
        # initialize weights and input characteristics
        self.input_parameter = layer.arguments[0]
        self.weights_parameter = utilities.find_parameter_by_name(
            layer.parameters, 'W', 0)
        self.bias_parameter = utilities.find_parameter_by_name(
            layer.parameters, 'b', 1)

        # Get the hyper-parameters for the convolution.
        # They are on the convolution node inside this block.
        convolution_nodes = depth_first_search(
            layer.block_root,
            lambda x: utilities.op_name_equals(x, 'Convolution'))

        self.attributes = convolution_nodes[0].attributes
        self.convolution_method = 0
        self.input_shape = self.input_parameter.shape

        super().__init__(layer)
        nodes = utilities.get_model_layers(layer.block_root)
        if utilities.is_softmax_activation(nodes):
            self.additional_layer_text = 'softmax'
        else:
            activation_type = utilities.get_cntk_activation_name(nodes)
            if activation_type:
                self.additional_layer_text = activation_type
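
The constructor leans on utilities.find_parameter_by_name(parameters, name, index). A plausible sketch of that helper, assuming a name match with a positional fallback (not the actual ELL implementation):

def find_parameter_by_name(parameters, name, index=0):
    # Prefer the parameter whose name matches; otherwise fall back to the
    # parameter at the given position.
    for parameter in parameters:
        if parameter.name == name:
            return parameter
    return parameters[index]
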
Example 4
    def clone_cntk_layer(self, feature):
        """Returns a clone of the CNTK layer for per-layer forward prop validation"""

        activation = None
        nodes = utilities.get_model_layers(self.layer.block_root)
        if utilities.find_node_by_op_name(nodes, 'ReLU') is not None:
            activation = relu
        elif utilities.find_node_by_op_name(nodes, 'Sigmoid') is not None:
            activation = sigmoid
        elif utilities.find_node_by_op_name(nodes, 'LeakyReLU') is not None:
            activation = leaky_relu

        weightsShape = self.weights_parameter.shape
        pad = self.attributes['autoPadding'][0] or (
            self.attributes['autoPadding'][1]
            and self.attributes['autoPadding'][2])
        bias = (self.bias_parameter is not None)

        layer = Convolution((weightsShape[2], weightsShape[3]),
                            weightsShape[0],
                            pad=pad,
                            activation=activation,
                            bias=bias)(feature)

        layer.parameters[0].value = self.weights_parameter.value
        if bias:
            layer.parameters[1].value = self.bias_parameter.value
        return layer
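
A hedged sketch of how a clone like this can drive per-layer forward-prop validation; converter stands in for an instance of the class above, and the input shape is made up for illustration:

import cntk
import numpy as np

feature = cntk.input_variable((3, 32, 32))    # channels, rows, columns
cloned = converter.clone_cntk_layer(feature)  # converter is hypothetical
data = np.random.rand(1, 3, 32, 32).astype(np.float32)
reference = cloned.eval({feature: data})      # CNTK forward prop to compare against ELL
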
Example 5
    def __init__(self, layer):
        if not layer.is_block:
            raise ValueError("Dense node is not a block node")

        self.op_name = 'Dense'
        super().__init__(layer)
        internalNodes = utilities.get_model_layers(self.layer.block_root)
        self.additional_layer_text = utilities.get_cntk_activation_name(internalNodes)
Example 6
    def model_test_impl(self, model):
        self.reset()
        with self.subTest(model=model):
            self.cntk_model = load_model(model + '.cntk')
            modelLayers = cntk_utilities.get_model_layers(self.cntk_model)
            # Get the relevant CNTK layers that we will convert to ELL
            layersToConvert = cntk_layers.get_filtered_layers_list(modelLayers)

            self.compare_model(layersToConvert)

            self.print_top_result()
Example 7
    def clone_cntk_layer(self, feature):
        """Returns a clone of the CNTK layer for per-layer forward prop validation"""
        weightsParameter = utilities.find_parameter_by_name(
            self.layer.parameters, 'W', 0)
        biasParameter = utilities.find_parameter_by_name(
            self.layer.parameters, 'b', 1)

        internalNodes = utilities.get_model_layers(self.layer.block_root)
        activationType = utilities.get_cntk_activation_op(internalNodes)

        includeBias = biasParameter is not None
        layer = Dense(self.layer.shape, activation=activationType, bias=includeBias)(feature)

        layer.parameters[0].value = weightsParameter.value
        if includeBias:
            layer.parameters[1].value = biasParameter.value
        return layer
Example 8
    def clone_cntk_layer(self, feature):
        """Returns a clone of the CNTK layer for per-layer forward prop validation"""

        nodes = utilities.get_model_layers(self.layer.block_root)
        activation = utilities.get_cntk_activation_op(nodes)

        weightsShape = self.weights_parameter.shape
        pad = self.attributes['autoPadding'][0] or (
            self.attributes['autoPadding'][1] and self.attributes['autoPadding'][2])
        bias = (self.bias_parameter is not None)

        layer = Convolution((weightsShape[2], weightsShape[3]), weightsShape[0],
                            pad=pad, activation=activation, bias=bias)(feature)

        layer.parameters[0].value = self.weights_parameter.value
        if bias:
            layer.parameters[1].value = self.bias_parameter.value
        return layer
Example 9
    def __repr__(self):
        """Prints summary info about this layer."""

        label = self.op_name
        nodes = utilities.get_model_layers(self.layer.block_root)
        if utilities.is_softmax_activation(nodes):
            label += "(softmax)"
        else:
            activation_type = utilities.get_activation_type(nodes)
            if activation_type is not None:
                label += "(" + utilities.ell_activation_type_to_string(
                    activation_type) + ")"

        return " ".join(
            (label, ": ",
             utilities.ell_shape_to_string(self.layer.ell_inputShape), " -> ",
             utilities.ell_shape_to_string(self.layer.ell_outputShape),
             "| input padding",
             str(self.layer.ell_inputPaddingParameters.paddingSize),
             " output padding",
             str(self.layer.ell_outputPaddingParameters.paddingSize)))
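
With hypothetical shapes, and assuming ell_shape_to_string renders rows x columns x channels, the summary reads roughly as follows (the doubled spaces come from joining fragments that already contain their own padding):

Convolution(relu) :  32x32x3  ->  32x32x16 | input padding 1  output padding 0
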
Example 10
    def run(self):
        self.report = open("report.md", "w")
        self.report.write("# Comparison Results\n")
        self.report.write("**model**: %s\n\n" % (self.model_file))
        if self.image_file is not None:
            self.image = self.load_image(self.image_file)
            self.report.write("**image**: %s\n\n" % (self.image_file))

        self.cntk_model = cntk.load_model(self.model_file)
        modelLayers = cntk_utilities.get_model_layers(self.cntk_model)
        # Get the relevant CNTK layers that we will convert to ELL
        layersToConvert = cntk_layers.get_filtered_layers_list(modelLayers)
        _logger.info(
            "----------------------------------------------------------------------------------"
        )
        if self.layers:
            for layer in layersToConvert:
                self.compare_layer(layer)
        else:
            self.compare_model(layersToConvert)

        self.print_top_result()
        self.report.close()
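
The header that run() writes to report.md renders like this (file names hypothetical):

# Comparison Results
**model**: myModel.cntk

**image**: test.png
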
Example 11
def compare_predictor_output(modelFile,
                             labels,
                             modelTestInput=None,
                             maxLayers=None):
    """Compares an ell.NeuralNetworkPredictor against its equivalent CNTK
    model.

    Parameters:
    modelFile -- path to the CNTK model file
    labels -- array of labels
    modelTestInput -- input data in row, column, channel ordering
    maxLayers -- integer to indicate how many layers to run before stopping.
                 Setting to None will run all layers and compare against the
                 original model.

    """

    z = load_model(modelFile)
    modelLayers = cntk_utilities.get_model_layers(z)

    # Get the relevant CNTK layers that we will convert to ELL
    layersToConvert = cntk_layers.get_filtered_layers_list(
        modelLayers, maxLayers)

    if not layersToConvert:
        raise RuntimeError("No layers are converted, nothing to test")

    # Create a list of ELL layers from the relevant CNTK layers
    _logger.info("\nCreating ELL predictor...")
    ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(layersToConvert)

    # Create an ELL neural network predictor from the relevant CNTK layers
    predictor = ell.neural.FloatNeuralNetworkPredictor(ellLayers)

    if modelTestInput is None:
        inputShape = predictor.GetInputShape()
        modelTestInput = np.random.uniform(
            low=0,
            high=255,
            size=(inputShape.rows, inputShape.columns,
                  inputShape.channels)).astype(np.float_)

    ellTestInput = modelTestInput.ravel()  # rows, columns, channels
    ellResults = predictor.Predict(ellTestInput)

    # rows, columns, channels => channels, rows, columns
    cntkTestInput = np.moveaxis(modelTestInput, -1, 0).astype(np.float32)
    cntkTestInput = np.ascontiguousarray(cntkTestInput)

    # Get the equivalent CNTK model
    if not maxLayers:
        _logger.info("\nRunning original CNTK model...")

        _, out = z.forward({
            z.arguments[0]: [cntkTestInput],
            z.arguments[1]: [list(range(len(labels)))]
        })
        for output in z.outputs:
            if output.shape == (len(labels),):
                out = out[output]
        cntkResults = softmax(out[0]).eval()

        # For the full model, we compare prediction output instead of layers
        np.testing.assert_array_almost_equal(
            cntkResults, ellResults, 5,
            'prediction outputs do not match! (for model ' + modelFile + ')')
    else:
        _logger.info("\nRunning partial CNTK model...")

        if (layersToConvert[-1].layer.op_name == 'CrossEntropyWithSoftmax'
                and len(layersToConvert) > 2):
            # ugly hack for CrossEntropyWithSoftmax
            zz = as_composite(layersToConvert[-2].layer)
            zz = softmax(zz)
        else:
            zz = as_composite(layersToConvert[-1].layer)
            zz = softmax(zz)

        out = zz(cntkTestInput)
        orderedCntkModelResults = cntk_converters.\
            get_float_vector_from_cntk_array(out)

        np.testing.assert_array_almost_equal(
            orderedCntkModelResults, ellResults, 5,
            ('prediction outputs do not match! (for partial model ' +
             modelFile + ')'))
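
A usage sketch with hypothetical file names; passing maxLayers exercises the partial-model branch, and omitting it compares full prediction output:

with open('labels.txt') as f:
    labels = f.read().splitlines()

compare_predictor_output('myModel.cntk', labels, maxLayers=5)  # first 5 layers
compare_predictor_output('myModel.cntk', labels)               # whole model
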
Example 12
    def process(self, ellLayers):
        """Helper to convert a convolutional layer to the ELL equivalent."""

        # Note that a single CNTK Convolutional function block is equivalent to the following 3 ELL layers:
        # - ConvolutionalLayer
        # - BiasLayer
        # - ActivationLayer. This layer is sometimes missing, depending on activation type.
        #
        # Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
        # padding requirements.

        weightsTensor = converters.get_float_tensor_from_cntk_convolutional_weight_parameter(
            self.weights_parameter)
        biasVector = converters.get_float_vector_from_cntk_trainable_parameter(
            self.bias_parameter)

        # Create the ELL.LayerParameters for the various ELL layers
        firstLayerParameters = ELL.LayerParameters(
            self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
            self.layer.ell_outputShapeMinusPadding, ELL.NoPadding())
        middleLayerParameters = ELL.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ELL.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ELL.NoPadding())
        lastLayerParameters = ELL.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ELL.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters)

        layerParameters = firstLayerParameters

        # Fill in the convolutional parameters
        weightsShape = self.weights_parameter.shape
        receptiveField = weightsShape[2]
        stride = self.attributes['strides'][2]

        filterBatchSize = layerParameters.outputShape.channels

        internalNodes = utilities.get_model_layers(self.layer.block_root)
        activationType = utilities.get_ell_activation_type(internalNodes)

        convolutionalParameters = ELL.ConvolutionalParameters(
            receptiveField, stride, self.convolution_method, filterBatchSize)

        # Create the ELL convolutional layer
        ellLayers.append(
            ELL.FloatConvolutionalLayer(layerParameters,
                                        convolutionalParameters,
                                        weightsTensor))

        # Create the ELL bias layer
        isSoftmaxActivation = utilities.is_softmax_activation(internalNodes)
        hasActivation = isSoftmaxActivation or activationType is not None
        if hasActivation:
            layerParameters = middleLayerParameters
        else:
            layerParameters = lastLayerParameters
        ellLayers.append(ELL.FloatBiasLayer(layerParameters, biasVector))

        # Create the ELL activation layer
        if hasActivation:
            layerParameters = lastLayerParameters

            # Special case: if this is softmax activation, create an ELL Softmax layer.
            # Else, insert an ELL ActivationLayer
            if isSoftmaxActivation:
                ellLayers.append(ELL.FloatSoftmaxLayer(layerParameters))
            else:
                ellLayers.append(
                    ELL.FloatActivationLayer(layerParameters, activationType))
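
A short restatement of the shape bookkeeping assumed above (standard CNTK convolution weight layout; values hypothetical):

weightsShape = (16, 3, 3, 3)       # (num_filters, input_channels, rows, cols)
numFilters = weightsShape[0]       # 16
receptiveField = weightsShape[2]   # 3; taking the row count assumes square filters
assert weightsShape[2] == weightsShape[3], "square filters assumed"
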