Example #1
def predictor_from_cntk_model(modelFile, plotModel=False):
    """Loads a CNTK model and returns an ELL.NeuralNetworkPredictor"""

    print("Loading...")
    z = load_model(modelFile)
    print("\nFinished loading.")

    if plotModel:
        filename = os.path.join(os.path.dirname(modelFile),
                                os.path.basename(modelFile) + ".png")
        cntk_utilities.plot_model(z, filename)

    print("Pre-processing...")
    modelLayers = cntk_utilities.get_model_layers(z)

    # Get the relevant CNTK layers that we will convert to ELL
    layersToConvert = cntk_layers.get_filtered_layers_list(modelLayers)
    print("\nFinished pre-processing.")

    predictor = None

    try:
        # Create a list of ELL layers from the CNTK layers
        ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(
            layersToConvert)
        # Create an ELL neural network predictor from the layers
        predictor = ELL.FloatNeuralNetworkPredictor(ellLayers)
    except BaseException:
        print("Error occurred attempting to convert CNTK layers to ELL layers")
        raise

    return predictor
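A minimal usage sketch for predictor_from_cntk_model (not part of the original example). It assumes the ELL CNTK importer modules and the ELL Python package are importable, and the model path is a placeholder; GetInputShape and Predict are the predictor methods used elsewhere in these examples.

import numpy as np

# Hypothetical model path; replace with a real CNTK model file.
predictor = predictor_from_cntk_model("myModel.cntk")

# ELL reports the input shape as (rows, columns, channels).
inputShape = predictor.GetInputShape()
testInput = np.random.uniform(
    low=0, high=255,
    size=(inputShape.rows, inputShape.columns,
          inputShape.channels)).astype(np.float32)

# ELL predictors take a flat vector in row, column, channel order.
results = np.asarray(predictor.Predict(testInput.ravel()))
print("Top class index:", int(np.argmax(results)))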
Example #2
    def compare_model(self, layers):
        ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(layers)
        # Create an ELL neural network predictor from the layers
        predictor = ell.neural.FloatNeuralNetworkPredictor(ellLayers)
        shape = predictor.GetInputShape()

        # reorder the ELL shape to CNTK's (channels, rows, columns) layout
        self.input_shape = (shape.channels, shape.rows, shape.columns)
        self.data = self.get_input_data()

        if len(self.cntk_model.arguments) > 1:
            # The model also takes a label argument; supply the label indices
            # so eval() has a value for it.
            predictions = self.cntk_model.eval({
                self.cntk_model.arguments[0]: [self.data],
                self.cntk_model.arguments[1]:
                    [list(range(len(self.categories)))]
            })
        else:
            predictions = self.cntk_model.eval(
                {self.cntk_model.arguments[0]: [self.data]})

        size = 0
        cntk_output = None
        if isinstance(predictions, dict):
            for key in self.cntk_model.outputs:
                shape = key.shape
                if shape:
                    s = np.max(shape)
                    if s > size:
                        size = s
                        # The CNTK model does not include a softmax
                        # operation, so apply it explicitly here
                        cntk_output = softmax(predictions[key][0]).eval()
        else:
            cntk_output = softmax(predictions).eval()

        self.verify_ell("Softmax", predictor, self.data, cntk_output)
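The comment in the loop above notes that the CNTK model has no softmax node, so the test applies one explicitly before comparing. A standalone sketch of that workaround, using a placeholder logits array and assuming softmax comes from cntk.ops:

import numpy as np
from cntk.ops import softmax

# Placeholder logits standing in for a raw CNTK eval() output.
raw_scores = np.array([2.0, 1.0, 0.1], dtype=np.float32)

# Wrapping the array in a CNTK softmax node and calling eval() turns the
# raw scores into probabilities, as done in compare_model above.
probabilities = softmax(raw_scores).eval()
print(probabilities, probabilities.sum())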
Example #3
    def compare_model(self, layers):
        ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(layers)
        # Create an ELL neural network predictor from the layers
        predictor = ell.neural.NeuralNetworkPredictor(ellLayers)
        shape = predictor.GetInputShape()
        # reorder the ELL shape to CNTK's (channels, rows, columns) layout
        self.input_shape = (shape.channels, shape.rows, shape.columns)
        self.data = self.get_input_data()

        if len(self.cntk_model.arguments) > 1:
            # The model also takes a label argument; supply a zero array of
            # the matching shape so eval() has a value for it.
            output = np.zeros(self.cntk_model.arguments[1].shape,
                              dtype=np.float32)
            predictions = self.cntk_model.eval({
                self.cntk_model.arguments[0]: [self.data],
                self.cntk_model.arguments[1]: output
            })
        else:
            predictions = self.cntk_model.eval(
                {self.cntk_model.arguments[0]: [self.data]})

        size = 0
        output = None
        if isinstance(predictions, dict):
            for key in self.cntk_model.outputs:
                shape = key.shape
                if len(shape) > 0:
                    s = np.max(shape)
                    if s > size:
                        size = s
                        output = predictions[key][0] / 100
        else:
            output = predictions[0]

        self.verify_ell("Softmax", predictor, self.data, output)
        self.data = output  # make this the input to the next layer.
        self.save_layer_outputs("Softmax")
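Both compare_model variants reorder the ELL input shape into CNTK's channel-first layout before generating input data. A small sketch of the corresponding data reordering, matching the moveaxis conversion used in Example #4 below; the array sizes here are placeholders:

import numpy as np

# Placeholder image tensor in ELL's (rows, columns, channels) order.
ellOrdered = np.random.uniform(0, 255, size=(64, 64, 3)).astype(np.float32)

# rows, columns, channels => channels, rows, columns, as CNTK expects.
cntkOrdered = np.ascontiguousarray(np.moveaxis(ellOrdered, -1, 0))
assert cntkOrdered.shape == (3, 64, 64)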
Example #4
def compare_predictor_output(modelFile,
                             labels,
                             modelTestInput=None,
                             maxLayers=None):
    """Compares an ell.NeuralNetworkPredictor against its equivalent CNTK
    model.

    Parameters:
    modelFile -- path to the CNTK model file
    labels -- array of labels
    modelTestInput -- input data in row, column, channel ordering
    maxLayers -- integer to indicate how many layers to run before stopping.
                 Setting to None will run all layers and compare against the
                 original model.

    """

    z = load_model(modelFile)
    modelLayers = cntk_utilities.get_model_layers(z)

    # Get the relevant CNTK layers that we will convert to ELL
    layersToConvert = cntk_layers.get_filtered_layers_list(
        modelLayers, maxLayers)

    if not layersToConvert:
        raise RuntimeError("No layers are converted, nothing to test")

    # Create a list of ELL layers from the relevant CNTK layers
    _logger.info("\nCreating ELL predictor...")
    ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(layersToConvert)

    # Create an ELL neural network predictor from the converted layers
    predictor = ell.neural.FloatNeuralNetworkPredictor(ellLayers)

    if modelTestInput is None:
        inputShape = predictor.GetInputShape()
        modelTestInput = np.random.uniform(
            low=0,
            high=255,
            size=(inputShape.rows, inputShape.columns,
                  inputShape.channels)).astype(np.float64)

    ellTestInput = modelTestInput.ravel()  # rows, columns, channels
    ellResults = predictor.Predict(ellTestInput)

    # rows, columns, channels => channels, rows, columns
    cntkTestInput = np.moveaxis(modelTestInput, -1, 0).astype(np.float32)
    cntkTestInput = np.ascontiguousarray(cntkTestInput)

    # Get the equivalent CNTK model
    if not maxLayers:
        _logger.info("\nRunning original CNTK model...")

        _, out = z.forward({
            z.arguments[0]: [cntkTestInput],
            z.arguments[1]: [list(range(len(labels)))]
        })
        for output in z.outputs:
            if output.shape == (len(labels),):
                out = out[output]
        cntkResults = softmax(out[0]).eval()

        # For the full model, we compare prediction output instead of layers
        np.testing.assert_array_almost_equal(
            cntkResults, ellResults, 5,
            'prediction outputs do not match! (for model ' + modelFile + ')')
    else:
        _logger.info("\nRunning partial CNTK model...")

        if (layersToConvert[-1].layer.op_name == 'CrossEntropyWithSoftmax'
                and len(layersToConvert) > 2):
            # CrossEntropyWithSoftmax is a training criterion with no useful
            # inference output, so evaluate the layer before it instead
            zz = as_composite(layersToConvert[-2].layer)
        else:
            zz = as_composite(layersToConvert[-1].layer)
        zz = softmax(zz)

        out = zz(cntkTestInput)
        orderedCntkModelResults = (
            cntk_converters.get_float_vector_from_cntk_array(out))

        np.testing.assert_array_almost_equal(
            orderedCntkModelResults, ellResults, 5,
            ('prediction outputs do not match! (for partial model ' +
             modelFile + ')'))
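A hedged usage sketch for compare_predictor_output; the model path, label list, and layer count are placeholders, and the ELL importer modules (cntk_layers, cntk_utilities, cntk_converters) are assumed to be importable.

# Hypothetical labels and model path for illustration only.
labels = ["cat", "dog", "bird"]

# Compare the full model's softmaxed prediction output against ELL.
compare_predictor_output("myModel.cntk", labels)

# Compare only the first three converted layers against a partial CNTK model.
compare_predictor_output("myModel.cntk", labels, maxLayers=3)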