Example #1
def predictor_from_cntk_model(modelFile, plotModel=False):
    """Loads a CNTK model and returns an ELL.NeuralNetworkPredictor"""

    print("Loading...")
    z = load_model(modelFile)
    print("\nFinished loading.")

    if plotModel:
        filename = os.path.join(os.path.dirname(modelFile),
                                os.path.basename(modelFile) + ".png")
        cntk_utilities.plot_model(z, filename)

    print("Pre-processing...")
    modelLayers = cntk_utilities.get_model_layers(z)

    # Get the relevant CNTK layers that we will convert to ELL
    layersToConvert = cntk_layers.get_filtered_layers_list(modelLayers)
    print("\nFinished pre-processing.")

    predictor = None

    try:
        # Create a list of ELL layers from the CNTK layers
        ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(
            layersToConvert)
        # Create an ELL neural network predictor from the layers
        predictor = ELL.FloatNeuralNetworkPredictor(ellLayers)
    except BaseException as exception:
        print("Error occurred attempting to convert cntk layers to ELL layers")
        raise exception

    return predictor
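
A minimal usage sketch, assuming the importer module that defines this function has been imported and that "my_model.cntk" is a placeholder path; GetInputShape and Predict are used as they appear in Example #4 below.

import numpy as np

# "my_model.cntk" is a placeholder path, not from the original source.
predictor = predictor_from_cntk_model("my_model.cntk", plotModel=False)

# Build a random test image in row, column, channel order (as in Example #4).
inputShape = predictor.GetInputShape()
testInput = np.random.uniform(
    low=0, high=255,
    size=(inputShape.rows, inputShape.columns, inputShape.channels)
).astype(np.float32)

# The predictor expects a flat vector in row, column, channel order.
results = predictor.Predict(testInput.ravel())
print("Top prediction index:", int(np.argmax(results)))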
Example #2
    def model_test_impl(self, model):
        self.reset()
        with self.subTest(model=model):
            self.cntk_model = load_model(model + '.cntk')
            modelLayers = cntk_utilities.get_model_layers(self.cntk_model)
            # Get the relevant CNTK layers that we will convert to ELL
            layersToConvert = cntk_layers.get_filtered_layers_list(modelLayers)

            self.compare_model(layersToConvert)

            self.print_top_result()
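
The subTest context manager lets each model run as its own sub-case of a single test method, so one failing comparison does not stop the remaining ones. A minimal self-contained sketch of that pattern, with placeholder values unrelated to the CNTK test class above:

import unittest

class SubTestDemo(unittest.TestCase):
    def test_many_inputs(self):
        # Each value gets its own pass/fail entry in the test report,
        # so one failure does not mask the remaining cases.
        for value in [1, 2, 3]:  # placeholder inputs
            with self.subTest(value=value):
                self.assertGreater(value, 0)

if __name__ == "__main__":
    unittest.main()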
Example #3
    def run(self):
        self.report = open("report.md", "w")
        self.report.write("# Comparison Results\n")
        self.report.write("**model**: %s\n\n" % (self.model_file))
        if self.image_file is not None:
            self.image = self.load_image(self.image_file)
            self.report.write("**image**: %s\n\n" % (self.image_file))

        self.cntk_model = cntk.load_model(self.model_file)
        modelLayers = cntk_utilities.get_model_layers(self.cntk_model)
        # Get the relevant CNTK layers that we will convert to ELL
        layersToConvert = cntk_layers.get_filtered_layers_list(modelLayers)
        _logger.info(
            "----------------------------------------------------------------------------------"
        )
        if self.layers:
            for layer in layersToConvert:
                self.compare_layer(layer)
        else:
            self.compare_model(layersToConvert)

        self.print_top_result()
        self.report.close()
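
This method relies on a module-level _logger whose setup is not part of the snippet; a typical configuration, stated here as an assumption, would look like this.

import logging

# Assumed setup for the module-level logger used above; the original
# configuration is not shown in the snippet.
logging.basicConfig(level=logging.INFO, format="%(message)s")
_logger = logging.getLogger(__name__)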
Example #4
def compare_predictor_output(modelFile,
                             labels,
                             modelTestInput=None,
                             maxLayers=None):
    """Compares an ell.NeuralNetworkPredictor against its equivalent CNTK
    model.

    Parameters:
    modelFile -- path to the CNTK model file
    labels -- array of labels
    modelTestInput -- input data in row, column, channel ordering
    maxLayers -- integer to indicate how many layers to run before stopping.
                 Setting to None will run all layers and compare against the
                 original model.

    """

    z = load_model(modelFile)
    modelLayers = cntk_utilities.get_model_layers(z)

    # Get the relevant CNTK layers that we will convert to ELL
    layersToConvert = cntk_layers.get_filtered_layers_list(
        modelLayers, maxLayers)

    if not layersToConvert:
        raise RuntimeError("No layers are converted, nothing to test")

    # Create a list of ELL layers from the relevant CNTK layers
    _logger.info("\nCreating ELL predictor...")
    ellLayers = cntk_layers.convert_cntk_layers_to_ell_layers(layersToConvert)

    # Create an ELL neural network predictor from the relevant CNTK layers
    predictor = ell.neural.FloatNeuralNetworkPredictor(ellLayers)

    if not modelTestInput:
        inputShape = predictor.GetInputShape()
        modelTestInput = np.random.uniform(
            low=0,
            high=255,
            size=(inputShape.rows, inputShape.columns,
                  inputShape.channels)).astype(np.float_)

    ellTestInput = modelTestInput.ravel()  # rows, columns, channels
    ellResults = predictor.Predict(ellTestInput)

    # rows, columns, channels => channels, rows, columns
    cntkTestInput = np.moveaxis(modelTestInput, -1, 0).astype(np.float32)
    cntkTestInput = np.ascontiguousarray(cntkTestInput)

    # Get the equivalent CNTK model
    if not maxLayers:
        _logger.info("\nRunning original CNTK model...")

        _, out = z.forward({
            z.arguments[0]: [cntkTestInput],
            z.arguments[1]: [list(range(len(labels)))]
        })
        for output in z.outputs:
            if (output.shape == (len(labels), )):
                out = out[output]
        cntkResults = softmax(out[0]).eval()

        # For the full model, we compare prediction output instead of layers
        np.testing.assert_array_almost_equal(
            cntkResults, ellResults, 5,
            'prediction outputs do not match! (for model ' + modelFile + ')')
    else:
        _logger.info("\nRunning partial CNTK model...")

        if (layersToConvert[-1].layer.op_name == 'CrossEntropyWithSoftmax'
                and len(layersToConvert) > 2):
            # ugly hack for CrossEntropyWithSoftmax
            zz = as_composite(layersToConvert[-2].layer)
            zz = softmax(zz)
        else:
            zz = as_composite(layersToConvert[-1].layer)
            zz = softmax(zz)

        out = zz(cntkTestInput)
        orderedCntkModelResults = cntk_converters.\
            get_float_vector_from_cntk_array(out)

        np.testing.assert_array_almost_equal(
            orderedCntkModelResults, ellResults, 5,
            ('prediction outputs do not match! (for partial model ' +
             modelFile + ')'))
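
A hedged usage sketch, assuming the labels come from a plain text file with one class name per line; the file paths are placeholders, not from the source.

# Placeholder paths; substitute a real .cntk model and labels file.
with open("categories.txt", "r") as f:
    labels = [line.strip() for line in f]

# Compare the full converted model against the original CNTK model.
compare_predictor_output("my_model.cntk", labels)

# Compare only the first three converted layers against a partial CNTK model.
compare_predictor_output("my_model.cntk", labels, maxLayers=3)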