Example #1
    def test_prediction_vs_tensorflow_inceptionV3(self):
        output_col = "prediction"
        image_df = image_utils.getSampleImageDF()

        # An example of how a pre-trained keras model can be used with TFImageTransformer
        with KSessionWrap() as (sess, g):
            with g.as_default():
                K.set_learning_phase(0)    # this is important but it's on the user to call it.
                # nChannels needed for input_tensor in the InceptionV3 call below
                image_string = utils.imageInputPlaceholder(nChannels=3)
                resized_images = tf.image.resize_images(image_string,
                                                        InceptionV3Constants.INPUT_SHAPE)
                # Keras expects arrays in RGB order, but the image schema gives BGR, so flip the channels
                preprocessed = preprocess_input(imageIO._reverseChannels(resized_images))
                model = InceptionV3(input_tensor=preprocessed, weights="imagenet")
                graph = tfx.strip_and_freeze_until([model.output], g, sess, return_graph=True)

        transformer = TFImageTransformer(channelOrder='BGR', inputCol="image", outputCol=output_col, graph=graph,
                                         inputTensor=image_string, outputTensor=model.output,
                                         outputMode="vector")
        transformed_df = transformer.transform(image_df.limit(10))
        self.assertDfHasCols(transformed_df, [output_col])
        collected = transformed_df.collect()
        transformer_values, transformer_topK = self.transformOutputToComparables(collected,
                                                                                 output_col, lambda row: row['image']['origin'])

        tf_values, tf_topK = self._executeTensorflow(graph, image_string.name, model.output.name,
                                                     image_df)
        self.compareClassSets(tf_topK, transformer_topK)
        self.compareClassOrderings(tf_topK, transformer_topK)
        self.compareArrays(tf_values, transformer_values)
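Each example relies on KSessionWrap to give the Keras model an isolated TensorFlow graph and session. The block below is only a minimal sketch of what such a context manager is assumed to look like; the name keras_session_wrap and the cleanup details are assumptions, not the actual sparkdl implementation:

from contextlib import contextmanager

import tensorflow as tf
from keras import backend as K

@contextmanager
def keras_session_wrap():
    # Hypothetical stand-in for KSessionWrap: yield an isolated (session, graph) pair.
    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    try:
        K.set_session(sess)   # route Keras graph construction into this session
        yield sess, graph
    finally:
        K.clear_session()     # drop the Keras global state
        sess.close()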
Example #2
 def _transform(self, dataset):
     with KSessionWrap() as (sess, keras_graph):
         graph, inputTensorName, outputTensorName = self._loadTFGraph(sess=sess,
                                                                      graph=keras_graph)
         image_df = self.loadImagesInternal(dataset, self.getInputCol())
         transformer = TFImageTransformer(channelOrder='RGB', inputCol=self._loadedImageCol(),
                                          outputCol=self.getOutputCol(), graph=graph,
                                          inputTensor=inputTensorName,
                                          outputTensor=outputTensorName,
                                          outputMode=self.getOrDefault(self.outputMode))
         return transformer.transform(image_df).drop(self._loadedImageCol())
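This _transform is a method of a file-based Keras transformer; the enclosing class is not shown and is assumed here to be sparkdl's KerasImageFileTransformer. A typical driver for it, following the sparkdl README style, might look roughly like the sketch below; paths, column names, and parameter names are illustrative and should be treated as assumptions for other versions:

import numpy as np
from keras.applications.inception_v3 import preprocess_input
from keras.preprocessing.image import img_to_array, load_img
from pyspark.sql.types import StringType
from sparkdl import KerasImageFileTransformer

def load_and_preprocess(uri):
    # Load an image from disk, resize to InceptionV3's input size, and preprocess it.
    image = img_to_array(load_img(uri, target_size=(299, 299)))
    return preprocess_input(np.expand_dims(image, axis=0))

transformer = KerasImageFileTransformer(inputCol="uri", outputCol="predictions",
                                        modelFile="/tmp/model-full.h5",   # illustrative path
                                        imageLoader=load_and_preprocess,
                                        outputMode="vector")

# Assumes an active SparkSession named `spark`.
files = ["/data/images/cat.jpg", "/data/images/dog.jpg"]   # hypothetical image paths
uri_df = spark.createDataFrame(files, StringType()).toDF("uri")
predictions_df = transformer.transform(uri_df)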
Example #3
 def _loadTFGraph(self):
     with KSessionWrap() as (sess, graph):
         assert K.backend() == "tensorflow", \
             "Keras backend is not tensorflow but KerasImageTransformer only supports " + \
             "tensorflow-backed Keras models."
         with graph.as_default():
             K.set_learning_phase(0)  # Testing phase
             model = load_model(self.getModelFile())
             out_op_name = tfx.op_name(model.output, graph)
             self._inputTensor = model.input.name
             self._outputTensor = model.output.name
             return tfx.strip_and_freeze_until([out_op_name], graph, sess, return_graph=True)
Example #4
 def _transform(self, dataset):
     with KSessionWrap() as (sess, keras_graph):
         tfGraph, inputTensorName, outputTensorName = self._loadTFGraph(sess=sess,
                                                                        graph=keras_graph)
         inputGraph = TFInputGraph.fromGraph(graph=tfGraph, sess=sess,
                                             feed_names=[inputTensorName],
                                             fetch_names=[outputTensorName])
     # Create TFTransformer & use it to apply the loaded Keras model graph to our dataset
     transformer = TFTransformer(tfInputGraph=inputGraph,
                                 inputMapping={self.getInputCol() : inputTensorName},
                                 outputMapping={outputTensorName: self.getOutputCol()})
     return transformer.transform(dataset)
 def _loadTFGraph(self):
     with KSessionWrap() as (sess, g):
         assert K.backend() == "tensorflow", \
             "Keras backend is not tensorflow but KerasImageTransformer only supports " + \
             "tensorflow-backed Keras models."
         with g.as_default():
             K.set_learning_phase(0)  # Testing phase
             model = load_model(self.getModelFile())
             out_op_name = utils.op_name(model.output)
             self._inputTensor = model.input.name
             self._outputTensor = model.output.name
             return utils.stripAndFreezeGraph(
                 g.as_graph_def(add_shapes=True), sess, [out_op_name])
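Both _loadTFGraph variants call load_model(self.getModelFile()), so they expect the Keras model to be available as a saved HDF5 file. A short sketch of producing such a file; the path is illustrative:

from keras.applications import InceptionV3

model = InceptionV3(weights="imagenet")
model.save("/tmp/model-full.h5")   # HDF5 file that getModelFile() would point to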
Example #5
    def test_prediction_vs_tensorflow_inceptionV3(self):
        output_col = "prediction"
        image_df = image_utils.getSampleImageDF()

        # An example of how a pre-trained keras model can be used with TFImageTransformer
        with KSessionWrap() as (sess, g):
            with g.as_default():
                K.set_learning_phase(0)  # this is important but it's on the user to call it.
                # nChannels needed for input_tensor in the InceptionV3 call below
                image_string = utils.imageInputPlaceholder(nChannels=3)
                resized_images = tf.image.resize_images(
                    image_string, InceptionV3Constants.INPUT_SHAPE)
                preprocessed = preprocess_input(resized_images)
                model = InceptionV3(input_tensor=preprocessed,
                                    weights="imagenet")
                graph = utils.stripAndFreezeGraph(
                    g.as_graph_def(add_shapes=True), sess, [model.output])

        transformer = TFImageTransformer(inputCol="image",
                                         outputCol=output_col,
                                         graph=graph,
                                         inputTensor=image_string,
                                         outputTensor=model.output,
                                         outputMode="vector")
        transformed_df = transformer.transform(image_df.limit(10))
        self.assertDfHasCols(transformed_df, [output_col])
        collected = transformed_df.collect()
        transformer_values, transformer_topK = self.transformOutputToComparables(
            collected, "filePath", output_col)

        tf_values, tf_topK = self._executeTensorflow(graph, image_string.name,
                                                     model.output.name,
                                                     image_df)
        self.compareClassSets(tf_topK, transformer_topK)
        self.compareClassOrderings(tf_topK, transformer_topK)
        self.compareArrays(tf_values, transformer_values)
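The assertions compare the transformer output against plain TensorFlow execution through _executeTensorflow, whose body is not part of this listing. The sketch below only illustrates the assumed shape of that comparison path (the helper name and the batching are assumptions): run the frozen graph directly, feeding and fetching tensors by name.

import tensorflow as tf

def run_frozen_graph(graph, input_tensor_name, output_tensor_name, image_batch):
    # image_batch: float32 array of shape (N, height, width, 3) matching the placeholder.
    with tf.Session(graph=graph) as sess:
        return sess.run(output_tensor_name, feed_dict={input_tensor_name: image_batch})

# e.g. probabilities = run_frozen_graph(graph, image_string.name, model.output.name, batch)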