Example #1
0
    def test_incorrect_tensors(self):
        """Nonexistent tensor names must make both check helpers raise ValueError."""
        missing = "pcodmsocs:0"
        cases = (
            (utils.check_placeholders, [missing]),
            (utils.check_tensors, [missing]),
            # A single bad name should fail even when mixed with a valid one.
            (utils.check_tensors, ["import/x:0", missing]),
        )
        for check, tensor_names in cases:
            with pytest.raises(ValueError):
                check(TestUtils.g, tensor_names)
Example #2
0
    def __init__(self, model_path, in_t, out_t, encode, decode, batch=False):
        """
        When constructing, the method checks that all in_t tensors are valid
        placeholders and all out_t tensors are valid tensors that exist in
        the graph.

        The object is constructed by providing:
            * A model path
            * List of input placeholders that will be used to feed the model.
            * List of expected output tensors (the desired predictions).
            * encode function mapping request data to input numpy values.
            * decode function mapping output numpy values to request response data.

        :param str model_path: It can be a `.pb` file or directory containing checkpoint files.
        :param list[str] in_t: List of placeholder tensor names. Something like: ["input/image:0"]
        :param list[str] out_t: List of output tensor names. Something like: ["output/Softmax:0"]
        :param encode: python function that receives the request body data and returns a `dict` mapping
                        in_t to numpy values.
        :param decode: python function that receives a `dict` mapping out_t to numpy values and returns
                        the response data (for example, a `dict` object that will be transformed to JSON).
                        The return object of this method will be the response to the request.
                        Read its docs for more information on how to return certain objects (for example, images).
        :param boolean batch: If False, batch dimension (required by tensorflow) will be automatically
                               handled (that is, you don't need to handle it yourself in the encode/decode functions).
                               This option is ideal when dealing with single inferences.
                               If True, you can run multiple inferences at the same time by dealing
                               with the batch dimension yourself in the encode/decode functions.

        :raises ValueError: if in_t are not all placeholder or out_t contains non-existent graph tensors
        """
        # Load the model and keep a handle on its graph for validation below.
        self.sess = load_model(model_path)
        self.graph = self.sess.graph

        # Normalize user-supplied names (e.g. add ":0" suffix) before checking.
        self.in_t = [graph_utils.smart_tensor_name(x) for x in in_t]
        self.out_t = [graph_utils.smart_tensor_name(x) for x in out_t]

        # Fail fast: every requested tensor must exist in the loaded graph...
        graph_utils.check_tensors(self.graph, self.in_t)
        graph_utils.check_tensors(self.graph, self.out_t)

        # ...and every input must additionally be a placeholder op.
        graph_utils.check_placeholders(self.graph, self.in_t)

        self.encode = encode
        self.decode = decode

        self.batch = batch
Example #3
0
    def test_none(self):
        """Passing None where a graph, tensor list, or input is expected raises ValueError."""
        triggers = (
            lambda: utils.check_placeholders(None, []),
            lambda: utils.check_placeholders(TestUtils.g, None),
            lambda: utils.check_tensors(None, []),
            lambda: utils.check_tensors(TestUtils.g, None),
            lambda: utils.check_input(None, [], ""),
            lambda: utils.check_input([], None, ""),
        )
        for trigger in triggers:
            with pytest.raises(ValueError):
                trigger()
Example #4
0
 def test_correct_tensors(self):
     """Valid placeholder and tensor names pass the checks without raising."""
     graph = TestUtils.g
     utils.check_placeholders(graph, ["import/x:0"])
     # Both an input and an output tensor should be accepted by check_tensors.
     for tensor_name in ("import/x:0", "import/out:0"):
         utils.check_tensors(graph, [tensor_name])