Example #1
def estimate_io_tensors(model_path):
    """
    Prints estimated input/output tensor names that can later be used
    with the TFServeApp class to serve the model.

    :param str model_path: can be a '.pb' model file or a checkpoint directory.
    """
    sess = load_model(model_path)
    graph = sess.graph

    print("Possible INPUT tensors:")
    ph = [
        op.outputs[0] for op in graph.get_operations()
        if op.type == "Placeholder"
    ]
    for p in ph:
        print("\t{}".format(p.name))

    print()
    print("Possible OUTPUT tensors:")
    # `out_hints` is not defined in this snippet; a plausible set of
    # heuristic keywords is assumed here so the code runs as written.
    out_hints = ("softmax", "sigmoid", "out", "output", "prediction")
    ops = graph.get_operations()

    for op in ops:
        if any(x in op.name.lower() for x in out_hints):
            for o in op.outputs:
                print("\t{}".format(o.name))

    sess.close()
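
A minimal usage sketch for this helper. The import path is an assumption; adjust it to wherever estimate_io_tensors actually lives in the package:

# Hypothetical import path.
from tfserve import estimate_io_tensors

# Inspect a model to discover candidate tensor names before
# wiring them into TFServeApp.
estimate_io_tensors("./tests/models/graph.pb")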
Example #2
    def __init__(self, model_path, in_t, out_t, encode, decode, batch=False):
        """
        At construction time, the method checks that all in_t tensors are
        valid placeholders and that all out_t tensors exist in the graph.

        The object is constructed by providing:
            * A model path.
            * List of input placeholders that will be used to feed the model.
            * List of expected output tensors (the desired predictions).
            * encode function mapping request data to input numpy values.
            * decode function mapping output numpy values to request response data.

        :param str model_path: It can be a `.pb` file or a directory containing checkpoint files.
        :param list[str] in_t: List of placeholder tensor names. Something like: ["input/image:0"]
        :param list[str] out_t: List of output tensor names. Something like: ["output/Softmax:0"]
        :param encode: python function that receives the request body data and returns a `dict` mapping
                        in_t to numpy values.
        :param decode: python function that receives a `dict` mapping out_t to numpy values and returns
                        the response data (for example, a `dict` object that will be transformed to JSON).
                        The return object of this method will be the response to the request.
                        Read its docs for more information on how to return certain objects (for example, images).
        :param bool batch: If False, the batch dimension (required by tensorflow) will be automatically
                               handled (that is, you don't need to handle it yourself in the encode/decode functions).
                               This option is ideal when dealing with single inferences.
                               If True, you can run multiple inferences at the same time by dealing
                               with the batch dimension yourself in the encode/decode functions.

        :raises ValueError: if in_t are not all placeholders or out_t contains non-existent graph tensors.
        """
        self.sess = load_model(model_path)
        self.graph = self.sess.graph

        self.in_t = [graph_utils.smart_tensor_name(x) for x in in_t]
        self.out_t = [graph_utils.smart_tensor_name(x) for x in out_t]

        graph_utils.check_tensors(self.graph, self.in_t)
        graph_utils.check_tensors(self.graph, self.out_t)

        graph_utils.check_placeholders(self.graph, self.in_t)

        self.encode = encode
        self.decode = decode

        self.batch = batch
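
To make these parameters concrete, here is a hedged construction sketch. The tensor names come from the docstring examples above; the import path, the request format, and the image shape are assumptions, not part of the library:

import numpy as np
# Hypothetical import path; adjust to the actual package layout.
from tfserve import TFServeApp

def encode(request_data):
    # Map raw request bytes to {input tensor name: numpy value}.
    # Assumes the request body is a flat float32 buffer; real
    # applications will parse their own wire format here.
    img = np.frombuffer(request_data, dtype=np.float32).reshape(224, 224, 3)
    return {"input/image:0": img}

def decode(outputs):
    # Map {output tensor name: numpy value} to JSON-serializable data.
    probs = outputs["output/Softmax:0"]
    return {"class": int(np.argmax(probs)), "prob": float(np.max(probs))}

# With batch=False, the batch dimension is handled automatically,
# so encode/decode work with single inferences.
app = TFServeApp("model.pb", ["input/image:0"],
                 ["output/Softmax:0"], encode, decode)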
Example #3
def estimate_io_tensors(model_path):
    """
    Prints estimated input/output tensor names that can later be used
    with the TFServeApp class to serve the model.

    :param str model_path: can be a '.pb' model file or a checkpoint directory.
    """
    sess = load_model(model_path)

    print("Possible INPUT tensors:")
    _print_tensors(_possible_input_tensors(sess.graph))

    print()

    print("Possible OUTPUT tensors:")
    _print_tensors(_possible_output_tensors(sess.graph))

    sess.close()
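
This version factors the logic of Example #1 into helpers that are not shown here. A plausible sketch, reconstructed from Example #1 (the helper names match the calls above, but the hint keywords are an assumption):

OUT_HINTS = ("softmax", "sigmoid", "out", "output", "prediction")  # assumed heuristic

def _possible_input_tensors(graph):
    # Placeholders are the usual feed points of a graph.
    return [op.outputs[0] for op in graph.get_operations()
            if op.type == "Placeholder"]

def _possible_output_tensors(graph):
    # Heuristic: operations whose names contain common output keywords.
    return [o for op in graph.get_operations()
            if any(x in op.name.lower() for x in OUT_HINTS)
            for o in op.outputs]

def _print_tensors(tensors):
    for t in tensors:
        print("\t{}".format(t.name))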
Example #4
class TestUtils:
    sess = loader.load_model("./tests/models/graph.pb")
    g = sess.graph

    def test_none(self):
        with pytest.raises(ValueError):
            utils.check_placeholders(None, [])

        with pytest.raises(ValueError):
            utils.check_placeholders(TestUtils.g, None)

        with pytest.raises(ValueError):
            utils.check_tensors(None, [])

        with pytest.raises(ValueError):
            utils.check_tensors(TestUtils.g, None)

        with pytest.raises(ValueError):
            utils.check_input(None, [], "")

        with pytest.raises(ValueError):
            utils.check_input([], None, "")

    def test_correct_tensors(self):
        utils.check_placeholders(TestUtils.g, ["import/x:0"])
        utils.check_tensors(TestUtils.g, ["import/x:0"])
        utils.check_tensors(TestUtils.g, ["import/out:0"])

    def test_incorrect_tensors(self):
        with pytest.raises(ValueError):
            utils.check_placeholders(TestUtils.g, ["pcodmsocs:0"])

        with pytest.raises(ValueError):
            utils.check_tensors(TestUtils.g, ["pcodmsocs:0"])

        with pytest.raises(ValueError):
            utils.check_tensors(TestUtils.g, ["import/x:0", "pcodmsocs:0"])

    def test_smart_tensor(self):
        assert utils.smart_tensor_name("carlitos") == "carlitos:0"
        assert utils.smart_tensor_name("carlitos:0") == "carlitos:0"
        assert utils.smart_tensor_name("carlitos:1") == "carlitos:1"
        assert utils.smart_tensor_name("carlitos:tevez") == "carlitos:tevez:0"
Example #5
    def test_pb(self):
        assert loader.load_model("./tests/models/graph.pb") is not None
Example #6
    def test_dir(self):
        assert loader.load_model("./tests/models/") is not None
Example #7
    def test_inexistent_dir(self):
        with pytest.raises(ValueError):
            loader.load_model("./non_existant/")
Example #8
    def test_none(self):
        with pytest.raises(ValueError):
            loader.load_model(None)