Example No. 1
0
 def test_list_backends(self):
     """Assert that at least one OpenVINO-TensorFlow backend is available."""
     # An empty list is falsy, so test the list directly instead of the
     # non-idiomatic `if not len(...)`; include a message so a failure
     # explains itself in the pytest report.
     if not openvino_tensorflow.list_backends():
         raise AssertionError("openvino_tensorflow.list_backends() returned no backends")
Example No. 2
0
        # NOTE(review): this fragment is truncated — the enclosing
        # `if args.input_mean:` guard (and earlier argparse setup) is not
        # visible in this chunk.
        input_mean = args.input_mean
    # Override defaults only when the corresponding CLI flag was provided.
    if args.input_std:
        input_std = args.input_std
    if args.backend:
        backend_name = args.backend

    # Load the model: fall back to a TF-Hub InceptionV3 classifier when no
    # local SavedModel path was given on the command line.
    if model_file == "":
        model = hub.load(
            "https://tfhub.dev/google/imagenet/inception_v3/classification/4")
    else:
        model = tf.saved_model.load(model_file)

    # Either route execution through OpenVINO-TensorFlow (printing the
    # available backends and selecting one) or disable the integration.
    if not args.disable_ovtf:
        #Print list of available backends
        print('Available Backends:')
        backends_list = ovtf.list_backends()
        for backend in backends_list:
            print(backend)
        ovtf.set_backend(backend_name)
    else:
        ovtf.disable()

    #Load the labels
    cap = None
    images = []
    if label_file:
        labels = load_labels(label_file)
    # Dispatch on input type; `get_input_mode` presumably classifies the
    # path as video/camera/image — TODO confirm against its definition.
    input_mode = get_input_mode(input_file)
    if input_mode == "video":
        cap = cv2.VideoCapture(input_file)
    # NOTE(review): the "camera" branch body is cut off here — the rest of
    # this function lies outside the visible chunk.
    elif input_mode == "camera":
    def test_set_backend(self):
        """Verify that set_backend()/get_backend() round-trip for the
        INTERPRETER and CPU backends, both before and after executing a
        small graph in a session.
        """
        # Store env variables: when testing on backends like GPU the tests
        # are run with OPENVINO_TF_BACKEND set; by storing and restoring it
        # we run this test independently of the externally selected backend.
        # Currently we store and restore only OPENVINO_TF_BACKEND.
        env_var_map = self.store_env_variables(["OPENVINO_TF_BACKEND"])
        self.unset_env_variable("OPENVINO_TF_BACKEND")

        openvino_tensorflow.enable()
        backend_cpu = 'CPU'
        backend_interpreter = 'INTERPRETER'

        # These prints will only show when running pytest with flag "-s".
        supported_backends = openvino_tensorflow.list_backends()
        print("Number of supported backends ", len(supported_backends))
        print(" ****** Supported Backends ****** ")
        found_cpu = False
        found_interpreter = False
        for name in supported_backends:
            print(name)
            if name == backend_cpu:
                found_cpu = True
            if name == backend_interpreter:
                found_interpreter = True
        print(" ******************************** ")
        # Idiomatic truthiness test (the original compared `... == True`).
        if not (found_cpu and found_interpreter):
            raise AssertionError(
                "both CPU and INTERPRETER backends must be supported")

        # Create Graph: out2 = abs(abs(val)).
        val = tf.compat.v1.placeholder(tf.float32)
        out1 = tf.abs(val)
        out2 = tf.abs(out1)

        def _run_and_verify(expected_backend):
            """Assert the active backend, run the graph in a fresh session,
            then assert the backend is still the expected one.

            If you want to re-confirm which backend the graph was executed
            on, currently the only way is to enable OPENVINO_TF_VLOG_LEVEL=5.
            """
            if openvino_tensorflow.get_backend() != expected_backend:
                raise AssertionError(
                    "backend not set to %s before run" % expected_backend)
            with tf.compat.v1.Session() as sess:
                sess.run((out2, ), feed_dict={val: ((1.4, -0.5, -1))})
            if openvino_tensorflow.get_backend() != expected_backend:
                raise AssertionError(
                    "backend changed from %s after run" % expected_backend)

        # Exercise the INTERPRETER backend, then the CPU backend; the
        # duplicated set/run/verify sequence is factored into the helper.
        openvino_tensorflow.set_backend(backend_interpreter)
        _run_and_verify(backend_interpreter)
        openvino_tensorflow.set_backend(backend_cpu)
        _run_and_verify(backend_cpu)

        # restore env_variables
        self.restore_env_variables(env_var_map)